From 4686438b21faef6b417f00c2811cbe18cfefd1d5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 27 Nov 2024 11:04:52 -0500 Subject: [PATCH 01/20] Start work on `small_size_allocator` --- include/chainbase/small_size_allocator.hpp | 98 ++++++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 include/chainbase/small_size_allocator.hpp diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp new file mode 100644 index 0000000..8d3db73 --- /dev/null +++ b/include/chainbase/small_size_allocator.hpp @@ -0,0 +1,98 @@ +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace chainbase { + +namespace detail { + +template +class allocator { + backing_allocator& _back_alloc; + + allocator(backing_allocator& back_alloc) : + _back_alloc(back_alloc) { + } + +public: + using pointer = backing_allocator::pointer; + + pointer allocate(); + void deallocate(pointer p); +}; + +} // namespace detail + +// --------------------------------------------------------------------------------------- +// An array of 64 allocators for sizes from 8 to 512 bytes +// ------------------------------------------------------- +// +// All pointers used are of type `backing_allocator::pointer` +// --------------------------------------------------------------------------------------- +template +class small_size_allocator { +public: + using pointer = backing_allocator::pointer; + +private: + static constexpr size_t _mask = size_increment - 1; + + using alloc_tuple_t = decltype(make_slab_helper(1, std::make_index_sequence{})); + using alloc_fn_t = std::function; + using dealloc_fn_t = std::function; + + alloc_tuple_t _allocators; + std::array _alloc_functions; // using arrays of functions for fast access + std::array _dealloc_functions; + backing_allocator& _back_alloc; + +public: + static constexpr size_t max_size = num_allocators * size_increment; + + 
small_size_allocator(backing_allocator& back_alloc) + : _allocators( make_alloc_tuple(back_alloc, std::make_index_sequence{})) + , _alloc_functions( make_alloc_fn_array(std::make_index_sequence{})) + , _dealloc_functions(make_dealloc_fn_array(std::make_index_sequence{})) + , _back_alloc(back_alloc) + {} + + pointer allocate(std::size_t sz) { + if (sz <= max_size) + return _alloc_functions[allocator_index(sz)](); + return _back_alloc.allocate(sz); + } + + void deallocate(const pointer &p, std::size_t sz) { + if (sz <= max_size) + _dealloc_functions[allocator_index(sz)](p); + _back_alloc.deallocate(p, sz); + } + +private: + template + static constexpr auto make_alloc_tuple(backing_allocator& back_alloc, std::index_sequence) { + return std::tuple{new detail::allocator(back_alloc)...}; + } + + template + constexpr auto make_alloc_fn_array(std::index_sequence) { + return std::array{std::function{[&allocator = std::get(_allocators)] { return allocator->allocate(); }}...}; + } + + template + constexpr auto make_dealloc_fn_array(std::index_sequence) { + return std::array{std::function{[&allocator = std::get(_allocators)](pointer p) { allocator->deallocate(p); }}...}; + } + + static constexpr size_t allocator_index(size_t sz) { return (sz + _mask) & ~_mask; } +}; + +} // namespace chainbase From 8bfe637c7e32aefa285fdc3b298c3ed7a34fbebb Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 27 Nov 2024 17:25:14 -0500 Subject: [PATCH 02/20] wip --- include/chainbase/pinnable_mapped_file.hpp | 21 +++++-- include/chainbase/shared_cow_string.hpp | 2 +- include/chainbase/shared_cow_vector.hpp | 2 +- include/chainbase/small_size_allocator.hpp | 73 ++++++++++++++++------ src/pinnable_mapped_file.cpp | 5 +- 5 files changed, 76 insertions(+), 27 deletions(-) diff --git a/include/chainbase/pinnable_mapped_file.hpp b/include/chainbase/pinnable_mapped_file.hpp index 3197f77..640a122 100644 --- a/include/chainbase/pinnable_mapped_file.hpp +++ b/include/chainbase/pinnable_mapped_file.hpp @@ 
-5,6 +5,7 @@ #include #include #include +#include #include #include #include @@ -45,7 +46,14 @@ class chainbase_error_category : public std::error_category { using segment_manager = bip::managed_mapped_file::segment_manager; template -using allocator = bip::allocator; +using segment_allocator = bip::allocator; + +using byte_segment_allocator = segment_allocator; + +using ss_allocator = small_size_allocator; + +template +using allocator = object_allocator; class pinnable_mapped_file { public: @@ -72,13 +80,15 @@ class pinnable_mapped_file { auto it = _segment_manager_map.upper_bound(object); if(it == _segment_manager_map.begin()) return {}; - auto [seg_start, seg_end] = *(--it); + auto [seg_start, seg_info] = *(--it); // important: we need to check whether the pointer is really within the segment, as shared objects' // can also be created on the stack (in which case the data is actually allocated on the heap using // std::allocator). This happens for example when `shared_cow_string`s are inserted into a bip::multimap, // and temporary pairs are created on the stack by the bip::multimap code. 
- if (object < seg_end) - return allocator(reinterpret_cast(seg_start)); + if (object < seg_info.seg_end) { + ss_allocator* ss_alloc = seg_info.alloc; + return std::optional>{allocator(ss_alloc)}; + } } return {}; } @@ -114,7 +124,8 @@ class pinnable_mapped_file { static std::vector _instance_tracker; - using segment_manager_map_t = boost::container::flat_map; + struct seg_info_t { void* seg_end; ss_allocator* alloc; }; + using segment_manager_map_t = boost::container::flat_map; static segment_manager_map_t _segment_manager_map; constexpr static unsigned _db_size_multiple_requirement = 1024*1024; //1MB diff --git a/include/chainbase/shared_cow_string.hpp b/include/chainbase/shared_cow_string.hpp index 4dd96dc..32e139f 100644 --- a/include/chainbase/shared_cow_string.hpp +++ b/include/chainbase/shared_cow_string.hpp @@ -26,7 +26,7 @@ namespace chainbase { }; public: - using allocator_type = bip::allocator; + using allocator_type = allocator; using iterator = const char*; using const_iterator = const char*; diff --git a/include/chainbase/shared_cow_vector.hpp b/include/chainbase/shared_cow_vector.hpp index 4af04ff..7de49e7 100644 --- a/include/chainbase/shared_cow_vector.hpp +++ b/include/chainbase/shared_cow_vector.hpp @@ -23,7 +23,7 @@ namespace chainbase { }; public: - using allocator_type = bip::allocator; + using allocator_type = allocator; using iterator = const T*; // const because of copy-on-write using const_iterator = const T*; using value_type = T; diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 8d3db73..1aeee6b 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -16,17 +16,21 @@ namespace detail { template class allocator { +public: backing_allocator& _back_alloc; - allocator(backing_allocator& back_alloc) : - _back_alloc(back_alloc) { + allocator(backing_allocator* back_alloc) : + _back_alloc(*back_alloc) { } -public: using pointer = 
backing_allocator::pointer; pointer allocate(); + void deallocate(pointer p); + +private: + std::mutex _m; // must be thread-safe }; } // namespace detail @@ -36,6 +40,7 @@ class allocator { // ------------------------------------------------------- // // All pointers used are of type `backing_allocator::pointer` +// allocate/deallocate specify size in bytes. // --------------------------------------------------------------------------------------- template class small_size_allocator { @@ -43,45 +48,47 @@ class small_size_allocator { using pointer = backing_allocator::pointer; private: + template + static constexpr auto make_alloc_tuple(backing_allocator* back_alloc, std::index_sequence) { + return std::tuple{new detail::allocator(back_alloc)...}; + } + + static_assert(sizeof(typename backing_allocator::value_type) == 1, "backing_allocator should be allocating bytes"); + static constexpr size_t _mask = size_increment - 1; - using alloc_tuple_t = decltype(make_slab_helper(1, std::make_index_sequence{})); + using alloc_tuple_t = decltype(make_alloc_tuple(nullptr, std::make_index_sequence{})); using alloc_fn_t = std::function; using dealloc_fn_t = std::function; + backing_allocator _back_alloc; alloc_tuple_t _allocators; std::array _alloc_functions; // using arrays of functions for fast access std::array _dealloc_functions; - backing_allocator& _back_alloc; public: static constexpr size_t max_size = num_allocators * size_increment; - small_size_allocator(backing_allocator& back_alloc) - : _allocators( make_alloc_tuple(back_alloc, std::make_index_sequence{})) + small_size_allocator(backing_allocator back_alloc) + : _back_alloc(std::move(back_alloc)) + , _allocators( make_alloc_tuple(&_back_alloc, std::make_index_sequence{})) , _alloc_functions( make_alloc_fn_array(std::make_index_sequence{})) , _dealloc_functions(make_dealloc_fn_array(std::make_index_sequence{})) - , _back_alloc(back_alloc) {} pointer allocate(std::size_t sz) { - if (sz <= max_size) + if (0 && sz <= 
max_size) return _alloc_functions[allocator_index(sz)](); - return _back_alloc.allocate(sz); + return _back_alloc->allocate(sz); } - void deallocate(const pointer &p, std::size_t sz) { - if (sz <= max_size) + void deallocate(const pointer& p, std::size_t sz) { + if (0 && sz <= max_size) _dealloc_functions[allocator_index(sz)](p); - _back_alloc.deallocate(p, sz); + _back_alloc->deallocate(p, sz); } private: - template - static constexpr auto make_alloc_tuple(backing_allocator& back_alloc, std::index_sequence) { - return std::tuple{new detail::allocator(back_alloc)...}; - } - template constexpr auto make_alloc_fn_array(std::index_sequence) { return std::array{std::function{[&allocator = std::get(_allocators)] { return allocator->allocate(); }}...}; @@ -89,10 +96,38 @@ class small_size_allocator { template constexpr auto make_dealloc_fn_array(std::index_sequence) { - return std::array{std::function{[&allocator = std::get(_allocators)](pointer p) { allocator->deallocate(p); }}...}; + return std::array{std::function{[&allocator = std::get(_allocators)](const pointer& p) { allocator->deallocate(p); }}...}; } static constexpr size_t allocator_index(size_t sz) { return (sz + _mask) & ~_mask; } }; +// --------------------------------------------------------------------------------------- +// Object allocator +// ---------------- +// +// emulates the API of `bip::allocator` +// --------------------------------------------------------------------------------------- +template +class object_allocator { +public: + using pointer = backing_allocator::pointer; + + object_allocator(backing_allocator* back_alloc) :_back_alloc(back_alloc) { + } + + pointer allocate(std::size_t count) { + return _back_alloc->allocate(count*sizeof(T)); + } + + void deallocate(const pointer& p, std::size_t count) { + return _back_alloc->deallocate(p, count*sizeof(T)); + } + + bool operator==(const object_allocator&) const = default; + +private: + backing_allocator* _back_alloc; // allocates by size in 
bytes +}; + } // namespace chainbase diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index a1f65fb..c1ea338 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -251,7 +251,10 @@ pinnable_mapped_file::pinnable_mapped_file(const std::filesystem::path& dir, boo } std::byte* start = (std::byte*)_segment_manager; assert(_segment_manager_map.find(start) == _segment_manager_map.end()); - _segment_manager_map[start] = start + _segment_manager->get_size(); + + byte_segment_allocator byte_allocator(_segment_manager); + _segment_manager_map[start] = seg_info_t{start + _segment_manager->get_size(), + new ss_allocator(byte_allocator)}; } void pinnable_mapped_file::setup_copy_on_write_mapping() { From cb336e0ba3a58275b085d4f14e446e9d0e14fd81 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 2 Dec 2024 17:08:11 -0500 Subject: [PATCH 03/20] wip --- include/chainbase/pinnable_mapped_file.hpp | 20 ++++++++----- include/chainbase/small_size_allocator.hpp | 34 ++++++++++++--------- src/pinnable_mapped_file.cpp | 3 +- test/test.cpp | 35 +++++++++++++--------- 4 files changed, 55 insertions(+), 37 deletions(-) diff --git a/include/chainbase/pinnable_mapped_file.hpp b/include/chainbase/pinnable_mapped_file.hpp index 640a122..18552f3 100644 --- a/include/chainbase/pinnable_mapped_file.hpp +++ b/include/chainbase/pinnable_mapped_file.hpp @@ -46,14 +46,20 @@ class chainbase_error_category : public std::error_category { using segment_manager = bip::managed_mapped_file::segment_manager; template -using segment_allocator = bip::allocator; +using segment_allocator_t = bip::allocator; -using byte_segment_allocator = segment_allocator; +using byte_segment_allocator_t = segment_allocator_t; -using ss_allocator = small_size_allocator; +using ss_allocator_t = small_size_allocator; template -using allocator = object_allocator; +using allocator = object_allocator; + +template +auto make_small_size_allocator(segment_manager* seg_mgr) { + 
byte_segment_allocator_t byte_allocator(seg_mgr); + return std::make_unique(byte_allocator); +} class pinnable_mapped_file { public: @@ -80,13 +86,13 @@ class pinnable_mapped_file { auto it = _segment_manager_map.upper_bound(object); if(it == _segment_manager_map.begin()) return {}; - auto [seg_start, seg_info] = *(--it); + auto& [seg_start, seg_info] = *(--it); // important: we need to check whether the pointer is really within the segment, as shared objects' // can also be created on the stack (in which case the data is actually allocated on the heap using // std::allocator). This happens for example when `shared_cow_string`s are inserted into a bip::multimap, // and temporary pairs are created on the stack by the bip::multimap code. if (object < seg_info.seg_end) { - ss_allocator* ss_alloc = seg_info.alloc; + ss_allocator_t* ss_alloc = seg_info.alloc.get(); return std::optional>{allocator(ss_alloc)}; } } @@ -124,7 +130,7 @@ class pinnable_mapped_file { static std::vector _instance_tracker; - struct seg_info_t { void* seg_end; ss_allocator* alloc; }; + struct seg_info_t { void* seg_end; std::unique_ptr alloc; }; using segment_manager_map_t = boost::container::flat_map; static segment_manager_map_t _segment_manager_map; diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 1aeee6b..6683e22 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -27,7 +27,7 @@ class allocator { pointer allocate(); - void deallocate(pointer p); + void deallocate(const pointer& p); private: std::mutex _m; // must be thread-safe @@ -35,6 +35,7 @@ class allocator { } // namespace detail + // --------------------------------------------------------------------------------------- // An array of 64 allocators for sizes from 8 to 512 bytes // ------------------------------------------------------- @@ -61,7 +62,10 @@ class small_size_allocator { using alloc_fn_t = std::function; using 
dealloc_fn_t = std::function; + // we store a constructed `bip::allocator` in _back_alloc + // all `bip::allocator` constructed from the same `segment_manager` are equivalent backing_allocator _back_alloc; + alloc_tuple_t _allocators; std::array _alloc_functions; // using arrays of functions for fast access std::array _dealloc_functions; @@ -76,16 +80,16 @@ class small_size_allocator { , _dealloc_functions(make_dealloc_fn_array(std::make_index_sequence{})) {} - pointer allocate(std::size_t sz) { - if (0 && sz <= max_size) - return _alloc_functions[allocator_index(sz)](); - return _back_alloc->allocate(sz); + pointer allocate(std::size_t sz_in_bytes) { + if (0 && sz_in_bytes <= max_size) + return _alloc_functions[allocator_index(sz_in_bytes)](); + return _back_alloc.allocate(sz_in_bytes); } - void deallocate(const pointer& p, std::size_t sz) { - if (0 && sz <= max_size) - _dealloc_functions[allocator_index(sz)](p); - _back_alloc->deallocate(p, sz); + void deallocate(const pointer& p, std::size_t sz_in_bytes) { + if (0 && sz_in_bytes <= max_size) + _dealloc_functions[allocator_index(sz_in_bytes)](p); + _back_alloc.deallocate(p, sz_in_bytes); } private: @@ -99,9 +103,10 @@ class small_size_allocator { return std::array{std::function{[&allocator = std::get(_allocators)](const pointer& p) { allocator->deallocate(p); }}...}; } - static constexpr size_t allocator_index(size_t sz) { return (sz + _mask) & ~_mask; } + static constexpr size_t allocator_index(size_t sz_in_bytes) { return (sz_in_bytes + _mask) & ~_mask; } }; + // --------------------------------------------------------------------------------------- // Object allocator // ---------------- @@ -112,16 +117,17 @@ template class object_allocator { public: using pointer = backing_allocator::pointer; + using value_type = T; object_allocator(backing_allocator* back_alloc) :_back_alloc(back_alloc) { } - pointer allocate(std::size_t count) { - return _back_alloc->allocate(count*sizeof(T)); + pointer allocate(std::size_t 
num_objects) { + return _back_alloc->allocate(num_objects * sizeof(T)); } - void deallocate(const pointer& p, std::size_t count) { - return _back_alloc->deallocate(p, count*sizeof(T)); + void deallocate(const pointer& p, std::size_t num_objects) { + return _back_alloc->deallocate(p, num_objects * sizeof(T)); } bool operator==(const object_allocator&) const = default; diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index c1ea338..b0e7c39 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -252,9 +252,8 @@ pinnable_mapped_file::pinnable_mapped_file(const std::filesystem::path& dir, boo std::byte* start = (std::byte*)_segment_manager; assert(_segment_manager_map.find(start) == _segment_manager_map.end()); - byte_segment_allocator byte_allocator(_segment_manager); _segment_manager_map[start] = seg_info_t{start + _segment_manager->get_size(), - new ss_allocator(byte_allocator)}; + make_small_size_allocator(_segment_manager)}; } void pinnable_mapped_file::setup_copy_on_write_mapping() { diff --git a/test/test.cpp b/test/test.cpp index ba9f576..b2ef9ff 100644 --- a/test/test.cpp +++ b/test/test.cpp @@ -176,8 +176,6 @@ void check_shared_vector_apis(VecOfVec& vec_of_vec, const Alloc& expected_alloc) // check that objects are allocated where we expect (i.e. 
using the same allocator as `vec_of_vec`) // ------------------------------------------------------------------------------------------------ BOOST_REQUIRE(v.get_allocator() == expected_alloc); - if constexpr(!std::is_same_v) - BOOST_REQUIRE(v[0].get_allocator() == expected_alloc); } { @@ -453,32 +451,41 @@ BOOST_AUTO_TEST_CASE(shared_vector_apis_segment_alloc) { const auto& temp = temp_dir.path(); pinnable_mapped_file pmf(temp, true, 1024 * 1024, false, pinnable_mapped_file::map_mode::mapped); - std::optional> expected_alloc = chainbase::allocator(pmf.get_segment_manager()); + + //auto expected_alloc = chainbase::allocator(pmf.get_segment_manager()); size_t free_memory = pmf.get_segment_manager()->get_free_memory(); + auto ss_alloc = chainbase::make_small_size_allocator(pmf.get_segment_manager()); { // do the test with `shared_vector` (trivial destructor) // ---------------------------------------------------------- - using sv = shared_vector; - chainbase::allocator sv_alloc(pmf.get_segment_manager()); - sv v; + using sv_t = shared_vector; + using expected_alloc_t = sv_t::allocator_type; - bip::vector> vec_of_vec(sv_alloc); + using vec_of_vec_alloc_t = chainbase::object_allocator; + using vec_of_vec_t = bip::vector; + + auto sv_alloc(expected_alloc_t{ss_alloc.get()}); + vec_of_vec_t vec_of_vec(vec_of_vec_alloc_t{ss_alloc.get()}); - check_shared_vector_apis(vec_of_vec, expected_alloc); + check_shared_vector_apis(vec_of_vec, std::optional{sv_alloc}); } { // do the test with `shared_vector` (non-trivial destructor) // -------------------------------------------------------------------- - using sv = shared_vector; - chainbase::allocator sv_alloc(pmf.get_segment_manager()); - sv v; - - bip::vector> vec_of_vec(sv_alloc); + using sv_t = shared_vector; + using expected_alloc_t = sv_t::allocator_type; + + using vec_of_vec_alloc_t = chainbase::object_allocator; + using vec_of_vec_t = bip::vector; + + auto sv_alloc(expected_alloc_t{ss_alloc.get()}); + vec_of_vec_t 
vec_of_vec(vec_of_vec_alloc_t{ss_alloc.get()}); + sv_t v; - check_shared_vector_apis(vec_of_vec, expected_alloc); + check_shared_vector_apis(vec_of_vec, std::optional{sv_alloc}); // clear both vectors. If our implementation of `shared_cow_vector` is correct, we should have an exact // match of the number of constructed and destroyed `my_string` objects, and therefore after clearing the vectors From 9d73d4881385453f838e6863a4daaa5eac70bd80 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 3 Dec 2024 13:13:06 -0500 Subject: [PATCH 04/20] wip --- include/chainbase/pinnable_mapped_file.hpp | 7 ++++ include/chainbase/small_size_allocator.hpp | 41 +++++++++++++++------- 2 files changed, 36 insertions(+), 12 deletions(-) diff --git a/include/chainbase/pinnable_mapped_file.hpp b/include/chainbase/pinnable_mapped_file.hpp index 18552f3..8ee7bd4 100644 --- a/include/chainbase/pinnable_mapped_file.hpp +++ b/include/chainbase/pinnable_mapped_file.hpp @@ -138,6 +138,13 @@ class pinnable_mapped_file { constexpr static size_t _db_size_copy_increment = 1024*1024*1024; //1GB }; +// pointer can be to the segment manager, or any object contained within. 
+// --------------------------------------------------------------------- +template +auto make_allocator(void* seg_mgr) { + return *pinnable_mapped_file::get_allocator(seg_mgr); +} + std::istream& operator>>(std::istream& in, pinnable_mapped_file::map_mode& runtime); std::ostream& operator<<(std::ostream& osm, pinnable_mapped_file::map_mode m); diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 6683e22..d152dbf 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -9,28 +9,42 @@ #include #include #include +#include + +namespace bip = boost::interprocess; namespace chainbase { namespace detail { +template +class allocator_base { + using pointer = backing_allocator::pointer; + virtual pointer allocate() = 0; + virtual void deallocate(const pointer& p) = 0; +}; + template -class allocator { +class allocator : public allocator_base { public: - backing_allocator& _back_alloc; - allocator(backing_allocator* back_alloc) : _back_alloc(*back_alloc) { } using pointer = backing_allocator::pointer; - pointer allocate(); + pointer allocate() final { + std::lock_guard g(_m); + return pointer(nullptr); + } - void deallocate(const pointer& p); + void deallocate(const pointer& p) final { + std::lock_guard g(_m); + } private: - std::mutex _m; // must be thread-safe + bip::offset_ptr _back_alloc; + std::mutex _m; }; } // namespace detail @@ -51,6 +65,8 @@ class small_size_allocator { private: template static constexpr auto make_alloc_tuple(backing_allocator* back_alloc, std::index_sequence) { + // todo: should be tuple of `bip::offset_ptr` + // should be allocated from `backing_allocator` return std::tuple{new detail::allocator(back_alloc)...}; } @@ -116,24 +132,25 @@ class small_size_allocator { template class object_allocator { public: - using pointer = backing_allocator::pointer; - using value_type = T; - + using char_pointer = backing_allocator::pointer; + using pointer = 
char_pointer::template rebind; + using value_type = T; + object_allocator(backing_allocator* back_alloc) :_back_alloc(back_alloc) { } pointer allocate(std::size_t num_objects) { - return _back_alloc->allocate(num_objects * sizeof(T)); + return pointer(static_cast(static_cast(_back_alloc->allocate(num_objects * sizeof(T)).get()))); } void deallocate(const pointer& p, std::size_t num_objects) { - return _back_alloc->deallocate(p, num_objects * sizeof(T)); + return _back_alloc->deallocate(char_pointer(static_cast(static_cast(p.get()))), num_objects * sizeof(T)); } bool operator==(const object_allocator&) const = default; private: - backing_allocator* _back_alloc; // allocates by size in bytes + backing_allocator* _back_alloc; // allocates by size in bytes // todo: should be `offset_ptr` }; } // namespace chainbase From 9614c9de2fe281af2188c7459b994ca110d5d1f9 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 4 Dec 2024 09:34:12 -0500 Subject: [PATCH 05/20] All tests pass --- include/chainbase/pinnable_mapped_file.hpp | 8 +-- include/chainbase/small_size_allocator.hpp | 67 +++++++--------------- src/pinnable_mapped_file.cpp | 6 +- test/test.cpp | 34 +++++------ 4 files changed, 45 insertions(+), 70 deletions(-) diff --git a/include/chainbase/pinnable_mapped_file.hpp b/include/chainbase/pinnable_mapped_file.hpp index 8ee7bd4..696a398 100644 --- a/include/chainbase/pinnable_mapped_file.hpp +++ b/include/chainbase/pinnable_mapped_file.hpp @@ -58,7 +58,7 @@ using allocator = object_allocator; template auto make_small_size_allocator(segment_manager* seg_mgr) { byte_segment_allocator_t byte_allocator(seg_mgr); - return std::make_unique(byte_allocator); + return new (seg_mgr->allocate(sizeof(ss_allocator_t))) ss_allocator_t(byte_allocator); } class pinnable_mapped_file { @@ -92,8 +92,8 @@ class pinnable_mapped_file { // std::allocator). 
This happens for example when `shared_cow_string`s are inserted into a bip::multimap, // and temporary pairs are created on the stack by the bip::multimap code. if (object < seg_info.seg_end) { - ss_allocator_t* ss_alloc = seg_info.alloc.get(); - return std::optional>{allocator(ss_alloc)}; + assert(seg_info.ss_alloc); + return std::optional>{allocator(seg_info.ss_alloc)}; } } return {}; @@ -130,7 +130,7 @@ class pinnable_mapped_file { static std::vector _instance_tracker; - struct seg_info_t { void* seg_end; std::unique_ptr alloc; }; + struct seg_info_t { void* seg_end; ss_allocator_t* ss_alloc; }; using segment_manager_map_t = boost::container::flat_map; static segment_manager_map_t _segment_manager_map; diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index d152dbf..3774167 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -19,6 +19,7 @@ namespace detail { template class allocator_base { +public: using pointer = backing_allocator::pointer; virtual pointer allocate() = 0; virtual void deallocate(const pointer& p) = 0; @@ -27,8 +28,8 @@ class allocator_base { template class allocator : public allocator_base { public: - allocator(backing_allocator* back_alloc) : - _back_alloc(*back_alloc) { + allocator(backing_allocator back_alloc) : + _back_alloc(back_alloc) { } using pointer = backing_allocator::pointer; @@ -43,7 +44,7 @@ class allocator : public allocator_base { } private: - bip::offset_ptr _back_alloc; + backing_allocator _back_alloc; std::mutex _m; }; @@ -57,69 +58,45 @@ class allocator : public allocator_base { // All pointers used are of type `backing_allocator::pointer` // allocate/deallocate specify size in bytes. 
// --------------------------------------------------------------------------------------- -template +template class small_size_allocator { public: using pointer = backing_allocator::pointer; private: - template - static constexpr auto make_alloc_tuple(backing_allocator* back_alloc, std::index_sequence) { - // todo: should be tuple of `bip::offset_ptr` - // should be allocated from `backing_allocator` - return std::tuple{new detail::allocator(back_alloc)...}; - } + using base_alloc_ptr = bip::offset_ptr>; - static_assert(sizeof(typename backing_allocator::value_type) == 1, "backing_allocator should be allocating bytes"); - - static constexpr size_t _mask = size_increment - 1; - - using alloc_tuple_t = decltype(make_alloc_tuple(nullptr, std::make_index_sequence{})); - using alloc_fn_t = std::function; - using dealloc_fn_t = std::function; + backing_allocator _back_alloc; + std::array _allocators; - // we store a constructed `bip::allocator` in _back_alloc - // all `bip::allocator` constructed from the same `segment_manager` are equivalent - backing_allocator _back_alloc; + static constexpr size_t mask = size_increment - 1; + static constexpr size_t max_size = num_allocators * size_increment; - alloc_tuple_t _allocators; - std::array _alloc_functions; // using arrays of functions for fast access - std::array _dealloc_functions; + static constexpr size_t allocator_index(size_t sz_in_bytes) { return (sz_in_bytes + mask) & ~mask; } -public: - static constexpr size_t max_size = num_allocators * size_increment; + template + auto make_allocators(backing_allocator back_alloc, std::index_sequence) { + return std::array{ + new (&*_back_alloc.allocate(sizeof(detail::allocator))) + detail::allocator(back_alloc)...}; + } +public: small_size_allocator(backing_allocator back_alloc) : _back_alloc(std::move(back_alloc)) - , _allocators( make_alloc_tuple(&_back_alloc, std::make_index_sequence{})) - , _alloc_functions( make_alloc_fn_array(std::make_index_sequence{})) - , 
_dealloc_functions(make_dealloc_fn_array(std::make_index_sequence{})) - {} + , _allocators(make_allocators(back_alloc, std::make_index_sequence{})) {} pointer allocate(std::size_t sz_in_bytes) { if (0 && sz_in_bytes <= max_size) - return _alloc_functions[allocator_index(sz_in_bytes)](); + return _allocators[allocator_index(sz_in_bytes)]->allocate(); return _back_alloc.allocate(sz_in_bytes); } void deallocate(const pointer& p, std::size_t sz_in_bytes) { if (0 && sz_in_bytes <= max_size) - _dealloc_functions[allocator_index(sz_in_bytes)](p); + _allocators[allocator_index(sz_in_bytes)]->deallocate(p); _back_alloc.deallocate(p, sz_in_bytes); } - -private: - template - constexpr auto make_alloc_fn_array(std::index_sequence) { - return std::array{std::function{[&allocator = std::get(_allocators)] { return allocator->allocate(); }}...}; - } - - template - constexpr auto make_dealloc_fn_array(std::index_sequence) { - return std::array{std::function{[&allocator = std::get(_allocators)](const pointer& p) { allocator->deallocate(p); }}...}; - } - - static constexpr size_t allocator_index(size_t sz_in_bytes) { return (sz_in_bytes + _mask) & ~_mask; } }; @@ -150,7 +127,7 @@ class object_allocator { bool operator==(const object_allocator&) const = default; private: - backing_allocator* _back_alloc; // allocates by size in bytes // todo: should be `offset_ptr` + bip::offset_ptr _back_alloc; // allocates by size in bytes }; } // namespace chainbase diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index b0e7c39..e9aecf6 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -252,8 +252,10 @@ pinnable_mapped_file::pinnable_mapped_file(const std::filesystem::path& dir, boo std::byte* start = (std::byte*)_segment_manager; assert(_segment_manager_map.find(start) == _segment_manager_map.end()); - _segment_manager_map[start] = seg_info_t{start + _segment_manager->get_size(), - make_small_size_allocator(_segment_manager)}; + 
_segment_manager_map[start] = + seg_info_t{start + _segment_manager->get_size(), + _writable + ? make_small_size_allocator(_segment_manager) : nullptr }; } void pinnable_mapped_file::setup_copy_on_write_mapping() { diff --git a/test/test.cpp b/test/test.cpp index b2ef9ff..a649fe1 100644 --- a/test/test.cpp +++ b/test/test.cpp @@ -175,7 +175,8 @@ void check_shared_vector_apis(VecOfVec& vec_of_vec, const Alloc& expected_alloc) // check that objects are allocated where we expect (i.e. using the same allocator as `vec_of_vec`) // ------------------------------------------------------------------------------------------------ - BOOST_REQUIRE(v.get_allocator() == expected_alloc); + auto alloc = v.get_allocator(); + BOOST_REQUIRE(!alloc || *alloc == expected_alloc); } { @@ -193,6 +194,7 @@ void check_shared_vector_apis(VecOfVec& vec_of_vec, const Alloc& expected_alloc) // check copy constructor. Verify copy-on-write after assign // --------------------------------------------------------- vec_of_vec.clear(); + vec_of_vec.reserve(2); // so the second emplace_back doesn't reallocate vec_of_vec.emplace_back(int_array.cbegin(), int_array.cend()); vec_of_vec.emplace_back(vec_of_vec[0]); auto& v0 = vec_of_vec[0]; @@ -210,6 +212,7 @@ void check_shared_vector_apis(VecOfVec& vec_of_vec, const Alloc& expected_alloc) // check move constructor. 
// ----------------------- vec_of_vec.clear(); + vec_of_vec.reserve(2); // so the second emplace_back doesn't reallocate vec_of_vec.emplace_back(int_array.cbegin(), int_array.cend()); auto& v0 = vec_of_vec[0]; vec_of_vec.emplace_back(std::move(v0)); @@ -451,41 +454,34 @@ BOOST_AUTO_TEST_CASE(shared_vector_apis_segment_alloc) { const auto& temp = temp_dir.path(); pinnable_mapped_file pmf(temp, true, 1024 * 1024, false, pinnable_mapped_file::map_mode::mapped); + auto seg_mgr = pmf.get_segment_manager(); - //auto expected_alloc = chainbase::allocator(pmf.get_segment_manager()); - - size_t free_memory = pmf.get_segment_manager()->get_free_memory(); - auto ss_alloc = chainbase::make_small_size_allocator(pmf.get_segment_manager()); + size_t free_memory = seg_mgr->get_free_memory(); { // do the test with `shared_vector` (trivial destructor) // ---------------------------------------------------------- using sv_t = shared_vector; - using expected_alloc_t = sv_t::allocator_type; - - using vec_of_vec_alloc_t = chainbase::object_allocator; - using vec_of_vec_t = bip::vector; + auto sv_alloc = chainbase::make_allocator(seg_mgr); - auto sv_alloc(expected_alloc_t{ss_alloc.get()}); - vec_of_vec_t vec_of_vec(vec_of_vec_alloc_t{ss_alloc.get()}); + using vec_of_vec_t = bip::vector; + vec_of_vec_t vec_of_vec(sv_alloc); - check_shared_vector_apis(vec_of_vec, std::optional{sv_alloc}); + check_shared_vector_apis(vec_of_vec, chainbase::make_allocator(seg_mgr)); } { // do the test with `shared_vector` (non-trivial destructor) // -------------------------------------------------------------------- using sv_t = shared_vector; - using expected_alloc_t = sv_t::allocator_type; + auto sv_alloc = chainbase::make_allocator(seg_mgr); - using vec_of_vec_alloc_t = chainbase::object_allocator; - using vec_of_vec_t = bip::vector; - - auto sv_alloc(expected_alloc_t{ss_alloc.get()}); - vec_of_vec_t vec_of_vec(vec_of_vec_alloc_t{ss_alloc.get()}); + using vec_of_vec_t = bip::vector; + vec_of_vec_t 
vec_of_vec(sv_alloc); + sv_t v; - check_shared_vector_apis(vec_of_vec, std::optional{sv_alloc}); + check_shared_vector_apis(vec_of_vec, chainbase::make_allocator(seg_mgr)); // clear both vectors. If our implementation of `shared_cow_vector` is correct, we should have an exact // match of the number of constructed and destroyed `my_string` objects, and therefore after clearing the vectors From 7a72fdef07f33220ad4ac51d29a6ab5639cce666 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 5 Dec 2024 10:04:00 -0500 Subject: [PATCH 06/20] Implement the `detail::allocator` and fix a couple issues. --- include/chainbase/small_size_allocator.hpp | 70 ++++++++++++++++++---- 1 file changed, 57 insertions(+), 13 deletions(-) diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 3774167..28b9a06 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -21,31 +21,68 @@ template class allocator_base { public: using pointer = backing_allocator::pointer; - virtual pointer allocate() = 0; - virtual void deallocate(const pointer& p) = 0; + + virtual pointer allocate() = 0; + virtual void deallocate(const pointer& p) = 0; + virtual size_t freelist_memory_usage() = 0; }; template class allocator : public allocator_base { public: + using T = std::array; + using pointer = backing_allocator::pointer; + allocator(backing_allocator back_alloc) : _back_alloc(back_alloc) { } - using pointer = backing_allocator::pointer; - pointer allocate() final { std::lock_guard g(_m); - return pointer(nullptr); + if (_freelist == nullptr) { + get_some(); + } + list_item* result = &*_freelist; + _freelist = _freelist->_next; + result->~list_item(); + --_freelist_size; + return pointer{(typename backing_allocator::value_type*)result}; } - + void deallocate(const pointer& p) final { std::lock_guard g(_m); + _freelist = new (&*p) list_item{_freelist}; + ++_freelist_size; + } + + size_t freelist_memory_usage() 
final { + std::lock_guard g(_m); + return _freelist_size * sizeof(T); } private: - backing_allocator _back_alloc; - std::mutex _m; + struct list_item { bip::offset_ptr _next; }; + static constexpr size_t allocation_batch_size = 128; + + void get_some() { + static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list"); + static_assert(sizeof(T) % alignof(list_item) == 0, "Bad alignment for free list"); + + char* result = (char*)&*_back_alloc.allocate(sizeof(T) * allocation_batch_size); + _freelist_size += allocation_batch_size; + _freelist = bip::offset_ptr{(list_item*)result}; + for (unsigned i = 0; i < allocation_batch_size - 1; ++i) { + char* next = result + sizeof(T); + new (result) list_item{bip::offset_ptr{(list_item*)next}}; + result = next; + } + new (result) list_item{nullptr}; + } + + backing_allocator _back_alloc; + bip::offset_ptr _freelist; + size_t _freelist_size = 0; + std::mutex _m; }; } // namespace detail @@ -59,6 +96,7 @@ class allocator : public allocator_base { // allocate/deallocate specify size in bytes. 
// --------------------------------------------------------------------------------------- template +requires ((size_increment & (size_increment - 1)) == 0) // power of two class small_size_allocator { public: using pointer = backing_allocator::pointer; @@ -69,10 +107,12 @@ class small_size_allocator { backing_allocator _back_alloc; std::array _allocators; - static constexpr size_t mask = size_increment - 1; static constexpr size_t max_size = num_allocators * size_increment; - static constexpr size_t allocator_index(size_t sz_in_bytes) { return (sz_in_bytes + mask) & ~mask; } + static constexpr size_t allocator_index(size_t sz_in_bytes) { + assert(sz_in_bytes > 0); + return (sz_in_bytes -1) / size_increment; + } template auto make_allocators(backing_allocator back_alloc, std::index_sequence) { @@ -87,15 +127,16 @@ class small_size_allocator { , _allocators(make_allocators(back_alloc, std::make_index_sequence{})) {} pointer allocate(std::size_t sz_in_bytes) { - if (0 && sz_in_bytes <= max_size) + if (sz_in_bytes <= max_size) return _allocators[allocator_index(sz_in_bytes)]->allocate(); return _back_alloc.allocate(sz_in_bytes); } void deallocate(const pointer& p, std::size_t sz_in_bytes) { - if (0 && sz_in_bytes <= max_size) + if (sz_in_bytes <= max_size) _allocators[allocator_index(sz_in_bytes)]->deallocate(p); - _back_alloc.deallocate(p, sz_in_bytes); + else + _back_alloc.deallocate(p, sz_in_bytes); } }; @@ -105,6 +146,8 @@ class small_size_allocator { // ---------------- // // emulates the API of `bip::allocator` +// +// backing_allocator is `the small_size_allocator` // --------------------------------------------------------------------------------------- template class object_allocator { @@ -121,6 +164,7 @@ class object_allocator { } void deallocate(const pointer& p, std::size_t num_objects) { + assert(p != nullptr); return _back_alloc->deallocate(char_pointer(static_cast(static_cast(p.get()))), num_objects * sizeof(T)); } From 
5bedb5ceff2b398fc556b507b8d2c328b3fbd526 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 6 Dec 2024 17:32:59 -0500 Subject: [PATCH 07/20] Fix storage of `small_size_allocator` and test. --- include/chainbase/environment.hpp | 15 ++++++---- include/chainbase/pinnable_mapped_file.hpp | 7 +++-- include/chainbase/small_size_allocator.hpp | 34 ++++++++++++++++++---- src/pinnable_mapped_file.cpp | 16 +++++++--- test/test.cpp | 4 ++- 5 files changed, 58 insertions(+), 18 deletions(-) diff --git a/include/chainbase/environment.hpp b/include/chainbase/environment.hpp index 322f9be..cc116ae 100644 --- a/include/chainbase/environment.hpp +++ b/include/chainbase/environment.hpp @@ -5,9 +5,13 @@ namespace chainbase { constexpr size_t header_size = 1024; + // `CHAINB01` reflects changes since `EOSIODB3`. +// `CHAINB02` adds the small size allocator // Spring 1.0 is compatible with `CHAINB01`. -constexpr uint64_t header_id = 0x3130424e49414843ULL; //"CHAINB01" little endian +// Spring 2.0 is compatible with `CHAINB02`. 
+// --------------------------------------------- +constexpr uint64_t header_id = 0x3230424e49414843ULL; //"CHAINB02" little endian struct environment { environment() { @@ -67,10 +71,11 @@ struct environment { } __attribute__ ((packed)); struct db_header { - uint64_t id = header_id; - bool dirty = false; - environment dbenviron; -} __attribute__ ((packed)); + uint64_t id = header_id; + bool dirty = false; + bip::offset_ptr small_size_allocator; + environment dbenviron; +}; constexpr size_t header_dirty_bit_offset = offsetof(db_header, dirty); diff --git a/include/chainbase/pinnable_mapped_file.hpp b/include/chainbase/pinnable_mapped_file.hpp index 696a398..4b99ef0 100644 --- a/include/chainbase/pinnable_mapped_file.hpp +++ b/include/chainbase/pinnable_mapped_file.hpp @@ -80,6 +80,8 @@ class pinnable_mapped_file { segment_manager* get_segment_manager() const { return _segment_manager;} size_t check_memory_and_flush_if_needed(); + static ss_allocator_t* get_small_size_allocator(std::byte* seg_mgr); + template static std::optional> get_allocator(void *object) { if (!_segment_manager_map.empty()) { @@ -92,8 +94,7 @@ class pinnable_mapped_file { // std::allocator). This happens for example when `shared_cow_string`s are inserted into a bip::multimap, // and temporary pairs are created on the stack by the bip::multimap code. 
if (object < seg_info.seg_end) { - assert(seg_info.ss_alloc); - return std::optional>{allocator(seg_info.ss_alloc)}; + return std::optional>{allocator(get_small_size_allocator(static_cast(seg_start)))}; } } return {}; @@ -130,7 +131,7 @@ class pinnable_mapped_file { static std::vector _instance_tracker; - struct seg_info_t { void* seg_end; ss_allocator_t* ss_alloc; }; + struct seg_info_t { void* seg_end; }; using segment_manager_map_t = boost::container::flat_map; static segment_manager_map_t _segment_manager_map; diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 28b9a06..9f611c9 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -25,6 +25,7 @@ class allocator_base { virtual pointer allocate() = 0; virtual void deallocate(const pointer& p) = 0; virtual size_t freelist_memory_usage() = 0; + virtual size_t memory_overhead() = 0; }; template @@ -60,6 +61,11 @@ class allocator : public allocator_base { return _freelist_size * sizeof(T); } + size_t memory_overhead() final { + std::lock_guard g(_m); + return _memory_overhead; + } + private: struct list_item { bip::offset_ptr _next; }; static constexpr size_t allocation_batch_size = 128; @@ -70,6 +76,7 @@ class allocator : public allocator_base { char* result = (char*)&*_back_alloc.allocate(sizeof(T) * allocation_batch_size); _freelist_size += allocation_batch_size; + _memory_overhead += 16; _freelist = bip::offset_ptr{(list_item*)result}; for (unsigned i = 0; i < allocation_batch_size - 1; ++i) { char* next = result + sizeof(T); @@ -82,6 +89,7 @@ class allocator : public allocator_base { backing_allocator _back_alloc; bip::offset_ptr _freelist; size_t _freelist_size = 0; + size_t _memory_overhead = 0; // overhead from boost segment allocator (16 bytes per block) std::mutex _m; }; @@ -108,10 +116,10 @@ class small_size_allocator { std::array _allocators; static constexpr size_t max_size = num_allocators * 
size_increment; - + static constexpr size_t allocator_index(size_t sz_in_bytes) { assert(sz_in_bytes > 0); - return (sz_in_bytes -1) / size_increment; + return (sz_in_bytes - 1) / size_increment; } template @@ -127,17 +135,33 @@ class small_size_allocator { , _allocators(make_allocators(back_alloc, std::make_index_sequence{})) {} pointer allocate(std::size_t sz_in_bytes) { - if (sz_in_bytes <= max_size) + if (sz_in_bytes <= max_size) { return _allocators[allocator_index(sz_in_bytes)]->allocate(); + } return _back_alloc.allocate(sz_in_bytes); } void deallocate(const pointer& p, std::size_t sz_in_bytes) { - if (sz_in_bytes <= max_size) + if (sz_in_bytes <= max_size) { _allocators[allocator_index(sz_in_bytes)]->deallocate(p); - else + } else _back_alloc.deallocate(p, sz_in_bytes); } + + size_t freelist_memory_usage() const { + size_t sz = 0; + for (auto& alloc : _allocators) + sz += alloc->freelist_memory_usage(); + return sz; + } + + size_t memory_overhead() const { + size_t sz = 0; + for (auto& alloc : _allocators) + sz += alloc->memory_overhead(); + return sz; + } + }; diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index e9aecf6..b61b9e5 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -252,10 +252,18 @@ pinnable_mapped_file::pinnable_mapped_file(const std::filesystem::path& dir, boo std::byte* start = (std::byte*)_segment_manager; assert(_segment_manager_map.find(start) == _segment_manager_map.end()); - _segment_manager_map[start] = - seg_info_t{start + _segment_manager->get_size(), - _writable - ? 
make_small_size_allocator(_segment_manager) : nullptr }; + ss_allocator_t* ss_alloc = get_small_size_allocator(start); // relies on `_segment_manager` being initialized + if (!ss_alloc && _writable) { + db_header* header = reinterpret_cast(start - header_size); + header->small_size_allocator = (char *)make_small_size_allocator(_segment_manager); + } + + _segment_manager_map[start] = seg_info_t{start + _segment_manager->get_size()}; +} + +ss_allocator_t* pinnable_mapped_file::get_small_size_allocator(std::byte* seg_mgr) { + db_header* header = reinterpret_cast(seg_mgr - header_size); + return (ss_allocator_t*)&*header->small_size_allocator; } void pinnable_mapped_file::setup_copy_on_write_mapping() { diff --git a/test/test.cpp b/test/test.cpp index a649fe1..d53fb9f 100644 --- a/test/test.cpp +++ b/test/test.cpp @@ -494,7 +494,9 @@ BOOST_AUTO_TEST_CASE(shared_vector_apis_segment_alloc) { // make sure we didn't leak memory // ------------------------------- - BOOST_REQUIRE_EQUAL(free_memory, pmf.get_segment_manager()->get_free_memory()); + auto ss_alloc = pinnable_mapped_file::get_small_size_allocator((std::byte*)pmf.get_segment_manager()); + BOOST_REQUIRE_EQUAL(free_memory, pmf.get_segment_manager()->get_free_memory() + ss_alloc->freelist_memory_usage() + + ss_alloc->memory_overhead()); } // ----------------------------------------------------------------------------- From 1c5d37adbb02e7371b2f841dedf64f433ae96f76 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 10 Dec 2024 15:48:50 -0500 Subject: [PATCH 08/20] Update `chainbase_node_allocator` to be backed with the `small_size_allocator` --- .../chainbase/chainbase_node_allocator.hpp | 42 ++++++++++++++----- 1 file changed, 31 insertions(+), 11 deletions(-) diff --git a/include/chainbase/chainbase_node_allocator.hpp b/include/chainbase/chainbase_node_allocator.hpp index 2c0575e..e71413c 100644 --- a/include/chainbase/chainbase_node_allocator.hpp +++ b/include/chainbase/chainbase_node_allocator.hpp @@ -14,14 +14,20 
@@ namespace chainbase { public: using value_type = T; using pointer = bip::offset_ptr; - chainbase_node_allocator(segment_manager* manager) : _manager{manager} {} - chainbase_node_allocator(const chainbase_node_allocator& other) : _manager(other._manager) {} + + chainbase_node_allocator(segment_manager* manager) : _manager{manager} { + _ss_alloc = pinnable_mapped_file::get_small_size_allocator((std::byte*)manager); + } + + chainbase_node_allocator(const chainbase_node_allocator& other) : chainbase_node_allocator(&*other._manager) {} + template - chainbase_node_allocator(const chainbase_node_allocator& other) : _manager(other._manager) {} + chainbase_node_allocator(const chainbase_node_allocator& other) : chainbase_node_allocator(&*other._manager) {} + pointer allocate(std::size_t num) { if (num == 1) { if (_freelist == nullptr) { - get_some(); + get_some(allocation_batch_size); } list_item* result = &*_freelist; _freelist = _freelist->_next; @@ -29,42 +35,56 @@ namespace chainbase { --_freelist_size; return pointer{(T*)result}; } else { - return pointer{(T*)_manager->allocate(num*sizeof(T))}; + return pointer{(T*)&*_ss_alloc->allocate(num*sizeof(T))}; } } + void deallocate(const pointer& p, std::size_t num) { if (num == 1) { _freelist = new (&*p) list_item{_freelist}; ++_freelist_size; } else { - _manager->deallocate(&*p); + _ss_alloc->deallocate(ss_allocator_t::pointer((char*)&*p), num*sizeof(T)); } } + + void preallocate(std::size_t num) { + if (num > allocation_batch_size) + get_some(((num - _freelist_size) + 7) & ~7); + } + bool operator==(const chainbase_node_allocator& other) const { return this == &other; } bool operator!=(const chainbase_node_allocator& other) const { return this != &other; } segment_manager* get_segment_manager() const { return _manager.get(); } size_t freelist_memory_usage() const { return _freelist_size * sizeof(T); } + private: template friend class chainbase_node_allocator; - void get_some() { + + void get_some(size_t 
allocation_batch_size) { static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list"); static_assert(sizeof(T) % alignof(list_item) == 0, "Bad alignment for free list"); - const unsigned allocation_batch_size = 64; + char* result = (char*)_manager->allocate(sizeof(T) * allocation_batch_size); _freelist_size += allocation_batch_size; + auto old_freelist = _freelist; _freelist = bip::offset_ptr{(list_item*)result}; for(unsigned i = 0; i < allocation_batch_size-1; ++i) { char* next = result + sizeof(T); new(result) list_item{bip::offset_ptr{(list_item*)next}}; result = next; } - new(result) list_item{nullptr}; + new(result) list_item{old_freelist}; } + struct list_item { bip::offset_ptr _next; }; + + static constexpr size_t allocation_batch_size = 512; + bip::offset_ptr _ss_alloc; bip::offset_ptr _manager; - bip::offset_ptr _freelist{}; - size_t _freelist_size = 0; + bip::offset_ptr _freelist{}; + size_t _freelist_size = 0; }; } // namepsace chainbase From 81e3b7da5b5d0c63b78b55240b70fc9dbc61bf50 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 10 Dec 2024 15:50:45 -0500 Subject: [PATCH 09/20] Update batch size in `small_size_allocator` --- include/chainbase/small_size_allocator.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 9f611c9..91922c6 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -68,7 +68,7 @@ class allocator : public allocator_base { private: struct list_item { bip::offset_ptr _next; }; - static constexpr size_t allocation_batch_size = 128; + static constexpr size_t allocation_batch_size = 512; void get_some() { static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list"); From 7ec42e76d9f905b23908985305029ce68c47b38a Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 11 Dec 2024 10:17:55 -0500 Subject: [PATCH 10/20] Try preallocation for tables with many 
rows, and insert at end (currently disabled) --- include/chainbase/chainbase.hpp | 12 ++++++++++++ include/chainbase/undo_index.hpp | 23 +++++++++++++++++++++++ 2 files changed, 35 insertions(+) diff --git a/include/chainbase/chainbase.hpp b/include/chainbase/chainbase.hpp index 0e29d1e..bf87be4 100644 --- a/include/chainbase/chainbase.hpp +++ b/include/chainbase/chainbase.hpp @@ -509,6 +509,18 @@ namespace chainbase { return get_mutable_index().remove( obj ); } + template + void preallocate( size_t num ) + { +#if 1 + if ( _read_only_mode ) { + BOOST_THROW_EXCEPTION( std::logic_error( "attempting to preallocate in read-only mode" ) ); + } + typedef typename get_index_type::type index_type; + get_mutable_index().preallocate( num ); +#endif + } + template const ObjectType& create( Constructor&& con ) { diff --git a/include/chainbase/undo_index.hpp b/include/chainbase/undo_index.hpp index b0b672a..622d5ef 100644 --- a/include/chainbase/undo_index.hpp +++ b/include/chainbase/undo_index.hpp @@ -349,6 +349,12 @@ namespace chainbase { uint64_t ctime = 0; // _monotonic_revision at the point the undo_state was created }; + void preallocate( std::size_t num ) { +#if 1 + _allocator.preallocate(num); +#endif + } + // Exception safety: strong template const value_type& emplace( Constructor&& c ) { @@ -690,9 +696,26 @@ namespace chainbase { template bool insert_impl(value_type& p) { if constexpr (N < sizeof...(Indices)) { +#if 0 + auto& index = std::get(_indices); + using index_t = std::decay_t; + std::pair pair; + constexpr bool has_insert_hint = requires(index_t& index, value_type& p) { + index.insert_unique(index.end(), p); + }; + if constexpr (false && N == 1 && has_insert_hint) { + pair.first = index.insert_unique(index.end(), p); + pair.second = true; + } else { + pair = index.insert_unique(p); + } + if(!pair.second) return false; + auto guard = scope_exit{[&index,iter=pair.first]{ index.erase(iter); }}; +#else auto [iter, inserted] = std::get(_indices).insert_unique(p); 
if(!inserted) return false; auto guard = scope_exit{[this,iter=iter]{ std::get(_indices).erase(iter); }}; +#endif if(insert_impl(p)) { guard.cancel(); return true; From 0117552265663ad0318fd615a3da48c4abceeaa5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 11 Dec 2024 11:35:58 -0500 Subject: [PATCH 11/20] Remove `#if` directives and slightly restrict preallocate --- include/chainbase/chainbase.hpp | 2 -- include/chainbase/chainbase_node_allocator.hpp | 2 +- include/chainbase/undo_index.hpp | 2 -- 3 files changed, 1 insertion(+), 5 deletions(-) diff --git a/include/chainbase/chainbase.hpp b/include/chainbase/chainbase.hpp index bf87be4..315987e 100644 --- a/include/chainbase/chainbase.hpp +++ b/include/chainbase/chainbase.hpp @@ -512,13 +512,11 @@ namespace chainbase { template void preallocate( size_t num ) { -#if 1 if ( _read_only_mode ) { BOOST_THROW_EXCEPTION( std::logic_error( "attempting to preallocate in read-only mode" ) ); } typedef typename get_index_type::type index_type; get_mutable_index().preallocate( num ); -#endif } template diff --git a/include/chainbase/chainbase_node_allocator.hpp b/include/chainbase/chainbase_node_allocator.hpp index e71413c..3c47826 100644 --- a/include/chainbase/chainbase_node_allocator.hpp +++ b/include/chainbase/chainbase_node_allocator.hpp @@ -49,7 +49,7 @@ namespace chainbase { } void preallocate(std::size_t num) { - if (num > allocation_batch_size) + if (num >= 2 * allocation_batch_size) get_some(((num - _freelist_size) + 7) & ~7); } diff --git a/include/chainbase/undo_index.hpp b/include/chainbase/undo_index.hpp index 622d5ef..3b0fc61 100644 --- a/include/chainbase/undo_index.hpp +++ b/include/chainbase/undo_index.hpp @@ -350,9 +350,7 @@ namespace chainbase { }; void preallocate( std::size_t num ) { -#if 1 _allocator.preallocate(num); -#endif } // Exception safety: strong From a80da4d2e4b708b5dd0cad1dea15594527472a83 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Wed, 11 Dec 2024 11:44:44 -0500 Subject: [PATCH 12/20] 
Remove commented out block in `undo_index.hpp` --- include/chainbase/undo_index.hpp | 17 ----------------- 1 file changed, 17 deletions(-) diff --git a/include/chainbase/undo_index.hpp b/include/chainbase/undo_index.hpp index 3b0fc61..81c7f78 100644 --- a/include/chainbase/undo_index.hpp +++ b/include/chainbase/undo_index.hpp @@ -694,26 +694,9 @@ namespace chainbase { template bool insert_impl(value_type& p) { if constexpr (N < sizeof...(Indices)) { -#if 0 - auto& index = std::get(_indices); - using index_t = std::decay_t; - std::pair pair; - constexpr bool has_insert_hint = requires(index_t& index, value_type& p) { - index.insert_unique(index.end(), p); - }; - if constexpr (false && N == 1 && has_insert_hint) { - pair.first = index.insert_unique(index.end(), p); - pair.second = true; - } else { - pair = index.insert_unique(p); - } - if(!pair.second) return false; - auto guard = scope_exit{[&index,iter=pair.first]{ index.erase(iter); }}; -#else auto [iter, inserted] = std::get(_indices).insert_unique(p); if(!inserted) return false; auto guard = scope_exit{[this,iter=iter]{ std::get(_indices).erase(iter); }}; -#endif if(insert_impl(p)) { guard.cancel(); return true; From af9af72c5c059530dd940afa1b90ac3f73605ebd Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 12 Dec 2024 16:02:24 -0500 Subject: [PATCH 13/20] Fix sanitizer issue. --- src/pinnable_mapped_file.cpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index b61b9e5..98f4a3e 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -263,7 +263,7 @@ pinnable_mapped_file::pinnable_mapped_file(const std::filesystem::path& dir, boo ss_allocator_t* pinnable_mapped_file::get_small_size_allocator(std::byte* seg_mgr) { db_header* header = reinterpret_cast(seg_mgr - header_size); - return (ss_allocator_t*)&*header->small_size_allocator; + return header->small_size_allocator ? 
(ss_allocator_t*)&*header->small_size_allocator : nullptr; } void pinnable_mapped_file::setup_copy_on_write_mapping() { From 610665842f3ea7159413c6f3040831ba6a6bd7d2 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Thu, 12 Dec 2024 18:26:37 -0500 Subject: [PATCH 14/20] Fix test issue on macos. --- include/chainbase/small_size_allocator.hpp | 22 +++++++++++----------- src/pinnable_mapped_file.cpp | 4 ++-- test/test.cpp | 8 ++++++-- 3 files changed, 19 insertions(+), 15 deletions(-) diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 91922c6..b79d9c7 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -22,10 +22,10 @@ class allocator_base { public: using pointer = backing_allocator::pointer; - virtual pointer allocate() = 0; - virtual void deallocate(const pointer& p) = 0; - virtual size_t freelist_memory_usage() = 0; - virtual size_t memory_overhead() = 0; + virtual pointer allocate() = 0; + virtual void deallocate(const pointer& p) = 0; + virtual size_t freelist_memory_usage() = 0; + virtual size_t num_blocks_allocated() = 0; }; template @@ -61,9 +61,9 @@ class allocator : public allocator_base { return _freelist_size * sizeof(T); } - size_t memory_overhead() final { + size_t num_blocks_allocated() final { std::lock_guard g(_m); - return _memory_overhead; + return _num_blocks_allocated; } private: @@ -76,7 +76,7 @@ class allocator : public allocator_base { char* result = (char*)&*_back_alloc.allocate(sizeof(T) * allocation_batch_size); _freelist_size += allocation_batch_size; - _memory_overhead += 16; + ++_num_blocks_allocated; _freelist = bip::offset_ptr{(list_item*)result}; for (unsigned i = 0; i < allocation_batch_size - 1; ++i) { char* next = result + sizeof(T); @@ -88,8 +88,8 @@ class allocator : public allocator_base { backing_allocator _back_alloc; bip::offset_ptr _freelist; - size_t _freelist_size = 0; - size_t _memory_overhead = 0; // overhead from 
boost segment allocator (16 bytes per block) + size_t _freelist_size = 0; + size_t _num_blocks_allocated = 0; // number of blocks allocated from boost segment allocator std::mutex _m; }; @@ -155,10 +155,10 @@ class small_size_allocator { return sz; } - size_t memory_overhead() const { + size_t num_blocks_allocated() const { size_t sz = 0; for (auto& alloc : _allocators) - sz += alloc->memory_overhead(); + sz += alloc->num_blocks_allocated(); return sz; } diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index 98f4a3e..c8bbf17 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -330,8 +330,8 @@ void pinnable_mapped_file::setup_non_file_mapping() { _non_file_mapped_mapping_size = (_non_file_mapped_mapping_size + (r-1u))/r*r; }; - const unsigned _1gb = 1u<<30u; - const unsigned _2mb = 1u<<21u; + [[maybe_unused]] const unsigned _1gb = 1u<<30u; + [[maybe_unused]] const unsigned _2mb = 1u<<21u; #if defined(MAP_HUGETLB) && defined(MAP_HUGE_1GB) _non_file_mapped_mapping = mmap(NULL, _non_file_mapped_mapping_size, PROT_READ|PROT_WRITE, common_map_opts|MAP_HUGETLB|MAP_HUGE_1GB, -1, 0); diff --git a/test/test.cpp b/test/test.cpp index d53fb9f..fdff6ae 100644 --- a/test/test.cpp +++ b/test/test.cpp @@ -495,8 +495,12 @@ BOOST_AUTO_TEST_CASE(shared_vector_apis_segment_alloc) { // make sure we didn't leak memory // ------------------------------- auto ss_alloc = pinnable_mapped_file::get_small_size_allocator((std::byte*)pmf.get_segment_manager()); - BOOST_REQUIRE_EQUAL(free_memory, pmf.get_segment_manager()->get_free_memory() + ss_alloc->freelist_memory_usage() + - ss_alloc->memory_overhead()); + auto num_blocks_allocated = ss_alloc->num_blocks_allocated(); + auto lost = free_memory - (seg_mgr->get_free_memory() + ss_alloc->freelist_memory_usage()); + std::cerr << "free_memory=" << free_memory << ", new_free_memory=" << (seg_mgr->get_free_memory() + ss_alloc->freelist_memory_usage()) << ", num_blocks_allocated=" << 
num_blocks_allocated << '\n'; + + // for every block allocated from the shared memory segment, we have an overhead of 8 or 16 bytes + BOOST_REQUIRE(lost == num_blocks_allocated * 8 || lost == num_blocks_allocated * 16); } // ----------------------------------------------------------------------------- From 5d86965724e5ba29e6ecc4f019a0f25bc2c475bf Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 13 Dec 2024 14:08:01 -0500 Subject: [PATCH 15/20] Minor changes, add comments. --- include/chainbase/pinnable_mapped_file.hpp | 29 ++++++++++++++++------ include/chainbase/small_size_allocator.hpp | 26 +++++++++++++++---- src/pinnable_mapped_file.cpp | 1 + test/test.cpp | 1 - 4 files changed, 44 insertions(+), 13 deletions(-) diff --git a/include/chainbase/pinnable_mapped_file.hpp b/include/chainbase/pinnable_mapped_file.hpp index 4b99ef0..0900052 100644 --- a/include/chainbase/pinnable_mapped_file.hpp +++ b/include/chainbase/pinnable_mapped_file.hpp @@ -52,14 +52,18 @@ using byte_segment_allocator_t = segment_allocator_t; using ss_allocator_t = small_size_allocator; +// An allocator for objects of type T within the segment_manager +// ------------------------------------------------------------- +// - If the allocation size (num_objects * sizeof(T)) is less than 512 bytes, it will be routed +// through the small size allocator which allocates in batch from the `segment_manager`. +// - If the allocation size (num_objects * sizeof(T)) is greater than 512 bytes, the allocator +// will allocate directly from the segment manager. 
+// - the 512 bytes limit is derived from the template parameters of `small_size_allocator` +// (size_t num_allocators = 64, size_t size_increment = 8) +// - emulates the API of `bip::allocator` +// --------------------------------------------------------------------------------------------- template -using allocator = object_allocator; - -template -auto make_small_size_allocator(segment_manager* seg_mgr) { - byte_segment_allocator_t byte_allocator(seg_mgr); - return new (seg_mgr->allocate(sizeof(ss_allocator_t))) ss_allocator_t(byte_allocator); -} +using allocator = object_allocator; class pinnable_mapped_file { public: @@ -139,6 +143,17 @@ class pinnable_mapped_file { constexpr static size_t _db_size_copy_increment = 1024*1024*1024; //1GB }; +// There can be at most one `small_size_allocator` per `segment_manager` (hence the `assert` below). +// There is none created if the pinnable_mapped_file is read-only. +// ---------------------------------------------------------------------------------------------------- +template +auto make_small_size_allocator(segment_manager* seg_mgr) { + assert(pinnable_mapped_file::get_small_size_allocator((std::byte*)seg_mgr) == nullptr); + byte_segment_allocator_t byte_allocator(seg_mgr); + return new (seg_mgr->allocate(sizeof(ss_allocator_t))) ss_allocator_t(byte_allocator); +} + +// Create an allocator for a specific object type. // pointer can be to the segment manager, or any object contained within. 
// --------------------------------------------------------------------- template diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index b79d9c7..6021a05 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -28,6 +28,16 @@ class allocator_base { virtual size_t num_blocks_allocated() = 0; }; +// --------------------------------------------------------------------------------------- +// One of the allocators from `small_size_allocator` below +// ------------------------------------------------------- +// +// - allocates buffers of `sz` bytes. +// - allocates in batch from `backing_allocator` (see `allocation_batch_size`) +// - freed buffers are linked into a free list for fast further allocations +// - allocated buffers are never returned to the `backing_allocator` +// - thread-safe +// --------------------------------------------------------------------------------------- template class allocator : public allocator_base { public: @@ -100,8 +110,10 @@ class allocator : public allocator_base { // An array of 64 allocators for sizes from 8 to 512 bytes // ------------------------------------------------------- // -// All pointers used are of type `backing_allocator::pointer` -// allocate/deallocate specify size in bytes. +// - All pointers used are of type `backing_allocator::pointer` +// - allocate/deallocate specify size in bytes. 
+// - Any requested size greater than `num_allocators * size_increment` will be routed +// to the backing_allocator // --------------------------------------------------------------------------------------- template requires ((size_increment & (size_increment - 1)) == 0) // power of two @@ -164,14 +176,18 @@ class small_size_allocator { }; - // --------------------------------------------------------------------------------------- // Object allocator // ---------------- // // emulates the API of `bip::allocator` -// -// backing_allocator is `the small_size_allocator` +// backing_allocator is normally the `small_size_allocator`, in which case: +// - If the allocation size (num_objects * sizeof(T)) is less than 512 bytes, it will be routed +// through the small size allocator which allocates in batch from the `segment_manager`. +// - If the allocation size (num_objects * sizeof(T)) is greater than 512 bytes, the allocator +// will allocate directly from the segment manager. +// - the 512-byte limit is derived from the template parameters of `small_size_allocator` +// (size_t num_allocators = 64, size_t size_increment = 8) // --------------------------------------------------------------------------------------- template class object_allocator { diff --git a/src/pinnable_mapped_file.cpp b/src/pinnable_mapped_file.cpp index c8bbf17..30da7ee 100644 --- a/src/pinnable_mapped_file.cpp +++ b/src/pinnable_mapped_file.cpp @@ -254,6 +254,7 @@ pinnable_mapped_file::pinnable_mapped_file(const std::filesystem::path& dir, boo ss_allocator_t* ss_alloc = get_small_size_allocator(start); // relies on `_segment_manager` being initialized if (!ss_alloc && _writable) { + // create the unique `small_size_allocator` for this `segment_manager` db_header* header = reinterpret_cast(start - header_size); header->small_size_allocator = (char *)make_small_size_allocator(_segment_manager); } diff --git a/test/test.cpp index fdff6ae..e7280a3 100644 --- a/test/test.cpp +++
b/test/test.cpp @@ -497,7 +497,6 @@ BOOST_AUTO_TEST_CASE(shared_vector_apis_segment_alloc) { auto ss_alloc = pinnable_mapped_file::get_small_size_allocator((std::byte*)pmf.get_segment_manager()); auto num_blocks_allocated = ss_alloc->num_blocks_allocated(); auto lost = free_memory - (seg_mgr->get_free_memory() + ss_alloc->freelist_memory_usage()); - std::cerr << "free_memory=" << free_memory << ", new_free_memory=" << (seg_mgr->get_free_memory() + ss_alloc->freelist_memory_usage()) << ", num_blocks_allocated=" << num_blocks_allocated << '\n'; // for every block allocated from the shared memory segment, we have an overhead of 8 or 16 bytes BOOST_REQUIRE(lost == num_blocks_allocated * 8 || lost == num_blocks_allocated * 16); From d127509f0b6cb7ee4247f22fd2bc4bfb6013009d Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Fri, 13 Dec 2024 18:36:26 -0500 Subject: [PATCH 16/20] Cannot have vtables in shared memory --- include/chainbase/small_size_allocator.hpp | 60 +++++++++------------- 1 file changed, 23 insertions(+), 37 deletions(-) diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 6021a05..2aeacff 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -17,17 +17,6 @@ namespace chainbase { namespace detail { -template -class allocator_base { -public: - using pointer = backing_allocator::pointer; - - virtual pointer allocate() = 0; - virtual void deallocate(const pointer& p) = 0; - virtual size_t freelist_memory_usage() = 0; - virtual size_t num_blocks_allocated() = 0; -}; - // --------------------------------------------------------------------------------------- // One of the allocators from `small_size_allocator` below // ------------------------------------------------------- @@ -38,17 +27,16 @@ class allocator_base { // - allocated buffers are never returned to the `backing_allocator` // - thread-safe // 
--------------------------------------------------------------------------------------- -template -class allocator : public allocator_base { +template +class allocator { public: - using T = std::array; using pointer = backing_allocator::pointer; - allocator(backing_allocator back_alloc) : - _back_alloc(back_alloc) { - } + allocator(backing_allocator back_alloc, std::size_t sz) + : _back_alloc(back_alloc) + , _sz(sz) {} - pointer allocate() final { + pointer allocate() { std::lock_guard g(_m); if (_freelist == nullptr) { get_some(); @@ -60,18 +48,18 @@ class allocator : public allocator_base { return pointer{(typename backing_allocator::value_type*)result}; } - void deallocate(const pointer& p) final { + void deallocate(const pointer& p) { std::lock_guard g(_m); _freelist = new (&*p) list_item{_freelist}; ++_freelist_size; } - size_t freelist_memory_usage() final { + size_t freelist_memory_usage() const { std::lock_guard g(_m); - return _freelist_size * sizeof(T); + return _freelist_size * _sz; } - size_t num_blocks_allocated() final { + size_t num_blocks_allocated() const { std::lock_guard g(_m); return _num_blocks_allocated; } @@ -81,15 +69,15 @@ class allocator : public allocator_base { static constexpr size_t allocation_batch_size = 512; void get_some() { - static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list"); - static_assert(sizeof(T) % alignof(list_item) == 0, "Bad alignment for free list"); + assert(_sz >= sizeof(list_item)); + assert(_sz % alignof(list_item) == 0); - char* result = (char*)&*_back_alloc.allocate(sizeof(T) * allocation_batch_size); + char* result = (char*)&*_back_alloc.allocate(_sz * allocation_batch_size); _freelist_size += allocation_batch_size; ++_num_blocks_allocated; _freelist = bip::offset_ptr{(list_item*)result}; for (unsigned i = 0; i < allocation_batch_size - 1; ++i) { - char* next = result + sizeof(T); + char* next = result + _sz; new (result) list_item{bip::offset_ptr{(list_item*)next}}; result = next; } @@ 
-97,10 +85,11 @@ class allocator : public allocator_base { } backing_allocator _back_alloc; + std::size_t _sz; bip::offset_ptr _freelist; size_t _freelist_size = 0; size_t _num_blocks_allocated = 0; // number of blocks allocated from boost segment allocator - std::mutex _m; + mutable std::mutex _m; }; } // namespace detail @@ -119,13 +108,12 @@ template >; + using alloc_array_t = std::array; private: - using base_alloc_ptr = bip::offset_ptr>; - - backing_allocator _back_alloc; - std::array _allocators; + backing_allocator _back_alloc; + alloc_array_t _allocators; static constexpr size_t max_size = num_allocators * size_increment; @@ -136,9 +124,8 @@ class small_size_allocator { template auto make_allocators(backing_allocator back_alloc, std::index_sequence) { - return std::array{ - new (&*_back_alloc.allocate(sizeof(detail::allocator))) - detail::allocator(back_alloc)...}; + return alloc_array_t{new (&*_back_alloc.allocate(sizeof(detail::allocator))) + detail::allocator(back_alloc, (I + 1) * size_increment)...}; } public: @@ -173,7 +160,6 @@ class small_size_allocator { sz += alloc->num_blocks_allocated(); return sz; } - }; // --------------------------------------------------------------------------------------- From 54950d355492a7f22967037164174a7a56a243e5 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sun, 15 Dec 2024 20:50:59 -0500 Subject: [PATCH 17/20] Avoid linking into free list on block alloc Also use increasing batch size --- .../chainbase/chainbase_node_allocator.hpp | 39 ++++++++++-------- include/chainbase/small_size_allocator.hpp | 40 ++++++++++--------- 2 files changed, 44 insertions(+), 35 deletions(-) diff --git a/include/chainbase/chainbase_node_allocator.hpp b/include/chainbase/chainbase_node_allocator.hpp index 3c47826..479f9ed 100644 --- a/include/chainbase/chainbase_node_allocator.hpp +++ b/include/chainbase/chainbase_node_allocator.hpp @@ -26,9 +26,15 @@ namespace chainbase { pointer allocate(std::size_t num) { if (num == 1) { - if (_freelist 
== nullptr) { - get_some(allocation_batch_size); + if (_block_start == _block_end && _freelist == nullptr) { + get_some(_allocation_batch_size); } + if (_block_start < _block_end) { + pointer result = pointer{static_cast(static_cast(_block_start.get()))}; + _block_start += sizeof(T); + return result; + } + assert(_freelist != nullptr); list_item* result = &*_freelist; _freelist = _freelist->_next; result->~list_item(); @@ -49,41 +55,40 @@ namespace chainbase { } void preallocate(std::size_t num) { - if (num >= 2 * allocation_batch_size) + if (num >= 2 * _allocation_batch_size) get_some(((num - _freelist_size) + 7) & ~7); } bool operator==(const chainbase_node_allocator& other) const { return this == &other; } bool operator!=(const chainbase_node_allocator& other) const { return this != &other; } segment_manager* get_segment_manager() const { return _manager.get(); } - size_t freelist_memory_usage() const { return _freelist_size * sizeof(T); } + size_t freelist_memory_usage() const { return _freelist_size * sizeof(T) + (_block_end - _block_start); } private: template friend class chainbase_node_allocator; - void get_some(size_t allocation_batch_size) { + void get_some(size_t num_to_alloc) { static_assert(sizeof(T) >= sizeof(list_item), "Too small for free list"); static_assert(sizeof(T) % alignof(list_item) == 0, "Bad alignment for free list"); - char* result = (char*)_manager->allocate(sizeof(T) * allocation_batch_size); - _freelist_size += allocation_batch_size; - auto old_freelist = _freelist; - _freelist = bip::offset_ptr{(list_item*)result}; - for(unsigned i = 0; i < allocation_batch_size-1; ++i) { - char* next = result + sizeof(T); - new(result) list_item{bip::offset_ptr{(list_item*)next}}; - result = next; - } - new(result) list_item{old_freelist}; + _block_start = static_cast(_manager->allocate(sizeof(T) * num_to_alloc)); + _block_end = _block_start + sizeof(T) * num_to_alloc; + + if (_allocation_batch_size < max_allocation_batch_size) + 
_allocation_batch_size *= 2; } struct list_item { bip::offset_ptr _next; }; - static constexpr size_t allocation_batch_size = 512; + static constexpr size_t max_allocation_batch_size = 512; + + bip::offset_ptr _block_start; + bip::offset_ptr _block_end; + bip::offset_ptr _freelist{}; bip::offset_ptr _ss_alloc; bip::offset_ptr _manager; - bip::offset_ptr _freelist{}; + size_t _allocation_batch_size = 4; size_t _freelist_size = 0; }; diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index 2aeacff..ad1fb37 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -33,14 +33,20 @@ class allocator { using pointer = backing_allocator::pointer; allocator(backing_allocator back_alloc, std::size_t sz) - : _back_alloc(back_alloc) - , _sz(sz) {} + : _sz(sz) + , _back_alloc(back_alloc) {} pointer allocate() { std::lock_guard g(_m); - if (_freelist == nullptr) { + if (_block_start == _block_end && _freelist == nullptr) { get_some(); } + if (_block_start < _block_end) { + pointer result = pointer{_block_start.get()}; + _block_start += _sz; + return result; + } + assert(_freelist != nullptr); list_item* result = &*_freelist; _freelist = _freelist->_next; result->~list_item(); @@ -56,7 +62,7 @@ class allocator { size_t freelist_memory_usage() const { std::lock_guard g(_m); - return _freelist_size * _sz; + return _freelist_size * _sz + (_block_end - _block_start); } size_t num_blocks_allocated() const { @@ -66,29 +72,27 @@ class allocator { private: struct list_item { bip::offset_ptr _next; }; - static constexpr size_t allocation_batch_size = 512; + static constexpr size_t max_allocation_batch_size = 512; void get_some() { assert(_sz >= sizeof(list_item)); assert(_sz % alignof(list_item) == 0); - char* result = (char*)&*_back_alloc.allocate(_sz * allocation_batch_size); - _freelist_size += allocation_batch_size; + _block_start = _back_alloc.allocate(_sz * _allocation_batch_size); + 
_block_end = _block_start + _sz * _allocation_batch_size; ++_num_blocks_allocated; - _freelist = bip::offset_ptr{(list_item*)result}; - for (unsigned i = 0; i < allocation_batch_size - 1; ++i) { - char* next = result + _sz; - new (result) list_item{bip::offset_ptr{(list_item*)next}}; - result = next; - } - new (result) list_item{nullptr}; + if (_allocation_batch_size < max_allocation_batch_size) + _allocation_batch_size *= 2; } - backing_allocator _back_alloc; std::size_t _sz; bip::offset_ptr _freelist; - size_t _freelist_size = 0; - size_t _num_blocks_allocated = 0; // number of blocks allocated from boost segment allocator + bip::offset_ptr _block_start; + bip::offset_ptr _block_end; + backing_allocator _back_alloc; + size_t _allocation_batch_size = 4; + size_t _freelist_size = 0; + size_t _num_blocks_allocated = 0; // number of blocks allocated from boost segment allocator mutable std::mutex _m; }; @@ -104,7 +108,7 @@ class allocator { // - Any requested size greater than `num_allocators * size_increment` will be routed // to the backing_allocator // --------------------------------------------------------------------------------------- -template +template requires ((size_increment & (size_increment - 1)) == 0) // power of two class small_size_allocator { public: From 27a561dc0218d6eef8871be6709d3d69063a70b1 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Sun, 15 Dec 2024 20:54:53 -0500 Subject: [PATCH 18/20] Avoid subtraction which may overflow. 
--- include/chainbase/chainbase_node_allocator.hpp | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/chainbase/chainbase_node_allocator.hpp b/include/chainbase/chainbase_node_allocator.hpp index 479f9ed..adccd7b 100644 --- a/include/chainbase/chainbase_node_allocator.hpp +++ b/include/chainbase/chainbase_node_allocator.hpp @@ -56,7 +56,7 @@ namespace chainbase { void preallocate(std::size_t num) { if (num >= 2 * _allocation_batch_size) - get_some(((num - _freelist_size) + 7) & ~7); + get_some((num + 7) & ~7); } bool operator==(const chainbase_node_allocator& other) const { return this == &other; } From a0797d2be1f4021dee2fdfed4e622ad9cc292dfe Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Mon, 16 Dec 2024 10:54:01 -0500 Subject: [PATCH 19/20] Start `_allocation_batch_size` at `32`, seems faster --- include/chainbase/chainbase_node_allocator.hpp | 2 +- include/chainbase/small_size_allocator.hpp | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/include/chainbase/chainbase_node_allocator.hpp b/include/chainbase/chainbase_node_allocator.hpp index adccd7b..f51d667 100644 --- a/include/chainbase/chainbase_node_allocator.hpp +++ b/include/chainbase/chainbase_node_allocator.hpp @@ -88,7 +88,7 @@ namespace chainbase { bip::offset_ptr _freelist{}; bip::offset_ptr _ss_alloc; bip::offset_ptr _manager; - size_t _allocation_batch_size = 4; + size_t _allocation_batch_size = 32; size_t _freelist_size = 0; }; diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index ad1fb37..f3677d8 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -90,7 +90,7 @@ class allocator { bip::offset_ptr _block_start; bip::offset_ptr _block_end; backing_allocator _back_alloc; - size_t _allocation_batch_size = 4; + size_t _allocation_batch_size = 32; size_t _freelist_size = 0; size_t _num_blocks_allocated = 0; // number of blocks allocated from boost segment 
allocator mutable std::mutex _m; From 91df26d07752ba96f25a829650fc367b4bb3e5c8 Mon Sep 17 00:00:00 2001 From: greg7mdp Date: Tue, 17 Dec 2024 14:17:36 -0500 Subject: [PATCH 20/20] Make constructors explicit. --- include/chainbase/small_size_allocator.hpp | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/include/chainbase/small_size_allocator.hpp b/include/chainbase/small_size_allocator.hpp index f3677d8..20c542a 100644 --- a/include/chainbase/small_size_allocator.hpp +++ b/include/chainbase/small_size_allocator.hpp @@ -133,7 +133,7 @@ class small_size_allocator { } public: - small_size_allocator(backing_allocator back_alloc) + explicit small_size_allocator(backing_allocator back_alloc) : _back_alloc(std::move(back_alloc)) , _allocators(make_allocators(back_alloc, std::make_index_sequence{})) {} @@ -186,7 +186,7 @@ class object_allocator { using pointer = char_pointer::template rebind; using value_type = T; - object_allocator(backing_allocator* back_alloc) :_back_alloc(back_alloc) { + explicit object_allocator(backing_allocator* back_alloc) :_back_alloc(back_alloc) { } pointer allocate(std::size_t num_objects) {