From dd50dd1a5c73216c7baed8182aa9df8322d26071 Mon Sep 17 00:00:00 2001
From: Vesa Karvonen
Date: Thu, 5 Sep 2019 22:21:17 +0300
Subject: [PATCH] Transactional hash map `hash_map_tm` test

---
 internals/CMakeLists.txt                  |   4 +-
 internals/include/testing/config.hpp      |   4 +-
 internals/include/testing/hash_map_tm.hpp | 231 ++++++++++++++++++++++
 internals/include/testing/memory.hpp      |  22 +++
 internals/testing/contention_test.cpp     |   4 +-
 internals/testing/hash_map_tm_test.cpp    |  67 +++++++
 6 files changed, 328 insertions(+), 4 deletions(-)
 create mode 100644 internals/include/testing/hash_map_tm.hpp
 create mode 100644 internals/include/testing/memory.hpp
 create mode 100644 internals/testing/hash_map_tm_test.cpp

diff --git a/internals/CMakeLists.txt b/internals/CMakeLists.txt
index 5dfcf23..4379245 100644
--- a/internals/CMakeLists.txt
+++ b/internals/CMakeLists.txt
@@ -1,2 +1,4 @@
-add_conventional_library(testing PUBLIC trade_v1)
+add_conventional_library(testing)
+target_link_libraries(testing INTERFACE dumpster_v1 trade_v1)
+
 add_conventional_executable_tests(PRIVATE testing trade_v1 testing_v1 std_thread)
diff --git a/internals/include/testing/config.hpp b/internals/include/testing/config.hpp
index cbde416..d203468 100644
--- a/internals/include/testing/config.hpp
+++ b/internals/include/testing/config.hpp
@@ -1,9 +1,11 @@
 #pragma once
 
+#include "dumpster_v1/config.hpp"
 #include "trade_v1/config.hpp"
 
 namespace testing {
 
+namespace dumpster = dumpster_v1;
 namespace trade = trade_v1;
 
-}
+} // namespace testing
diff --git a/internals/include/testing/hash_map_tm.hpp b/internals/include/testing/hash_map_tm.hpp
new file mode 100644
index 0000000..f7f4212
--- /dev/null
+++ b/internals/include/testing/hash_map_tm.hpp
@@ -0,0 +1,231 @@
+#pragma once
+
+#include "testing/config.hpp"
+
+#include "trade_v1/trade.hpp"
+
+#include "dumpster_v1/primes.hpp"
+
+#include "polyfill_v1/memory.hpp"
+#include <atomic>
+#include <functional>
+#include <optional>
+
+namespace testing {
+
+/// A transactional hash map for testing purposes.
+template <class Key,
+          class Mapped,
+          class Hash = std::hash<Key>,
+          class Equal = std::equal_to<Key>>
+class hash_map_tm;
+
+class hash_map_tm_private {
+  template <class, class, class, class> friend class hash_map_tm;
+
+  // This hack is a workaround for not having std::shared_ptr<T[]> support
+  // in AppleClang.
+  template <class T> struct array_hack {
+    void operator delete(void *self) { delete[] reinterpret_cast<T *>(self); }
+    T &at(size_t i) { return reinterpret_cast<T *>(this)[i]; }
+  };
+};
+
+template <class Key, class Mapped, class Hash, class Equal>
+class hash_map_tm : hash_map_tm_private {
+  template <class T> using ptr_t = std::shared_ptr<T>;
+
+  struct node_t;
+
+  using link = trade::atom<ptr_t<node_t>>;
+
+  trade::atom<size_t> m_item_count;
+  trade::atom<size_t> m_buckets_count;
+  trade::atom<ptr_t<array_hack<link>>> m_buckets;
+
+public:
+  using size_type = size_t;
+
+  using key_type = Key;
+  using mapped_type = Mapped;
+
+  hash_map_tm();
+
+  hash_map_tm(const hash_map_tm &) = delete;
+  hash_map_tm &operator=(const hash_map_tm &) = delete;
+
+  size_t size() const;
+
+  bool empty() const;
+
+  void clear();
+
+  void swap(hash_map_tm &that);
+
+  template <class ForwardableMapped,
+            class Config = decltype(trade::stack<1024>)>
+  bool add_or_set(const Key &key,
+                  ForwardableMapped &&mapped,
+                  Config config = trade::stack<1024>);
+
+  std::optional<Mapped> try_get(const Key &key) const;
+
+  bool remove(const Key &key);
+
+#ifndef NDEBUG
+  static std::atomic<size_t> s_live_nodes; // Only for testing purposes
+#endif
+};
+
+// -----------------------------------------------------------------------------
+
+template <class Key, class Mapped, class Hash, class Equal>
+struct hash_map_tm<Key, Mapped, Hash, Equal>::node_t {
+#ifndef NDEBUG
+  ~node_t() { --s_live_nodes; }
+#endif
+  template <class ForwardableKey, class ForwardableMapped>
+  node_t(ForwardableKey &&key, ForwardableMapped &&value)
+      : m_next(nullptr), m_key(std::forward<ForwardableKey>(key)),
+        m_mapped(std::forward<ForwardableMapped>(value)) {
+#ifndef NDEBUG
+    ++s_live_nodes;
+#endif
+  }
+  link m_next;
+  const Key m_key;
+  trade::atom<Mapped> m_mapped;
+};
+
+//
+
+template <class Key, class Mapped, class Hash, class Equal>
+hash_map_tm<Key, Mapped, Hash, Equal>::hash_map_tm()
+    : m_item_count(0), m_buckets_count(0), m_buckets(nullptr) {}
+
+template <class Key, class Mapped, class Hash, class Equal>
+size_t hash_map_tm<Key, Mapped, Hash, Equal>::size() const {
+  return trade::atomically(trade::assume_readonly,
+                           [&]() { return m_item_count.load(); });
+}
+
+template <class Key, class Mapped, class Hash, class Equal>
+bool hash_map_tm<Key, Mapped, Hash, Equal>::empty() const {
+  return trade::atomically(trade::assume_readonly,
+                           [&]() { return m_item_count == 0; });
+}
+
+template <class Key, class Mapped, class Hash, class Equal>
+void hash_map_tm<Key, Mapped, Hash, Equal>::clear() {
+  trade::atomically([&]() {
+    m_item_count = 0;
+    m_buckets_count = 0;
+    m_buckets = nullptr;
+  });
+}
+
+template <class Key, class Mapped, class Hash, class Equal>
+void hash_map_tm<Key, Mapped, Hash, Equal>::swap(hash_map_tm &that) {
+  trade::atomically([&]() {
+    std::swap(m_item_count.ref(), that.m_item_count.ref());
+    std::swap(m_buckets_count.ref(), that.m_buckets_count.ref());
+    std::swap(m_buckets.ref(), that.m_buckets.ref());
+  });
+}
+
+template <class Key, class Mapped, class Hash, class Equal>
+template <class ForwardableMapped, class Config>
+bool hash_map_tm<Key, Mapped, Hash, Equal>::add_or_set(
+    const Key &key, ForwardableMapped &&mapped, Config config) {
+  auto key_hash = Hash()(key);
+
+  return trade::atomically(config, [&]() {
+    auto item_count = m_item_count.load();
+    auto buckets_count = m_buckets_count.load();
+    auto buckets = m_buckets.load();
+
+    if (buckets_count <= item_count) {
+      auto old_buckets = std::move(buckets);
+      auto old_buckets_count = buckets_count;
+
+      m_buckets_count = buckets_count =
+          dumpster::prime_less_than_next_pow_2_or_1(old_buckets_count * 2 + 1);
+      m_buckets = buckets = ptr_t<array_hack<link>>(
+          reinterpret_cast<array_hack<link> *>(new link[buckets_count]));
+
+      for (size_t i = 0; i < old_buckets_count; ++i) {
+        auto work = old_buckets->at(i).load();
+        while (work) {
+          auto &ref_next = work->m_next.ref();
+          auto &ref_bucket =
+              buckets->at(Hash()(work->m_key) % buckets_count).ref();
+          auto next = std::move(ref_next);
+          ref_next = std::move(ref_bucket);
+          ref_bucket = std::move(work);
+          work = std::move(next);
+        }
+      }
+    }
+
+    auto prev = &buckets->at(key_hash % buckets_count);
+    while (true) {
+      if (auto node = prev->load()) {
+        if (Equal()(node->m_key, key)) {
+          node->m_mapped = std::forward<ForwardableMapped>(mapped);
+          return false;
+        } else {
+          prev = &node->m_next;
+        }
+      } else {
+        prev->ref().reset(
+            new node_t(key, std::forward<ForwardableMapped>(mapped)));
+        m_item_count = item_count + 1;
+        return true;
+      }
+    }
+  });
+}
+
+template <class Key, class Mapped, class Hash, class Equal>
+std::optional<Mapped>
+hash_map_tm<Key, Mapped, Hash, Equal>::try_get(const Key &key) const {
+  auto key_hash = Hash()(key);
+  return trade::atomically(
+      trade::assume_readonly, [&]() -> std::optional<Mapped> {
+        if (auto buckets_count = m_buckets_count.load())
+          for (auto node =
+                   m_buckets.load()->at(key_hash % buckets_count).load();
+               node;
+               node = node->m_next)
+            if (Equal()(node->m_key, key))
+              return node->m_mapped.load();
+        return std::nullopt;
+      });
+}
+
+template <class Key, class Mapped, class Hash, class Equal>
+bool hash_map_tm<Key, Mapped, Hash, Equal>::remove(const Key &key) {
+  auto key_hash = Hash()(key);
+  return trade::atomically([&]() {
+    if (auto buckets_count = m_buckets_count.load()) {
+      auto prev = &m_buckets.load()->at(key_hash % buckets_count);
+      while (true) {
+        auto node = prev->load();
+        if (!node)
+          break;
+        if (Equal()(node->m_key, key)) {
+          *prev = node->m_next;
+          return true;
+        }
+        prev = &node->m_next;
+      }
+    }
+    return false;
+  });
+}
+
+#ifndef NDEBUG
+template <class Key, class Mapped, class Hash, class Equal>
+std::atomic<size_t> hash_map_tm<Key, Mapped, Hash, Equal>::s_live_nodes = 0;
+#endif
+
+} // namespace testing
diff --git a/internals/include/testing/memory.hpp b/internals/include/testing/memory.hpp
new file mode 100644
index 0000000..ff890c1
--- /dev/null
+++ b/internals/include/testing/memory.hpp
@@ -0,0 +1,22 @@
+#pragma once
+
+#include "trade_v1/trade.hpp"
+
+namespace testing {
+
+template <class T> class unique {};
+
+template <class T> class shared {};
+
+} // namespace testing
+
+namespace trade_v1 {
+
+using namespace testing;
+
+template <class T> class atom<shared<T>> {
+
+  shared<T> load() const;
+};
+
+} // namespace trade_v1
diff --git a/internals/testing/contention_test.cpp b/internals/testing/contention_test.cpp
index 5b2562c..b1fa267 100644
--- a/internals/testing/contention_test.cpp
+++ b/internals/testing/contention_test.cpp
@@ -13,11 +13,11 @@ using namespace trade_v1;
 auto contention_test = test([]() {
   const size_t n_threads = std::thread::hardware_concurrency();
-  const size_t n_ops = 100000;
+  const size_t n_ops = 1000000;
 
   atom<size_t> n_threads_started = 0, n_threads_stopped = 0;
 
-  constexpr size_t n_atoms = 7;
+  constexpr size_t n_atoms = 7000;
 
   std::unique_ptr<atom<size_t>[]> atoms(new atom<size_t>[n_atoms]);
 
   for (size_t i = 0; i < n_atoms; ++i)
diff --git a/internals/testing/hash_map_tm_test.cpp b/internals/testing/hash_map_tm_test.cpp
new file mode 100644
index 0000000..918bea8
--- /dev/null
+++ b/internals/testing/hash_map_tm_test.cpp
@@ -0,0 +1,67 @@
+#include "testing/hash_map_tm.hpp"
+
+#include "testing_v1/test.hpp"
+
+#include "dumpster_v1/ranqd1.hpp"
+
+#include "polyfill_v1/memory.hpp"
+#include <thread>
+
+using namespace testing_v1;
+
+using namespace testing;
+using namespace trade;
+
+auto hash_map_test = test([]() {
+  const size_t n_threads = std::thread::hardware_concurrency();
+  const size_t n_ops = 100000;
+  const uint32_t max_keys = 31;
+
+  using hash_map_tm_type = hash_map_tm<uint32_t, size_t>;
+
+  hash_map_tm_type map;
+
+  atom<size_t> done(0);
+
+  auto start = std::chrono::high_resolution_clock::now();
+
+  for (size_t t = 0; t < n_threads; ++t)
+    std::thread([&, t]() {
+      auto s = static_cast<uint32_t>(t);
+
+      for (size_t i = 0; i < n_ops; ++i) {
+        uint32_t key = (s = dumpster::ranqd1(s)) % max_keys;
+        map.add_or_set(key, t, trade::stack<8192>);
+      }
+
+      atomically([&]() { done.ref() += 1; });
+    }).detach();
+
+  atomically(assume_readonly, [&]() {
+    if (done != n_threads)
+      retry();
+  });
+
+  std::chrono::duration<double> elapsed =
+      std::chrono::high_resolution_clock::now() - start;
+  auto n_total = n_ops * n_threads;
+  fprintf(stderr,
+          "%f Mops in %f s = %f Mops/s\n",
+          n_total / 1000000.0,
+          elapsed.count(),
+          n_total / elapsed.count() / 1000000.0);
+
+  verify(map.size() == max_keys);
+
+  {
+    hash_map_tm_type other;
+    map.swap(other);
+    verify(other.size() == max_keys);
+    verify(map.size() == 0);
+    other.clear();
+  }
+
+#ifndef NDEBUG
+  verify(!hash_map_tm_type::s_live_nodes);
+#endif
+});
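
Aside (not part of the patch): a minimal single-threaded sketch of how the new `hash_map_tm` API can be exercised, assuming the same `testing_v1` test/verify helpers and `trade_v1` defaults used by the test above; the test name `hash_map_tm_basics_test` is made up for illustration.

#include "testing/hash_map_tm.hpp"

#include "testing_v1/test.hpp"

using namespace testing_v1;
using namespace testing;

auto hash_map_tm_basics_test = test([]() {
  hash_map_tm<int, int> map; // Key = int, Mapped = int, default Hash/Equal

  verify(map.empty());
  verify(map.add_or_set(1, 10));  // true: key 1 was inserted
  verify(!map.add_or_set(1, 11)); // false: existing mapping was overwritten

  verify(map.try_get(1).value() == 11); // try_get returns std::optional<Mapped>
  verify(!map.try_get(2));              // missing key yields std::nullopt

  verify(map.remove(1));  // true: a node was unlinked
  verify(!map.remove(1)); // false: nothing left to remove
  verify(map.empty());
});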