From 8a1b5abee1e9d1d3bb2b90b407f515f031f3a201 Mon Sep 17 00:00:00 2001
From: Nekotekina
Date: Sun, 30 Sep 2018 02:47:14 +0300
Subject: [PATCH] utils::typemap implemented

Container for all types
---
 Utilities/typemap.h | 1226 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 1226 insertions(+)
 create mode 100644 Utilities/typemap.h

diff --git a/Utilities/typemap.h b/Utilities/typemap.h
new file mode 100644
index 0000000000..0627dc4dba
--- /dev/null
+++ b/Utilities/typemap.h
@@ -0,0 +1,1226 @@
+#pragma once
+
+#include "types.h"
+#include "mutex.h"
+#include "Atomic.h"
+#include "VirtualMemory.h"
+#include <array>
+#include <functional>
+#include <mutex>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+namespace utils
+{
+	class typemap;
+
+	template <typename T>
+	class typeptr;
+
+	class typeptr_base;
+
+	// Special tag for typemap access: request free id
+	constexpr struct id_new_t{} id_new{};
+
+	// Special tag for typemap access: unconditionally access the only object (max_count = 1 only)
+	constexpr struct id_any_t{} id_any{};
+
+	// Special tag for typemap access: like id_any but also default-construct the object if not exists
+	constexpr struct id_always_t{} id_always{};
+
+	// Aggregate with information for more accurate object retrieval, isn't accepted internally
+	struct weak_typeptr
+	{
+		uint id;
+		uint type;
+
+		// Stamp isn't automatically stored and checked anywhere
+		ullong stamp;
+	};
+
+	// Detect shared type: id_share tag type can specify any type
+	template <typename T, typename = void>
+	struct typeinfo_share
+	{
+		static constexpr bool is_shared = false;
+	};
+
+	template <typename T>
+	struct typeinfo_share<T, std::void_t<typename std::decay_t<T>::id_share>>
+	{
+		using share = std::decay_t<typename std::decay_t<T>::id_share>;
+
+		static constexpr bool is_shared = true;
+	};
+
+	// Detect id transformation trait (multiplier)
+	template <typename T, typename = void>
+	struct typeinfo_step
+	{
+		static constexpr uint step = 1;
+	};
+
+	template <typename T>
+	struct typeinfo_step<T, std::void_t<decltype(std::decay_t<T>::id_step)>>
+	{
+		static constexpr uint step = uint{std::decay_t<T>::id_step};
+	};
+
+	// Detect id transformation trait (addend)
+	template <typename T, typename = void>
+	struct typeinfo_bias
+	{
+		static constexpr uint bias = 0;
+	};
+
+	// template <typename T>
+	// struct typeinfo_bias<T, std::void_t<decltype(std::decay_t<T>::id_bias)>>
+	// {
+	// 	static constexpr uint bias = uint{std::decay_t<T>::id_bias};
+	// };
+
+	template <typename T>
+	struct typeinfo_bias<T, std::void_t<decltype(std::decay_t<T>::id_base)>>
+	{
+		static constexpr uint bias = uint{std::decay_t<T>::id_base};
+	};
+
+	// Detect max number of objects, default = 1
+	template <typename T, typename = void>
+	struct typeinfo_count
+	{
+		static constexpr uint max_count = 1;
+	};
+
+	template <typename T>
+	struct typeinfo_count<T, std::void_t<decltype(std::decay_t<T>::id_count)>>
+	{
+		static constexpr uint get_max()
+		{
+			// Use count of the "shared" tag type, it should be a public base of T in this case
+			if constexpr (typeinfo_share<T>::is_shared)
+			{
+				using shared = typename typeinfo_share<T>::share;
+
+				if constexpr (!std::is_same_v<std::decay_t<T>, shared>)
+				{
+					return typeinfo_count<shared>::max_count;
+				}
+			}
+
+			return uint{std::decay_t<T>::id_count};
+		}
+
+		static constexpr uint max_count = get_max();
+
+		static_assert(ullong{max_count} * typeinfo_step<T>::step <= 0x1'0000'0000ull);
+	};
+
+	// Detect polymorphic type enablement
+	template <typename T, typename = void>
+	struct typeinfo_poly
+	{
+		static constexpr bool is_poly = false;
+	};
+
+	template <typename T>
+	struct typeinfo_poly<T, std::void_t<decltype(std::decay_t<T>::id_poly)>>
+	{
+		static constexpr bool is_poly = true;
+
+		static_assert(std::has_virtual_destructor_v<std::decay_t<T>>);
+	};
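+
+	// Illustrative sketch of the member tags detected above (the types
+	// share_base and my_object are hypothetical):
+	//
+	//   struct share_base
+	//   {
+	//       static constexpr uint id_count = 16; // up to 16 objects
+	//   };
+	//
+	//   struct my_object : share_base
+	//   {
+	//       using id_share = share_base;       // share storage with share_base
+	//       static constexpr uint id_base = 1; // ids start at 1
+	//       static constexpr uint id_step = 2; // ids advance by 2
+	//   };
+	//
+	// A type with a member id_poly (and a virtual destructor) is polymorphic:
+	// it is accessed as a reference and stored through a base-class pointer.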
+
+	// Type information
+	struct typeinfo_base
+	{
+		uint type = 0;
+		uint size = 0;
+		uint align = 0;
+		uint count = 0;
+		void(*clean)(class typemap_block*) = 0;
+		const typeinfo_base* base = 0;
+
+		constexpr typeinfo_base() noexcept = default;
+
+	protected:
+		// Next typeinfo in linked list
+		typeinfo_base* next = 0;
+
+		template <typename T>
+		friend struct typeinfo;
+
+		friend class typecounter;
+		friend class typemap_block;
+		friend class typemap;
+	};
+
+	template <typename T>
+	inline typeinfo_base g_sh{};
+
+	// Class for automatic type registration
+	class typecounter
+	{
+		// Linked list built at global initialization time
+		typeinfo_base* first = &g_sh<void>;
+		typeinfo_base* next = first;
+		typeinfo_base* last = first;
+
+		template <typename T>
+		friend struct typeinfo;
+
+		friend class typemap_block;
+		friend class typemap;
+
+	public:
+		constexpr typecounter() noexcept = default;
+
+		// Get next type id, or total type count
+		operator uint() const
+		{
+			return last->type + 1;
+		}
+	};
+
+	// Global typecounter instance
+	inline typecounter g_typecounter{};
+
+	template <typename T>
+	struct typeinfo : typeinfo_base
+	{
+		static void call_destructor(class typemap_block* ptr);
+
+		typeinfo();
+
+		template <typename, typename>
+		friend struct typepoly;
+	};
+
+	// Type information for each used type
+	template <typename T>
+	inline const typeinfo<T> g_typeinfo{};
+
+	template <typename T, typename Base>
+	struct typepoly
+	{
+		uint type = 0;
+
+		typepoly();
+	};
+
+	// Polymorphic type helper
+	template <typename T, typename Base>
+	inline const typepoly<T, Base> g_typepoly{};
+
+	template <typename T>
+	typeinfo<T>::typeinfo()
+	{
+		static_assert(alignof(T) < 4096);
+
+		this->type = g_typecounter;
+		this->size = uint{sizeof(T)};
+		this->align = uint{alignof(T)};
+		this->count = typeinfo_count<T>::max_count;
+		this->clean = &call_destructor;
+
+		if (this != &g_typeinfo<T>)
+		{
+			// Protect global state against unrelated constructions of typeinfo<> objects
+			this->type = g_typeinfo<T>.type;
+		}
+		else
+		{
+			// Update linked list
+			g_typecounter.next->next = this;
+			g_typecounter.next = this;
+			g_typecounter.last = this;
+		}
+
+		if constexpr (typeinfo_share<T>::is_shared)
+		{
+			// Store additional information for shared types
+			using shared = typename typeinfo_share<T>::share;
+
+			// Bind
+			this->base = &g_sh<shared>;
+
+			if (this != &g_typeinfo<T>)
+			{
+				return;
+			}
+
+			// Use smallest type id (void tag can reuse id 0)
+			if (g_sh<shared>.type == 0 && !std::is_void_v<shared>)
+				g_sh<shared>.type = this->type;
+
+			// Update max size and alignment
+			if (g_sh<shared>.size < this->size)
+				g_sh<shared>.size = this->size;
+			if (g_sh<shared>.align < this->align)
+				g_sh<shared>.align = this->align;
+			if (g_sh<shared>.count < this->count)
+				g_sh<shared>.count = this->count;
+		}
+	}
+
+	template <typename T, typename Base>
+	typepoly<T, Base>::typepoly()
+	{
+		static_assert(alignof(T) < 4096);
+
+		if (this != &g_typepoly<T, Base>)
+		{
+			// Protect global state against unrelated constructions of typepoly<> objects
+			return;
+		}
+
+		// Set min align 16 to make some space for a pointer
+		const uint size{sizeof(T) < 16 ? 16 : sizeof(T)};
+		const uint align{alignof(T) < 16 ? 16 : alignof(T)};
+
+		typeinfo_base& info = const_cast<typeinfo<Base>&>(g_typeinfo<Base>);
+
+		this->type = info.type;
+
+		// Update max size and alignment of the base class typeinfo
+		if (info.size < size)
+			info.size = size;
+		if (info.align < align)
+			info.align = align;
+
+		if constexpr (typeinfo_share<Base>::is_shared)
+		{
+			typeinfo_base& base = const_cast<typeinfo_base&>(*info.base);
+
+			// Update max size and alignment of the shared type
+			if (base.size < size)
+				base.size = size;
+			if (base.align < align)
+				base.align = align;
+		}
+	}
+
+	// Internal, control block for a particular object
+	class typemap_block
+	{
+		friend typemap;
+
+		template <typename T>
+		friend class typeptr;
+
+		friend class typeptr_base;
+
+		shared_mutex m_mutex;
+		atomic_t<uint> m_type;
+
+	public:
+		typemap_block() = default;
+
+		// Get pointer to the object of type T, with respect to alignment
+		template <typename T, uint SelfSize = 8>
+		T* get_ptr()
+		{
+			constexpr uint offset = alignof(T) < SelfSize ? ::align(SelfSize, alignof(T)) : alignof(T);
+			return reinterpret_cast<T*>(reinterpret_cast<uchar*>(this) + offset);
+		}
+	};
+
+	static_assert(std::is_standard_layout_v<typemap_block>);
+	static_assert(sizeof(typemap_block) == 8);
+
+	template <typename T>
+	void typeinfo<T>::call_destructor(typemap_block* ptr)
+	{
+		// Choose cleanup routine
+		if constexpr (typeinfo_poly<T>::is_poly)
+		{
+			// Read actual pointer to the base class
+			(*ptr->get_ptr<T*>())->~T();
+		}
+		else
+		{
+			ptr->get_ptr<T>()->~T();
+		}
+	}
+
+	// Internal, typemap control block for a particular type
+	struct alignas(64) typemap_head
+	{
+		// Pointer to the uninitialized storage
+		uchar* m_ptr = nullptr;
+
+		// Free ID counter
+		atomic_t<uint> m_sema{0};
+
+		// Hint for next free ID
+		atomic_t<uint> m_hint{0};
+
+		// Max ID ever used + 1
+		atomic_t<uint> m_limit{0};
+
+		// Increased on each constructor call
+		atomic_t<ullong> m_total{0};
+
+		// Aligned size of the storage for each object
+		uint m_ssize = 0;
+
+		// Total object count in the storage
+		uint m_count = 0;
+
+		// Destructor caller; related to particular type, not the current storage
+		void(*clean)(typemap_block*) = 0;
+	};
+
+	class typeptr_base
+	{
+		typemap_head* m_head;
+		typemap_block* m_block;
+
+		template <typename T>
+		friend class typeptr;
+
+		friend typemap;
+	};
+
+	// Pointer + lock object, possible states:
+	// 1) Invalid - bad id, no space, or after release()
+	// 2) Null - locked, but the object does not exist
+	// 3) OK - locked and the object exists
+	template <typename T>
+	class typeptr : typeptr_base
+	{
+		using typeptr_base::m_head;
+		using typeptr_base::m_block;
+
+		friend typemap;
+
+		void unlock()
+		{
+			// Additional semaphore is not used for singletons
+			if constexpr (typeinfo_count<T>::max_count > 1)
+			{
+				if (m_block->m_type == 0)
+				{
+					// Object deleted or not created: return semaphore
+					m_head->m_sema--;
+				}
+			}
+
+			if constexpr (type_const())
+			{
+				m_block->m_mutex.unlock_shared();
+			}
+			else
+			{
+				m_block->m_mutex.unlock();
+			}
+		}
+
+	public:
+		constexpr typeptr(typeptr_base base) noexcept
+			: typeptr_base(base)
+		{
+		}
+
+		typeptr(const typeptr&) = delete;
+
+		typeptr& operator=(const typeptr&) = delete;
+
+		~typeptr()
+		{
+			if (m_block)
+			{
+				unlock();
+			}
+		}
+
+		// Verify the object exists
+		bool exists() const noexcept
+		{
+			return m_block->m_type != 0;
+		}
+
+		// Verify the state is valid
+		explicit operator bool() const noexcept
+		{
+			return m_block != nullptr;
+		}
+
+		// Get the pointer to the existing object
+		template <typename D = std::decay_t<T>>
+		auto get() const noexcept
+		{
+			ASSUME(m_block->m_type != 0);
+
+			if constexpr (std::is_lvalue_reference_v<T>)
+			{
+				return static_cast<D*>(*m_block->get_ptr<std::remove_reference_t<T>*>());
+			}
+			else
+			{
+				return m_block->get_ptr<T>();
+			}
+		}
+
+		auto operator->() const noexcept
+		{
+			return get();
+		}
+
+		// Release the lock and set invalid state
+		void release()
+		{
+			if (m_block)
+			{
+				unlock();
+				m_block = nullptr;
+			}
+		}
+
+		// Call the constructor
+		template <typename New = std::decay_t<T>, typename... Args>
+		weak_typeptr create(Args&&... args)
+		{
+			static_assert(!type_const());
+
+			const uint this_id = this->get_id();
+
+			if constexpr (typeinfo_count<T>::max_count > 1)
+			{
+				// Update hints only if the object is not being recreated
+				if (!m_block->m_type)
+				{
+					// Update max count
+					auto lim = m_head->m_limit.fetch_op([this_id](uint& limit)
+					{
+						if (limit <= this_id)
+						{
+							limit = this_id + 1;
+							return true;
+						}
+
+						return false;
+					});
+
+					// Update hint (TODO)
+					m_head->m_hint.atomic_op([this_id, lim](uint& hint)
+					{
+						if (lim.first + 1 == typeinfo_count<T>::max_count && hint <= this_id)
+						{
+							hint = this_id + 1;
+						}
+						else
+						{
+							hint++;
+						}
+
+						if (hint == typeinfo_count<T>::max_count)
+						{
+							hint = 0;
+						}
+					});
+				}
+			}
+
+			if constexpr (std::is_lvalue_reference_v<T>)
+			{
+				using base = std::remove_reference_t<T>;
+
+				if (m_block->m_type.exchange(g_typepoly<New, base>.type) != 0)
+				{
+					(*m_block->get_ptr<base*>())->~base();
+				}
+
+				// Object storage starts after the control block and the base pointer
+				*m_block->get_ptr<base*>() = new (m_block->get_ptr<New, 16>()) New(std::forward<Args>(args)...);
+			}
+			else
+			{
+				static_assert(std::is_same_v<New, std::decay_t<T>>);
+
+				// Set type; zero value shall not be observed in the case of recreation
+				if (m_block->m_type.exchange(type_index()) != 0)
+				{
+					// Destroy object if it exists
+					m_block->get_ptr<T>()->~T();
+				}
+
+				new (m_block->get_ptr<T>()) New(std::forward<Args>(args)...);
+			}
+
+			// Return a weak pointer struct with a unique stamp number
+			weak_typeptr w;
+			w.id = this_id;
+			w.type = m_block->m_type;
+			w.stamp = ++m_head->m_total;
+			return w;
+		}
+
+		// Call the destructor if object exists
+		void destroy() noexcept
+		{
+			static_assert(!type_const());
+
+			if (!m_block->m_type.exchange(0))
+			{
+				return;
+			}
+
+			if constexpr (std::is_lvalue_reference_v<T>)
+			{
+				using base = std::remove_reference_t<T>;
+				(*m_block->get_ptr<base*>())->~base();
+			}
+			else
+			{
+				m_block->get_ptr<T>()->~T();
+			}
+		}
+
+		// Get the ID
+		uint get_id() const
+		{
+			// It's not often needed so figure it out instead of storing it
+			const std::size_t diff = reinterpret_cast<uchar*>(m_block) - m_head->m_ptr;
+			const std::size_t quot = diff / m_head->m_ssize;
+
+			if (diff % m_head->m_ssize || quot >= typeinfo_count<T>::max_count)
+			{
+				return -1;
+			}
+
+			constexpr uint bias = typeinfo_bias<T>::bias;
+			constexpr uint step = typeinfo_step<T>::step;
+			return static_cast<uint>(quot) * step + bias;
+		}
+
+		// Get current type
+		uint get_type() const
+		{
+			return m_block->m_type;
+		}
+
+		static uint type_index()
+		{
+			return g_typeinfo<std::decay_t<T>>.type;
+		}
+
+		static constexpr bool type_const()
+		{
+			return std::is_const_v<std::remove_reference_t<T>>;
+		}
+	};
+
+	// Dynamic object collection, one or more per any type; shall not be initialized before main()
+	class typemap
+	{
+		// Pointer to the dynamic array
+		typemap_head* m_map = nullptr;
+
+		// Pointer to the virtual memory
+		void* m_memory = nullptr;
+
+		// Virtual memory size
+		std::size_t m_total = 0;
+
+	public:
+		typemap(const typemap&) = delete;
+
+		typemap& operator=(const typemap&) = delete;
+
+		// Construct without initialization (suitable for global typemap)
+		explicit constexpr typemap(std::nullptr_t) noexcept
+		{
+		}
+
+		// Construct with initialization
+		typemap()
+		{
+			init();
+		}
+
+		~typemap()
+		{
+			delete[] m_map;
+
+			if (m_memory)
+			{
+				utils::memory_release(m_memory, m_total);
+			}
+		}
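+
+		// Illustrative sketch (hypothetical global g_map): a typemap meant to
+		// live at global scope is constructed with nullptr and initialized
+		// explicitly once main() has started, after all typeinfo<> globals
+		// have been registered:
+		//
+		//   utils::typemap g_map{nullptr};
+		//
+		//   int main()
+		//   {
+		//       g_map.init();
+		//   }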
+
+		// Recreate, also required if constructed without initialization.
+		void init()
+		{
+			// Kill the ability to register more types (should segfault on attempt)
+			g_typecounter.next = nullptr;
+
+			if (g_typecounter <= 1)
+			{
+				return;
+			}
+
+			// Recreate and copy some type information
+			if (m_map == nullptr)
+			{
+				m_map = new typemap_head[g_typecounter]();
+			}
+			else
+			{
+				auto type = g_typecounter.first;
+
+				for (uint i = 0; type; i++, type = type->next)
+				{
+					// Delete objects (there shall be no threads accessing them)
+					const uint lim = m_map[i].m_count != 1 ? +m_map[i].m_limit : 1;
+
+					for (std::size_t j = 0; j < lim; j++)
+					{
+						const auto block = reinterpret_cast<typemap_block*>(m_map[i].m_ptr + j * m_map[i].m_ssize);
+
+						if (const uint type_id = block->m_type)
+						{
+							m_map[type_id].clean(block);
+						}
+					}
+
+					// Reset mutable fields
+					m_map[i].m_sema.raw() = 0;
+					m_map[i].m_hint.raw() = 0;
+					m_map[i].m_limit.raw() = 0;
+					m_map[i].m_total.raw() = 0;
+				}
+			}
+
+			// Initialize virtual memory if necessary
+			if (m_memory == nullptr)
+			{
+				// Determine total size, copy typeinfo
+				auto type = g_typecounter.first;
+
+				for (uint i = 0; type; i++, type = type->next)
+				{
+					// Use base info if provided
+					const auto base = type->base ? type->base : type;
+
+					const uint align = base->align;
+					const uint ssize = ::align(sizeof(typemap_block), align) + ::align(base->size, align);
+					const auto total = std::size_t{ssize} * base->count;
+					const auto start = std::uintptr_t{::align(m_total, align)};
+
+					if (total && type->type == base->type)
+					{
+						// Move forward hoping there are no usable gaps wasted
+						m_total = start + total;
+
+						// Store storage size and object count
+						m_map[i].m_ssize = ssize;
+						m_map[i].m_count = base->count;
+						m_map[i].m_ptr = reinterpret_cast<uchar*>(start);
+					}
+
+					// Copy destructor for indexed access
+					m_map[i].clean = type->clean;
+				}
+
+				// Allocate virtual memory
+				m_memory = utils::memory_reserve(m_total);
+				utils::memory_commit(m_memory, m_total);
+
+				// Update pointers
+				for (uint i = 0, n = g_typecounter; i < n; i++)
+				{
+					if (m_map[i].m_count)
+					{
+						m_map[i].m_ptr = static_cast<uchar*>(m_memory) + reinterpret_cast<std::uintptr_t>(m_map[i].m_ptr);
+					}
+				}
+			}
+			else
+			{
+				// Reinitialize virtual memory at the same location
+				utils::memory_reset(m_memory, m_total);
+			}
+		}
+
+		// Return allocated virtual memory block size (not aligned)
+		std::size_t get_memory_size() const
+		{
+			return m_total;
+		}
+
+	private:
+
+		// Prepare pointers
+		template <typename T, typename Arg>
+		typeptr_base init_ptr(Arg&& id)
+		{
+			typemap_head* head;
+			typemap_block* block;
+
+			if constexpr (typeinfo_count<T>::max_count == 0)
+			{
+				return {};
+			}
+
+			const uint type_id = g_typeinfo<std::decay_t<T>>.type;
+
+			if constexpr (typeinfo_share<T>::is_shared)
+			{
+				head = &m_map[g_sh<typename typeinfo_share<T>::share>.type];
+			}
+			else
+			{
+				head = &m_map[type_id];
+			}
+
+			using id_tag = std::decay_t<Arg>;
+
+			if constexpr (std::is_same_v<id_tag, id_new_t> || std::is_same_v<id_tag, id_any_t> || std::is_same_v<id_tag, id_always_t>)
+			{
+				if constexpr (constexpr uint last = typeinfo_count<T>::max_count - 1)
+				{
+					// If max_count > 1 only id_new is supported
+					static_assert(std::is_same_v<id_tag, id_new_t> && !std::is_const_v<std::remove_reference_t<T>>);
+
+					// Try to acquire the semaphore (conditional increment)
+					const uint old_sema = head->m_sema.load();
+
+					if (UNLIKELY(old_sema > last || !head->m_sema.compare_and_swap_test(old_sema, old_sema + 1)))
+					{
+						block = nullptr;
+					}
+					else
+					{
+						// Find empty location and lock it, starting from hint index
+						for (uint i = head->m_hint;; i = (i == last ? 0 : i + 1))
+						{
+							block = reinterpret_cast<typemap_block*>(head->m_ptr + std::size_t{i} * head->m_ssize);
+
+							if (block->m_type == 0 && block->m_mutex.try_lock())
+							{
+								if (LIKELY(block->m_type == 0))
+								{
+									break;
+								}
+
+								block->m_mutex.unlock();
+							}
+						}
+					}
+				}
+				else
+				{
+					// Always access first element
+					block = reinterpret_cast<typemap_block*>(head->m_ptr);
+
+					if constexpr (std::is_same_v<id_tag, id_new_t>)
+					{
+						static_assert(!std::is_const_v<std::remove_reference_t<T>>);
+
+						if (block->m_type != 0 || !block->m_mutex.try_lock())
+						{
+							block = nullptr;
+						}
+						else if (UNLIKELY(block->m_type != 0))
+						{
+							block->m_mutex.unlock();
+							block = nullptr;
+						}
+					}
+					else if constexpr (typeinfo_share<T>::is_shared)
+					{
+						// id_any/id_always allows either null or matching type
+						if (UNLIKELY(block->m_type && block->m_type != type_id))
+						{
+							block = nullptr;
+						}
+					}
+				}
+			}
+			else if constexpr (std::is_invocable_r_v<bool, Arg&&, const std::decay_t<T>&>)
+			{
+				// Access with a lookup function
+				for (std::size_t j = 0; j < (typeinfo_count<T>::max_count != 1 ? +head->m_limit : 1); j++)
+				{
+					block = reinterpret_cast<typemap_block*>(head->m_ptr + j * head->m_ssize);
+
+					if (block->m_type == type_id)
+					{
+						std::lock_guard lock(block->m_mutex);
+
+						if (block->m_type == type_id)
+						{
+							if constexpr (std::is_lvalue_reference_v<T>)
+							{
+								if (std::invoke(std::forward<Arg>(id), std::as_const(**block->get_ptr<std::remove_reference_t<T>*>())))
+								{
+									break;
+								}
+							}
+							else if (std::invoke(std::forward<Arg>(id), std::as_const(*block->get_ptr<T>())))
+							{
+								break;
+							}
+						}
+					}
+
+					block = nullptr;
+				}
+			}
+			else
+			{
+				// Access by transformed id
+				constexpr uint bias = typeinfo_bias<T>::bias;
+				constexpr uint step = typeinfo_step<T>::step;
+				const uint unbiased = static_cast<uint>(std::forward<Arg>(id)) - bias;
+				const uint unscaled = unbiased / step;
+
+				block = reinterpret_cast<typemap_block*>(head->m_ptr + std::size_t{head->m_ssize} * unscaled);
+
+				// Check id range and type
+				if (UNLIKELY(unscaled >= typeinfo_count<T>::max_count || unbiased % step))
+				{
+					block = nullptr;
+				}
+				else if constexpr (typeinfo_share<T>::is_shared)
+				{
+					if (UNLIKELY(block->m_type != type_id))
+					{
+						block = nullptr;
+					}
+				}
+				else
+				{
+					if (UNLIKELY(block->m_type == 0))
+					{
+						block = nullptr;
+					}
+				}
+			}
+
+			typeptr_base result;
+			result.m_head = head;
+			result.m_block = block;
+			return result;
+		}
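+
+		// Illustrative usage sketch for lock() further below (hypothetical
+		// types foo and bar, hypothetical object "map"):
+		//
+		//   // Take a free slot and construct a new "foo":
+		//   if (auto ptr = map.lock<foo>(id_new))
+		//   {
+		//       ptr.create();
+		//   }
+		//
+		//   // Lock an existing "foo" by id together with the "bar" singleton
+		//   // (const access takes the reader lock):
+		//   auto [a, b] = map.lock<foo, const bar>(1u, id_any);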
+
+		template <typename T, typename Arg>
+		void check_ptr(typemap_block*& block, Arg&& id)
+		{
+			using id_tag = std::decay_t<Arg>;
+
+			const uint type_id = g_typeinfo<std::decay_t<T>>.type;
+
+			if constexpr (std::is_same_v<id_tag, id_new_t>)
+			{
+				// No action for id_new
+				return;
+			}
+			else if constexpr (std::is_same_v<id_tag, id_any_t> && !typeinfo_share<T>::is_shared)
+			{
+				// No action for unshared id_any
+				return;
+			}
+			else if constexpr (std::is_same_v<id_tag, id_any_t>)
+			{
+				// Possibly shared id_any
+				if (LIKELY(!block || block->m_type == type_id || block->m_type == 0))
+				{
+					return;
+				}
+			}
+			else if constexpr (std::is_same_v<id_tag, id_always_t>)
+			{
+				if constexpr (typeinfo_share<T>::is_shared)
+				{
+					if (!block)
+					{
+						return;
+					}
+
+					if (block->m_type && block->m_type != type_id)
+					{
+						block->m_mutex.unlock();
+						block = nullptr;
+						return;
+					}
+				}
+
+				if (block->m_type == 0 && block->m_type.compare_and_swap_test(0, type_id))
+				{
+					// Initialize object if necessary
+					static_assert(!std::is_const_v<std::remove_reference_t<T>>);
+
+					if constexpr (std::is_lvalue_reference_v<T>)
+					{
+						using base = std::remove_reference_t<T>;
+						*block->get_ptr<base*>() = new (block->get_ptr<base, 16>()) base();
+					}
+					else
+					{
+						new (block->get_ptr<T>()) T();
+					}
+				}
+
+				return;
+			}
+			else if constexpr (std::is_invocable_r_v<bool, Arg&&, const std::decay_t<T>&>)
+			{
+				if (UNLIKELY(!block))
+				{
+					return;
+				}
+
+				if (LIKELY(block->m_type == type_id))
+				{
+					if constexpr (std::is_lvalue_reference_v<T>)
+					{
+						if (std::invoke(std::forward<Arg>(id), std::as_const(**block->get_ptr<std::remove_reference_t<T>*>())))
+						{
+							return;
+						}
+					}
+					else if (std::invoke(std::forward<Arg>(id), std::as_const(*block->get_ptr<T>())))
+					{
+						return;
+					}
+				}
+			}
+			else if (block)
+			{
+				if constexpr (!typeinfo_share<T>::is_shared)
+				{
+					if (LIKELY(block->m_type))
+					{
+						return;
+					}
+				}
+				else
+				{
+					if (LIKELY(block->m_type == type_id))
+					{
+						return;
+					}
+				}
+			}
+			else
+			{
+				return;
+			}
+
+			// Fallback: unlock and invalidate
+			block->m_mutex.unlock();
+			block = nullptr;
+		}
+
+		template <typename T, bool Lock, bool Try = false>
+		bool lock_ptr(typemap_block* block)
+		{
+			// Use reader lock for const access
+			constexpr bool is_const = std::is_const_v<std::remove_reference_t<T>>;
+
+			// Already locked
+			if constexpr (!Lock)
+			{
+				return true;
+			}
+			else
+			{
+				// Skip failed ids
+				if (!block)
+				{
+					return true;
+				}
+
+				if constexpr (Try)
+				{
+					if constexpr (is_const)
+					{
+						return block->m_mutex.try_lock_shared();
+					}
+					else
+					{
+						return block->m_mutex.try_lock();
+					}
+				}
+				else if constexpr (is_const)
+				{
+					if (LIKELY(block->m_mutex.is_lockable()))
+					{
+						return true;
+					}
+
+					block->m_mutex.lock_shared();
+					return false;
+				}
+				else
+				{
+					if (LIKELY(block->m_mutex.is_free()))
+					{
+						return true;
+					}
+
+					block->m_mutex.lock();
+					return false;
+				}
+			}
+		}
+
+		template <uint I, typename Type, typename... Types, bool Lock, bool... Locks, std::size_t N>
+		bool try_lock(const std::array<typeptr_base, N>& array, uint locked, std::integer_sequence<bool, Lock, Locks...>)
+		{
+			// Try to lock mutex if not locked from the previous step
+			if (I == locked || lock_ptr<Type, Lock, true>(array[I].m_block))
+			{
+				if constexpr (I + 1 < N)
+				{
+					// Proceed recursively
+					if (LIKELY(try_lock<I + 1, Types...>(array, locked, std::integer_sequence<bool, Locks...>{})))
+					{
+						return true;
+					}
+
+					// Retire: unlock everything, including (I == locked) case
+					if constexpr (Lock)
+					{
+						if (array[I].m_block)
+						{
+							if constexpr (std::is_const_v<std::remove_reference_t<Type>>)
+							{
+								array[I].m_block->m_mutex.unlock_shared();
+							}
+							else
+							{
+								array[I].m_block->m_mutex.unlock();
+							}
+						}
+					}
+				}
+				else
+				{
+					return true;
+				}
+			}
+
+			return false;
+		}
+
+		template <typename... Types, bool... Locks, std::size_t... I>
+		uint lock_array(const std::array<typeptr_base, sizeof...(Types)>& array, std::integer_sequence<std::size_t, I...>, std::integer_sequence<bool, Locks...>)
+		{
+			// Verify all mutexes are free or wait for one of them and return its index
+			uint locked = 0;
+			((lock_ptr<Types, Locks>(array[I].m_block) && ++locked) && ...);
+			return locked;
+		}
+
+		template <typename... Types, std::size_t... I, typename... Args>
+		void check_array(std::array<typeptr_base, sizeof...(Types)>& array, std::integer_sequence<std::size_t, I...>, Args&&... ids)
+		{
+			// Check types and unlock on mismatch
+			(check_ptr<Types>(array[I].m_block, std::forward<Args>(ids)), ...);
+		}
+
+		template <typename... Types, std::size_t... I>
+		std::tuple<typeptr<Types>...> array_to_tuple(const std::array<typeptr_base, sizeof...(I)>& array, std::integer_sequence<std::size_t, I...>)
+		{
+			return {array[I]...};
+		}
+
+	public:
+		// Lock any objects by their identifiers, special tags id_new/id_any/id_always, or search predicates
+		template <typename... Types, typename... Args, typename = std::enable_if_t<sizeof...(Types) == sizeof...(Args)>>
+		auto lock(Args&&... ids)
+		{
+			static_assert(((!std::is_lvalue_reference_v<Types> == !typeinfo_poly<Types>::is_poly) && ...));
+			static_assert(((!std::is_rvalue_reference_v<Types>) && ...));
+			static_assert(((!std::is_array_v<Types>) && ...));
+			static_assert(((!std::is_void_v<Types>) && ...));
+
+			// Initialize pointers
+			std::array<typeptr_base, sizeof...(Types)> result{this->init_ptr<Types>(std::forward<Args>(ids))...};
+
+			// Whether requires locking after init_ptr
+			using locks_t = std::integer_sequence<bool, !std::is_same_v<std::decay_t<Args>, id_new_t>...>;
+
+			// Array index helper
+			using seq_t = std::index_sequence_for<Types...>;
+
+			// Lock any number of objects in safe manner
+			while (true)
+			{
+				const uint locked = lock_array<Types...>(result, seq_t{}, locks_t{});
+				if (LIKELY(try_lock<0, Types...>(result, locked, locks_t{})))
+					break;
+			}
+
+			// Verify object types
+			check_array<Types...>(result, seq_t{}, std::forward<Args>(ids)...);
+
+			// Return tuple of possibly locked pointers, or a single pointer
+			if constexpr (sizeof...(Types) != 1)
+			{
+				return array_to_tuple<Types...>(result, seq_t{});
+			}
+			else
+			{
+				return typeptr<Types...>(result[0]);
+			}
+		}
+
+		// Apply a function to all objects of one or more types
+		template <typename T, typename... Types, typename F>
+		ullong apply(F&& func)
+		{
+			static_assert(!std::is_lvalue_reference_v<T> == !typeinfo_poly<T>::is_poly);
+			static_assert(!std::is_rvalue_reference_v<T>);
+			static_assert(!std::is_array_v<T>);
+			static_assert(!std::is_void_v<T>);
+
+			const uint type_id = g_typeinfo<std::decay_t<T>>.type;
+
+			typemap_head* head;
+
+			if constexpr (typeinfo_share<T>::is_shared)
+			{
+				head = &m_map[g_sh<typename typeinfo_share<T>::share>.type];
+			}
+			else
+			{
+				head = &m_map[type_id];
+			}
+
+			const ullong ix = head->m_total;
+
+			for (std::size_t j = 0; j < (typeinfo_count<T>::max_count != 1 ? +head->m_limit : 1); j++)
+			{
+				const auto block = reinterpret_cast<typemap_block*>(head->m_ptr + j * head->m_ssize);
+
+				if (block->m_type == type_id)
+				{
+					std::lock_guard lock(block->m_mutex);
+
+					if (block->m_type == type_id)
+					{
+						if constexpr (std::is_lvalue_reference_v<T>)
+						{
+							std::invoke(std::forward<F>(func), **block->get_ptr<std::remove_reference_t<T>*>());
+						}
+						else
+						{
+							std::invoke(std::forward<F>(func), *block->get_ptr<T>());
+						}
+					}
+				}
+			}
+
+			// Return "unsigned negative" value if the creation index has increased
+			const ullong result = ix - head->m_total;
+
+			if constexpr (sizeof...(Types) > 0)
+			{
+				return (result + ... + apply<Types>(func));
+			}
+			else
+			{
+				return result;
+			}
+		}
+	};
+} // namespace utils
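+
+// Illustrative sketch (hypothetical g_map and foo): apply() visits every
+// live object of the given type under its lock; the value returned is the
+// "unsigned negative" count of objects created while iterating:
+//
+//   g_map.apply<foo>([](foo& obj)
+//   {
+//       obj.update();
+//   });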