#pragma once

#include <atomic>
#include <cassert>
#include <chrono>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <string>
#include <thread>
#include <vector>

#include "Emu/Memory/atomic_type.h"

static std::thread::id main_thread;

class NamedThreadBase
{
	std::string m_name;
	std::condition_variable m_signal_cv;
	std::mutex m_signal_mtx;

public:
	std::atomic<bool> m_tls_assigned;

	NamedThreadBase(const std::string& name) : m_name(name), m_tls_assigned(false)
	{
	}

	NamedThreadBase() : m_tls_assigned(false)
	{
	}

	virtual std::string GetThreadName() const;
	virtual void SetThreadName(const std::string& name);

	void WaitForAnySignal(u64 time = 1);
	void Notify();
};

NamedThreadBase* GetCurrentNamedThread();
void SetCurrentNamedThread(NamedThreadBase* value);

class ThreadBase : public NamedThreadBase
{
protected:
	std::atomic<bool> m_destroy;
	std::atomic<bool> m_alive;
	std::thread* m_executor;

	mutable std::mutex m_main_mutex;

	ThreadBase(const std::string& name);
	~ThreadBase();

public:
	void Start();
	void Stop(bool wait = true, bool send_destroy = true);

	bool Join() const;
	bool IsAlive() const;
	bool TestDestroy() const;

	virtual void Task() = 0;
};

class thread
{
	std::string m_name;
	std::thread m_thr;

public:
	thread(const std::string& name, std::function<void()> func);
	thread(const std::string& name);
	thread();

public:
	void start(std::function<void()> func);
	void detach();
	void join();

	bool joinable() const;
};

class slw_mutex_t
{
};

class slw_recursive_mutex_t
{
};

class slw_shared_mutex_t
{
};

class waiter_map_t
{
	// TODO: optimize (use custom lightweight readers-writer lock)
	std::mutex m_mutex;

	struct waiter_t
	{
		u64 signal_id;
		NamedThreadBase* thread;
	};

	std::vector<waiter_t> m_waiters;
	std::string m_name;

	struct waiter_reg_t
	{
		NamedThreadBase* thread;
		const u64 signal_id;
		waiter_map_t& map;

		waiter_reg_t(waiter_map_t& map, u64 signal_id)
			: thread(nullptr)
			, signal_id(signal_id)
			, map(map)
		{
		}

		~waiter_reg_t();

		void init();
	};

	bool is_stopped(u64 signal_id);

public:
	waiter_map_t(const char* name)
		: m_name(name)
	{
	}

	// wait until waiter_func() returns true; signal_id is an arbitrary number
	template<typename WT> __forceinline void wait_op(u64 signal_id, const WT waiter_func)
	{
		// register waiter
		waiter_reg_t waiter(*this, signal_id);

		// check the condition or whether the emulator is stopped
		while (!waiter_func() && !is_stopped(signal_id))
		{
			// initialize waiter (only once)
			waiter.init();

			// wait for 1 ms or until a signal arrives
			waiter.thread->WaitForAnySignal(1);
		}
	}

	// signal all threads waiting in wait_op() with the same signal_id
	// (signaling only hints those threads that the corresponding conditions are *probably* met)
	void notify(u64 signal_id);
};
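// Illustrative usage sketch (not part of this header): one thread blocks in wait_op()
// until a flag set by another thread becomes true. The map name, the signal_id and
// the `ready` flag below are assumptions made up for this example.
//
//     waiter_map_t g_example_waiters("example");
//     std::atomic<bool> ready(false);
//
//     // waiting thread: re-checks the predicate roughly every millisecond
//     g_example_waiters.wait_op(0x1234, [&]() -> bool { return ready.load(); });
//
//     // signalling thread: make the condition true, then hint the waiters
//     ready = true;
//     g_example_waiters.notify(0x1234);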
bool squeue_test_exit(const volatile bool* do_exit);

template<typename T, u32 sq_size = 256>
class squeue_t
{
	// queue state packed into 64 bits so it can be read and updated with a single atomic operation
	struct squeue_sync_var_t
	{
		struct
		{
			u32 position : 31;
			u32 read_lock : 1;
		};
		struct
		{
			u32 count : 31;
			u32 write_lock : 1;
		};
	};

	atomic_le_t<squeue_sync_var_t> m_sync;

	mutable std::mutex m_rcv_mutex, m_wcv_mutex;
	mutable std::condition_variable m_rcv, m_wcv;

	T m_data[sq_size];

public:
	squeue_t()
	{
		m_sync.write_relaxed({});
	}

	u32 get_max_size() const
	{
		return sq_size;
	}

	bool is_full() const volatile
	{
		return m_sync.read_relaxed().count == sq_size;
	}

	// append an element; blocks while the queue is full, returns false if *do_exit is set
	bool push(const T& data, const volatile bool* do_exit = nullptr)
	{
		u32 pos = 0;

		while (!m_sync.atomic_op_sync(true, [&pos](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.write_lock || sync.count == sq_size)
			{
				return false;
			}

			sync.write_lock = 1;
			pos = sync.position + sync.count;
			return true;
		}))
		{
			if (squeue_test_exit(do_exit))
			{
				return false;
			}

			std::unique_lock<std::mutex> wcv_lock(m_wcv_mutex);
			m_wcv.wait_for(wcv_lock, std::chrono::milliseconds(1));
		}

		m_data[pos >= sq_size ? pos - sq_size : pos] = data;

		m_sync.atomic_op([](squeue_sync_var_t& sync)
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);
			assert(sync.write_lock);
			sync.write_lock = 0;
			sync.count++;
		});

		m_rcv.notify_one();
		m_wcv.notify_one();
		return true;
	}

	bool try_push(const T& data)
	{
		static const volatile bool no_wait = true;

		return push(data, &no_wait);
	}

	// remove the oldest element; blocks while the queue is empty, returns false if *do_exit is set
	bool pop(T& data, const volatile bool* do_exit = nullptr)
	{
		u32 pos = 0;

		while (!m_sync.atomic_op_sync(true, [&pos](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.read_lock || !sync.count)
			{
				return false;
			}

			sync.read_lock = 1;
			pos = sync.position;
			return true;
		}))
		{
			if (squeue_test_exit(do_exit))
			{
				return false;
			}

			std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
			m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
		}

		data = m_data[pos];

		m_sync.atomic_op([](squeue_sync_var_t& sync)
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);
			assert(sync.read_lock);
			sync.read_lock = 0;
			sync.position++;
			sync.count--;

			if (sync.position == sq_size)
			{
				sync.position = 0;
			}
		});

		m_rcv.notify_one();
		m_wcv.notify_one();
		return true;
	}

	bool try_pop(T& data)
	{
		static const volatile bool no_wait = true;

		return pop(data, &no_wait);
	}

	// drop all elements once no reader or writer holds the lock
	void clear()
	{
		while (!m_sync.atomic_op_sync(true, [](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.read_lock || sync.write_lock)
			{
				return false;
			}

			sync.read_lock = 1;
			sync.write_lock = 1;
			return true;
		}))
		{
			std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
			m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
		}

		m_sync.exchange({});
		m_wcv.notify_one();
		m_rcv.notify_one();
	}

	// read the element at start_pos without removing it; blocks until enough elements are queued
	bool peek(T& data, u32 start_pos = 0, const volatile bool* do_exit = nullptr)
	{
		assert(start_pos < sq_size);
		u32 pos = 0;

		while (!m_sync.atomic_op_sync(true, [&pos, start_pos](squeue_sync_var_t& sync) -> bool
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);

			if (sync.read_lock || sync.count <= start_pos)
			{
				return false;
			}

			sync.read_lock = 1;
			pos = sync.position + start_pos;
			return true;
		}))
		{
			if (squeue_test_exit(do_exit))
			{
				return false;
			}

			std::unique_lock<std::mutex> rcv_lock(m_rcv_mutex);
			m_rcv.wait_for(rcv_lock, std::chrono::milliseconds(1));
		}

		data = m_data[pos >= sq_size ? pos - sq_size : pos];

		m_sync.atomic_op([](squeue_sync_var_t& sync)
		{
			assert(sync.count <= sq_size);
			assert(sync.position < sq_size);
			assert(sync.read_lock);
			sync.read_lock = 0;
		});

		m_rcv.notify_one();
		return true;
	}

	bool try_peek(T& data, u32 start_pos = 0)
	{
		static const volatile bool no_wait = true;

		return peek(data, start_pos, &no_wait);
	}
};
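
// Illustrative usage sketch (not part of this header): a fixed-capacity queue of work
// items shared by a producer and a consumer thread. `job_t`, `process()` and the exit
// flag are assumptions made up for this example; push()/pop() block until space/data
// is available, or return false once *do_exit becomes true.
//
//     squeue_t<job_t, 256> g_jobs;
//     volatile bool g_exit = false;
//
//     // producer thread
//     g_jobs.push(job, &g_exit);
//
//     // consumer thread
//     job_t job;
//     while (g_jobs.pop(job, &g_exit))
//     {
//         process(job);
//     }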