From 011aabe9ed6307ffe827f0689f2fced116488755 Mon Sep 17 00:00:00 2001
From: Eladash
Date: Thu, 28 Jul 2022 14:10:16 +0300
Subject: [PATCH] LV2: Make sys_mutex and sys_lwmutex lock-free, add some busy
 waiting in sys_mutex_lock

---
 rpcs3/Emu/Cell/PPUThread.h            |   4 +-
 rpcs3/Emu/Cell/SPUThread.h            |   2 +-
 rpcs3/Emu/Cell/lv2/lv2.cpp            |  61 ++++++++++---
 rpcs3/Emu/Cell/lv2/sys_cond.cpp       |  68 +++++++++------
 rpcs3/Emu/Cell/lv2/sys_cond.h         |   2 +-
 rpcs3/Emu/Cell/lv2/sys_event.h        |   4 +-
 rpcs3/Emu/Cell/lv2/sys_event_flag.cpp |  17 ++--
 rpcs3/Emu/Cell/lv2/sys_event_flag.h   |   2 +-
 rpcs3/Emu/Cell/lv2/sys_lwcond.cpp     |  51 +++++------
 rpcs3/Emu/Cell/lv2/sys_lwcond.h       |   2 +-
 rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp    |  68 ++++++++-------
 rpcs3/Emu/Cell/lv2/sys_lwmutex.h      |  93 ++++++++++++++++++--
 rpcs3/Emu/Cell/lv2/sys_mutex.cpp      |  43 +++++++---
 rpcs3/Emu/Cell/lv2/sys_mutex.h        | 118 +++++++++++++++++---------
 rpcs3/Emu/Cell/lv2/sys_rwlock.cpp     |   7 +-
 rpcs3/Emu/Cell/lv2/sys_rwlock.h       |   4 +-
 rpcs3/Emu/Cell/lv2/sys_semaphore.h    |   2 +-
 rpcs3/Emu/Cell/lv2/sys_sync.h         |  64 ++++++++------
 rpcs3/Emu/Cell/lv2/sys_usbd.cpp       |   6 +-
 rpcs3/rpcs3qt/kernel_explorer.cpp     |   8 +-
 20 files changed, 402 insertions(+), 224 deletions(-)

diff --git a/rpcs3/Emu/Cell/PPUThread.h b/rpcs3/Emu/Cell/PPUThread.h
index 16e41c51e8..eea64ded55 100644
--- a/rpcs3/Emu/Cell/PPUThread.h
+++ b/rpcs3/Emu/Cell/PPUThread.h
@@ -323,8 +323,8 @@ public:
     std::shared_ptr<utils::serial> optional_savestate_state;
     bool interrupt_thread_executing = false;
 
-    atomic_t<ppu_thread*> next_cpu{}; // LV2 sleep queues' node link
-    atomic_t<ppu_thread*> next_ppu{}; // LV2 PPU running queue's node link
+    ppu_thread* next_cpu{}; // LV2 sleep queues' node link
+    ppu_thread* next_ppu{}; // LV2 PPU running queue's node link
 
     bool ack_suspend = false;
 
     be_t<u64>* get_stack_arg(s32 i, u64 align = alignof(u64));
diff --git a/rpcs3/Emu/Cell/SPUThread.h b/rpcs3/Emu/Cell/SPUThread.h
index a06abed34e..0f71e4b36e 100644
--- a/rpcs3/Emu/Cell/SPUThread.h
+++ b/rpcs3/Emu/Cell/SPUThread.h
@@ -815,7 +815,7 @@ public:
     const u32 option; // sys_spu_thread_initialize option
     const u32 lv2_id; // The actual id that is used by syscalls
 
-    atomic_t<spu_thread*> next_cpu{}; // LV2 thread queues' node link
+    spu_thread* next_cpu{}; // LV2 thread queues' node link
 
     // Thread name
     atomic_ptr<std::string> spu_tname;
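The two hunks above demote the intrusive queue links (next_cpu, next_ppu) from atomic_t wrappers to plain pointers; correctness is preserved because every access that can actually race is routed through an explicit atomic_storage load or release at the queue boundaries. A minimal standalone sketch of that idea, using std::atomic_ref as a stand-in for rpcs3's atomic_storage (C++20; all names here are invented for illustration):

    #include <atomic>

    // Hypothetical waiter node; 'next' plays the role of next_cpu above.
    struct node
    {
        int id{};
        node* next{}; // plain pointer, touched atomically only at the boundaries
    };

    // Publish a node at the head of the wait list (writer side).
    void push(node*& head, node* n)
    {
        n->next = head; // plain write: the node is not visible to anyone yet
        std::atomic_ref<node*>(head).store(n, std::memory_order_release);
    }

    // Lock-free peek from another thread (reader side).
    bool has_waiters(node*& head)
    {
        return std::atomic_ref<node*>(head).load(std::memory_order_acquire) != nullptr;
    }

Interior links are only ever modified by the holder of the object lock, or through the control-word CAS introduced later in this patch, which is why they no longer need to be atomic themselves.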
diff --git a/rpcs3/Emu/Cell/lv2/lv2.cpp b/rpcs3/Emu/Cell/lv2/lv2.cpp
index 0ee2798433..9a6ec2cee3 100644
--- a/rpcs3/Emu/Cell/lv2/lv2.cpp
+++ b/rpcs3/Emu/Cell/lv2/lv2.cpp
@@ -1276,7 +1276,7 @@ void lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout, bool notify_later)
     {
         ppu_log.trace("sleep() - waiting (%zu)", g_pending);
 
-        const auto [_ ,ok] = ppu->state.fetch_op([&](bs_t<cpu_flag>& val)
+        const auto [_, ok] = ppu->state.fetch_op([&](bs_t<cpu_flag>& val)
         {
             if (!(val & cpu_flag::signal))
             {
@@ -1289,7 +1289,7 @@ void lv2_obj::sleep_unlocked(cpu_thread& thread, u64 timeout, bool notify_later)
 
         if (!ok)
         {
-            ppu_log.fatal("sleep() failed (signaled) (%s)", ppu->current_function);
+            ppu_log.trace("sleep() failed (signaled) (%s)", ppu->current_function);
             return;
         }
 
@@ -1414,21 +1414,21 @@ bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
                 auto ppu2 = +*ppu2_next;
 
                 // Rotate current thread to the last position of the 'same prio' threads list
-                ppu_next->release(ppu2);
+                *ppu_next = ppu2;
 
                 // Exchange forward pointers
                 if (ppu->next_ppu != ppu2)
                 {
                     auto ppu2_val = +ppu2->next_ppu;
-                    ppu2->next_ppu.release(+ppu->next_ppu);
-                    ppu->next_ppu.release(ppu2_val);
-                    ppu2_next->release(ppu);
+                    ppu2->next_ppu = +ppu->next_ppu;
+                    ppu->next_ppu = ppu2_val;
+                    *ppu2_next = ppu;
                 }
                 else
                 {
                     auto ppu2_val = +ppu2->next_ppu;
-                    ppu2->next_ppu.release(ppu);
-                    ppu->next_ppu.release(ppu2_val);
+                    ppu2->next_ppu = ppu;
+                    ppu->next_ppu = ppu2_val;
                 }
 
                 if (i <= g_cfg.core.ppu_threads + 0u)
@@ -1468,8 +1468,8 @@ bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
             // Use priority, also preserve FIFO order
             if (!next || next->prio > static_cast<ppu_thread*>(cpu)->prio)
             {
-                it->release(static_cast<ppu_thread*>(cpu));
-                static_cast<ppu_thread*>(cpu)->next_ppu.release(next);
+                atomic_storage<ppu_thread*>::release(static_cast<ppu_thread*>(cpu)->next_ppu, next);
+                atomic_storage<ppu_thread*>::release(*it, static_cast<ppu_thread*>(cpu));
                 break;
             }
 
@@ -1496,12 +1496,34 @@ bool lv2_obj::awake_unlocked(cpu_thread* cpu, bool notify_later, s32 prio)
     if (cpu)
     {
         // Emplace current thread
-        changed_queue = emplace_thread(cpu);
+        if (!emplace_thread(cpu))
+        {
+            if (notify_later)
+            {
+                // notify_later includes common optimizations regarding syscalls,
+                // one of which is letting awake behave like a semaphore post for lock-free syscalls:
+                // the thread is always notified, even if it hasn't slept yet
+                cpu->state += cpu_flag::signal;
+            }
+        }
+        else
+        {
+            changed_queue = true;
+        }
     }
     else for (const auto _cpu : g_to_awake)
     {
         // Emplace threads from list
-        changed_queue |= emplace_thread(_cpu);
+        if (!emplace_thread(_cpu))
+        {
+            if (notify_later)
+            {
+                _cpu->state += cpu_flag::signal;
+            }
+        }
+        else
+        {
+            changed_queue = true;
+        }
     }
 
     // Remove pending if necessary
@@ -1662,3 +1684,18 @@ bool lv2_obj::is_scheduler_ready()
     reader_lock lock(g_mutex);
     return g_to_sleep.empty();
 }
+
+bool lv2_obj::has_ppus_in_running_state()
+{
+    auto target = atomic_storage<ppu_thread*>::load(g_ppu);
+
+    for (usz i = 0, thread_count = g_cfg.core.ppu_threads; target; target = atomic_storage<ppu_thread*>::load(target->next_ppu), i++)
+    {
+        if (i >= thread_count)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
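When emplace_thread fails under notify_later in the hunk above, the thread receives cpu_flag::signal instead, so awake effectively behaves like a semaphore post: a wakeup that arrives before the target has parked is remembered rather than lost. A rough standalone model of those semantics (C++20 atomic wait/notify; invented names, not the emulator's actual flag machinery):

    #include <atomic>

    struct waiter
    {
        std::atomic<bool> signal{false}; // stands in for cpu_flag::signal

        void sleep()
        {
            // Consume a pending wakeup instead of blocking; otherwise park.
            while (!signal.exchange(false))
            {
                signal.wait(false); // blocks only while no signal is pending
            }
        }
    };

    void post(waiter& w)
    {
        // Always set the flag, even if the waiter has not gone to sleep yet,
        // mirroring 'cpu->state += cpu_flag::signal' when emplace_thread fails.
        w.signal.store(true);
        w.signal.notify_one();
    }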
diff --git a/rpcs3/Emu/Cell/lv2/sys_cond.cpp b/rpcs3/Emu/Cell/lv2/sys_cond.cpp
index 97b13be5ac..f12bab35eb 100644
--- a/rpcs3/Emu/Cell/lv2/sys_cond.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_cond.cpp
@@ -1,5 +1,4 @@
 #include "stdafx.h"
-#include "sys_cond.h"
 #include "util/serialization.hpp"
 
 #include "Emu/IdManager.h"
@@ -9,6 +8,8 @@
 #include "Emu/Cell/ErrorCodes.h"
 #include "Emu/Cell/PPUThread.h"
 
+#include "sys_cond.h"
+
 #include "util/asm.hpp"
 
 LOG_CHANNEL(sys_cond);
@@ -112,7 +113,7 @@ error_code sys_cond_destroy(ppu_thread& ppu, u32 cond_id)
     {
         std::lock_guard lock(cond.mutex->mutex);
 
-        if (cond.sq)
+        if (atomic_storage<ppu_thread*>::load(cond.sq))
         {
             return CELL_EBUSY;
         }
@@ -143,7 +144,7 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
 
     const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
     {
-        if (cond.sq)
+        if (atomic_storage<ppu_thread*>::load(cond.sq))
         {
             lv2_obj::notify_all_t notify;
 
@@ -159,7 +160,7 @@ error_code sys_cond_signal(ppu_thread& ppu, u32 cond_id)
 
             // TODO: Is EBUSY returned after requeueing, on sys_cond_destroy?
 
-            if (cond.mutex->try_own(*cpu, cpu->id))
+            if (cond.mutex->try_own(*cpu))
             {
                 cond.awake(cpu, true);
             }
@@ -183,7 +184,7 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
 
     const auto cond = idm::check<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond)
     {
-        if (cond.sq)
+        if (atomic_storage<ppu_thread*>::load(cond.sq))
         {
             lv2_obj::notify_all_t notify;
 
@@ -199,12 +200,12 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id)
             }
 
             cpu_thread* result = nullptr;
-            decltype(cond.sq) sq{+cond.sq};
-            cond.sq.release(nullptr);
+            auto sq = cond.sq;
+            atomic_storage<ppu_thread*>::release(cond.sq, nullptr);
 
             while (const auto cpu = cond.schedule<ppu_thread>(sq, SYS_SYNC_PRIORITY))
             {
-                if (cond.mutex->try_own(*cpu, cpu->id))
+                if (cond.mutex->try_own(*cpu))
                 {
                     ensure(!std::exchange(result, cpu));
                 }
@@ -238,7 +239,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
             return -1;
         }
 
-        if (cond.sq)
+        if (atomic_storage<ppu_thread*>::load(cond.sq))
         {
             lv2_obj::notify_all_t notify;
 
@@ -256,7 +257,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id)
 
             ensure(cond.unqueue(cond.sq, cpu));
 
-            if (cond.mutex->try_own(*cpu, cpu->id))
+            if (cond.mutex->try_own(*cpu))
             {
                 cond.awake(cpu, true);
             }
@@ -295,7 +296,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 
     const auto cond = idm::get<lv2_obj, lv2_cond>(cond_id, [&](lv2_cond& cond) -> s64
     {
-        if (!ppu.loaded_from_savestate && cond.mutex->owner >> 1 != ppu.id)
+        if (!ppu.loaded_from_savestate && atomic_storage<u32>::load(cond.mutex->control.raw().owner) != ppu.id)
         {
             return -1;
         }
@@ -307,19 +308,18 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
         const u64 syscall_state = sstate.try_read<u64>().second;
         sstate.clear();
 
-        if (syscall_state & 1)
-        {
-            // Mutex sleep
-            ensure(!cond.mutex->try_own(ppu, ppu.id));
-        }
-        else
-        {
-            // Register waiter
-            lv2_obj::emplace(cond.sq, &ppu);
-        }
-
         if (ppu.loaded_from_savestate)
         {
+            if (syscall_state & 1)
+            {
+                // Mutex sleep
+                ensure(!cond.mutex->try_own(ppu));
+            }
+            else
+            {
+                lv2_obj::emplace(cond.sq, &ppu);
+            }
+
             cond.sleep(ppu, timeout, true);
             return static_cast<s32>(syscall_state >> 32);
         }
@@ -329,9 +329,18 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
 
         if (const auto cpu = cond.mutex->reown<ppu_thread>())
         {
+            if (cpu->state & cpu_flag::again)
+            {
+                ppu.state += cpu_flag::again;
+                return 0;
+            }
+
             cond.mutex->append(cpu);
         }
 
+        // Register waiter
+        lv2_obj::emplace(cond.sq, &ppu);
+
         // Sleep current thread and schedule mutex waiter
         cond.sleep(ppu, timeout, true);
 
@@ -344,6 +353,11 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
         return CELL_ESRCH;
     }
 
+    if (ppu.state & cpu_flag::again)
+    {
+        return {};
+    }
+
     if (cond.ret < 0)
     {
         return CELL_EPERM;
@@ -363,7 +377,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
             bool mutex_sleep = false;
             bool cond_sleep = false;
 
-            for (auto cpu = +cond->sq; cpu; cpu = cpu->next_cpu)
+            for (auto cpu = atomic_storage<ppu_thread*>::load(cond->sq); cpu; cpu = cpu->next_cpu)
             {
                 if (cpu == &ppu)
                 {
                     cond_sleep = true;
                     break;
                 }
             }
 
-            for (auto cpu = +cond->mutex->sq; cpu; cpu = cpu->next_cpu)
+            for (auto cpu = atomic_storage<ppu_thread*>::load(cond->mutex->control.raw().sq); cpu; cpu = cpu->next_cpu)
             {
                 if (cpu == &ppu)
                 {
                     mutex_sleep = true;
                     break;
                 }
             }
@@ -422,12 +436,12 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
                 ppu.gpr[3] = CELL_ETIMEDOUT;
 
                 // Own or requeue
-                if (cond->mutex->try_own(ppu, ppu.id))
+                if (cond->mutex->try_own(ppu))
                 {
                     break;
                 }
             }
-            else if (cond->mutex->owner >> 1 == ppu.id)
+            else if (atomic_storage<u32>::load(cond->mutex->control.raw().owner) == ppu.id)
             {
                 break;
             }
@@ -444,7 +458,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout)
     }
 
     // Verify ownership
-    ensure(cond->mutex->owner >> 1 == ppu.id);
+    ensure(atomic_storage<u32>::load(cond->mutex->control.raw().owner) == ppu.id);
 
     // Restore the recursive value
     cond->mutex->lock_count.release(static_cast<u32>(cond.ret));
diff --git a/rpcs3/Emu/Cell/lv2/sys_cond.h b/rpcs3/Emu/Cell/lv2/sys_cond.h
index 0b4f4123ec..54613250ed 100644
--- a/rpcs3/Emu/Cell/lv2/sys_cond.h
+++ b/rpcs3/Emu/Cell/lv2/sys_cond.h
@@ -27,7 +27,7 @@ struct lv2_cond final : lv2_obj
     const u32 mtx_id;
 
     std::shared_ptr<lv2_mutex> mutex; // Associated Mutex
-    atomic_t<ppu_thread*> sq{};
+    ppu_thread* sq{};
 
     lv2_cond(u64 key, u64 name, u32 mtx_id, std::shared_ptr<lv2_mutex> mutex)
         : key(key)
diff --git a/rpcs3/Emu/Cell/lv2/sys_event.h b/rpcs3/Emu/Cell/lv2/sys_event.h
index a464508763..9f01a235d7 100644
--- a/rpcs3/Emu/Cell/lv2/sys_event.h
+++ b/rpcs3/Emu/Cell/lv2/sys_event.h
@@ -92,8 +92,8 @@ struct lv2_event_queue final : public lv2_obj
     shared_mutex mutex;
     std::deque<lv2_event> events;
-    atomic_t<spu_thread*> sq{};
-    atomic_t<ppu_thread*> pq{};
+    spu_thread* sq{};
+    ppu_thread* pq{};
 
     lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64 ipc_key) noexcept;
 
diff --git a/rpcs3/Emu/Cell/lv2/sys_event_flag.cpp b/rpcs3/Emu/Cell/lv2/sys_event_flag.cpp
index 4b9473acaa..7e14daacad 100644
--- a/rpcs3/Emu/Cell/lv2/sys_event_flag.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_event_flag.cpp
@@ -349,17 +349,12 @@ error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
 
         auto get_next = [&]() -> ppu_thread*
         {
-            if (flag->protocol != SYS_SYNC_PRIORITY)
-            {
-                return std::exchange(first, first ? +first->next_cpu : nullptr);
-            }
-
             s32 prio = smax;
             ppu_thread* it{};
 
             for (auto ppu = first; ppu; ppu = ppu->next_cpu)
             {
-                if (!ppu->gpr[7] && ppu->prio < prio)
+                if (!ppu->gpr[7] && (flag->protocol != SYS_SYNC_PRIORITY || ppu->prio <= prio))
                 {
                     it = ppu;
                     prio = ppu->prio;
                 }
@@ -404,12 +399,12 @@ error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
         // Remove waiters
         for (auto next_cpu = &flag->sq; *next_cpu;)
         {
-            auto& ppu = *+*next_cpu;
+            auto& ppu = **next_cpu;
 
             if (ppu.gpr[3] == CELL_OK)
             {
-                next_cpu->release(+ppu.next_cpu);
-                ppu.next_cpu.release(nullptr);
+                atomic_storage<ppu_thread*>::release(*next_cpu, ppu.next_cpu);
+                ppu.next_cpu = nullptr;
                 flag->append(&ppu);
                 continue;
             }
@@ -474,7 +469,7 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
         const u64 pattern = flag->pattern;
 
         // Signal all threads to return CELL_ECANCELED (protocol does not matter)
-        for (auto ppu = +flag->sq; ppu; ppu = ppu->next_cpu)
+        while (auto ppu = flag->schedule<ppu_thread>(flag->sq, SYS_SYNC_FIFO))
         {
             ppu->gpr[3] = CELL_ECANCELED;
             ppu->gpr[6] = pattern;
 
             flag->append(ppu);
         }
 
@@ -483,8 +478,6 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
-        flag->sq.release(nullptr);
-
         if (value)
         {
             lv2_obj::awake_all();
         }
diff --git a/rpcs3/Emu/Cell/lv2/sys_event_flag.h b/rpcs3/Emu/Cell/lv2/sys_event_flag.h
index 090d47b03c..6af8887b99 100644
--- a/rpcs3/Emu/Cell/lv2/sys_event_flag.h
+++ b/rpcs3/Emu/Cell/lv2/sys_event_flag.h
@@ -42,7 +42,7 @@ struct lv2_event_flag final : lv2_obj
     shared_mutex mutex;
     atomic_t<u64> pattern;
-    atomic_t<ppu_thread*> sq{};
+    ppu_thread* sq{};
 
     lv2_event_flag(u32 protocol, u64 key, s32 type, u64 name, u64 pattern) noexcept
         : protocol{static_cast<lv2_protocol>(protocol)}
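sys_event_flag_cancel above now drains waiters through schedule() instead of walking the list and releasing the head afterwards, so every popped node leaves the queue with its link already cleared. Because emplace (changed later in this patch) pushes at the head, FIFO order means popping from the tail; a minimal sketch of that pop with invented names:

    struct node { node* next{}; };

    // FIFO pop: the oldest waiter sits at the end of the list.
    node* pop_fifo(node*& head)
    {
        node** parent = &head;
        node* it = head;

        if (!it)
        {
            return nullptr;
        }

        while (it->next) // walk to the tail
        {
            parent = &it->next;
            it = it->next;
        }

        *parent = nullptr; // unlink; the node leaves fully detached
        return it;
    }

    void drain(node*& head)
    {
        while (node* n = pop_fifo(head))
        {
            // requeue or resume the waiter here
        }
    }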
diff --git a/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp b/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
index 3ae56020de..16c4996ed5 100644
--- a/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp
@@ -1,4 +1,4 @@
-#include "stdafx.h"
+#include "stdafx.h"
 #include "sys_lwcond.h"
 
 #include "Emu/IdManager.h"
@@ -65,7 +65,7 @@ error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
 
     const auto cond = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
     {
-        if (cond.sq)
+        if (atomic_storage<ppu_thread*>::load(cond.sq))
         {
             return CELL_EBUSY;
         }
@@ -127,7 +127,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ppu_thread_id, u32 mode)
         }
     }
 
-    if (cond.sq)
+    if (atomic_storage<ppu_thread*>::load(cond.sq))
     {
         lv2_obj::notify_all_t notify;
 
@@ -160,16 +160,15 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 ppu_thread_id, u32 mode)
 
         if (mode != 2)
         {
-            ensure(!mutex->signaled);
-            std::lock_guard lock(mutex->mutex);
-
-            if (mode == 3 && mutex->sq) [[unlikely]]
+            if (mode == 3 && mutex->load_sq()) [[unlikely]]
             {
+                std::lock_guard lock(mutex->mutex);
+
                 // Respect ordering of the sleep queue
-                lv2_obj::emplace(mutex->sq, result);
-                result = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol);
+                mutex->try_own(result, true);
+                auto result2 = mutex->reown<ppu_thread>();
 
-                if (static_cast<ppu_thread*>(result)->state & cpu_flag::again)
+                if (result2->state & cpu_flag::again)
                 {
                     ppu.state += cpu_flag::again;
                     return 0;
                 }
+
+                result = result2;
             }
             else if (mode == 1)
             {
-                mutex->add_waiter(result);
+                mutex->try_own(result, true);
                 result = nullptr;
             }
         }
@@ -253,7 +252,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
         }
     }
 
-    if (cond.sq)
+    if (atomic_storage<ppu_thread*>::load(cond.sq))
     {
         lv2_obj::notify_all_t notify;
 
@@ -270,8 +269,8 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
             }
         }
 
-        decltype(cond.sq) sq{+cond.sq};
-        cond.sq.release(nullptr);
+        auto sq = cond.sq;
+        atomic_storage<ppu_thread*>::release(cond.sq, nullptr);
 
         while (const auto cpu = cond.schedule<ppu_thread>(sq, cond.protocol))
         {
@@ -282,9 +281,7 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u32 mode)
 
             if (mode == 1)
             {
-                ensure(!mutex->signaled);
-                std::lock_guard lock(mutex->mutex);
-                mutex->add_waiter(cpu);
+                mutex->try_own(cpu, true);
             }
             else
             {
@@ -353,8 +350,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
         if (mutex_sleep)
         {
             // Special: loading state from the point of waiting on lwmutex sleep queue
-            std::lock_guard lock2(mutex->mutex);
-            lv2_obj::emplace(mutex->sq, &ppu);
+            mutex->try_own(&ppu, true);
         }
         else
         {
@@ -362,25 +358,22 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
             // Register waiter
             lv2_obj::emplace(cond.sq, &ppu);
         }
 
-        if (!ppu.loaded_from_savestate)
+        if (!ppu.loaded_from_savestate && !mutex->try_unlock(false))
        {
             std::lock_guard lock2(mutex->mutex);
 
             // Process lwmutex sleep queue
-            if (const auto cpu = mutex->schedule<ppu_thread>(mutex->sq, mutex->protocol))
+            if (const auto cpu = mutex->reown<ppu_thread>())
             {
                 if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
                 {
+                    ensure(cond.unqueue(cond.sq, &ppu));
                     ppu.state += cpu_flag::again;
                     return;
                 }
 
                 cond.append(cpu);
             }
-            else
-            {
-                mutex->signaled |= 1;
-            }
         }
 
         // Sleep current thread and schedule lwmutex waiter
@@ -412,7 +405,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
             bool mutex_sleep = false;
             bool cond_sleep = false;
 
-            for (auto cpu = +mutex->sq; cpu; cpu = cpu->next_cpu)
+            for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
             {
                 if (cpu == &ppu)
                 {
                     mutex_sleep = true;
                     break;
                 }
             }
@@ -421,7 +414,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
 
-            for (auto cpu = +mutex->sq; cpu; cpu = cpu->next_cpu)
+            for (auto cpu = atomic_storage<ppu_thread*>::load(cond->sq); cpu; cpu = cpu->next_cpu)
             {
                 if (cpu == &ppu)
                 {
                     cond_sleep = true;
                     break;
                 }
             }
@@ -472,7 +465,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u64 timeout)
 
         bool mutex_sleep = false;
 
-        for (auto cpu = +mutex->sq; cpu; cpu = cpu->next_cpu)
+        for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
         {
             if (cpu == &ppu)
             {
diff --git a/rpcs3/Emu/Cell/lv2/sys_lwcond.h b/rpcs3/Emu/Cell/lv2/sys_lwcond.h
index a52f87efaf..58246f9b28 100644
--- a/rpcs3/Emu/Cell/lv2/sys_lwcond.h
+++ b/rpcs3/Emu/Cell/lv2/sys_lwcond.h
@@ -31,7 +31,7 @@ struct lv2_lwcond final : lv2_obj
     vm::ptr<sys_lwcond_t> control;
 
     shared_mutex mutex;
-    atomic_t<ppu_thread*> sq{};
+    ppu_thread* sq{};
 
     lv2_lwcond(u64 name, u32 lwid, u32 protocol, vm::ptr<sys_lwcond_t> control) noexcept
         : name(std::bit_cast<be_t<u64>>(name))
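The mode 1 and mode 3 paths above requeue a signaled thread onto the lwmutex sleep queue (try_own with wait_only = true) rather than waking it, so it only runs once the mutex is actually handed over. Conceptually a requeue is a pop from one intrusive list and a push onto another; a sketch reusing the node/pop_fifo helpers from the previous sketch (illustrative only):

    // Move one waiter from the condition queue to the mutex queue without waking it.
    void requeue(node*& cond_q, node*& mutex_q)
    {
        if (node* n = pop_fifo(cond_q)) // oldest waiter leaves the condition queue
        {
            n->next = mutex_q;          // ...and now waits for the mutex instead
            mutex_q = n;
        }
    }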
diff --git a/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp b/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp
index 2aaf24c769..4eef449bc4 100644
--- a/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_lwmutex.cpp
@@ -14,13 +14,13 @@ lv2_lwmutex::lv2_lwmutex(utils::serial& ar)
     : protocol(ar)
     , control(ar.operator decltype(control)())
     , name(ar.operator be_t<u64>())
-    , signaled(ar)
 {
+    ar(lv2_control.raw().signaled);
 }
 
 void lv2_lwmutex::save(utils::serial& ar)
 {
-    ar(protocol, control, name, signaled);
+    ar(protocol, control, name, lv2_control.raw().signaled);
 }
 
 error_code _sys_lwmutex_create(ppu_thread& ppu, vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, s32 has_name, u64 name)
@@ -72,7 +72,7 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
 
         std::lock_guard lock(mutex.mutex);
 
-        if (mutex.sq)
+        if (mutex.load_sq())
         {
             return CELL_EBUSY;
         }
@@ -141,29 +141,30 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 
     const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
     {
-        if (mutex.signaled.try_dec(0))
+        if (s32 signal = mutex.lv2_control.fetch_op([](auto& data)
+        {
+            if (data.signaled == 1)
+            {
+                data.signaled = 0;
+                return true;
+            }
+
+            return false;
+        }).first.signaled)
         {
+            if (signal == smin)
+            {
+                ppu.gpr[3] = CELL_EBUSY;
+            }
+
             return true;
         }
 
         lv2_obj::notify_all_t notify(ppu);
 
-        std::lock_guard lock(mutex.mutex);
-
-        auto [old, _] = mutex.signaled.fetch_op([](s32& value)
+        if (s32 signal = mutex.try_own(&ppu))
         {
-            if (value)
-            {
-                value = 0;
-                return true;
-            }
-
-            return false;
-        });
-
-        if (old)
-        {
-            if (old == smin)
+            if (signal == smin)
             {
                 ppu.gpr[3] = CELL_EBUSY;
             }
 
             return true;
         }
 
         mutex.sleep(ppu, timeout, true);
-        mutex.add_waiter(&ppu);
         return false;
     });
@@ -197,7 +197,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
         {
             std::lock_guard lock(mutex->mutex);
 
-            for (auto cpu = +mutex->sq; cpu; cpu = cpu->next_cpu)
+            for (auto cpu = mutex->load_sq(); cpu; cpu = cpu->next_cpu)
             {
                 if (cpu == &ppu)
                 {
@@ -231,7 +231,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 
             std::lock_guard lock(mutex->mutex);
 
-            if (!mutex->unqueue(mutex->sq, &ppu))
+            if (!mutex->unqueue(mutex->lv2_control.raw().sq, &ppu))
             {
                 break;
             }
@@ -257,11 +257,11 @@ error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id)
 
     const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
     {
-        auto [_, ok] = mutex.signaled.fetch_op([](s32& value)
+        auto [_, ok] = mutex.lv2_control.fetch_op([](auto& data)
         {
-            if (value & 1)
+            if (data.signaled & 1)
             {
-                value = 0;
+                data.signaled = 0;
                 return true;
             }
 
             return false;
         });
@@ -292,11 +292,16 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
 
     const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
     {
+        if (mutex.try_unlock(false))
+        {
+            return;
+        }
+
         lv2_obj::notify_all_t notify;
 
         std::lock_guard lock(mutex.mutex);
 
-        if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
+        if (const auto cpu = mutex.reown<ppu_thread>())
         {
             if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
             {
@@ -307,8 +312,6 @@ error_code _sys_lwmutex_unlock(ppu_thread& ppu, u32 lwmutex_id)
             mutex.awake(cpu, true);
             return;
         }
-
-        mutex.signaled |= 1;
     });
 
     if (!mutex)
@@ -327,11 +330,16 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
 
     const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
     {
+        if (mutex.try_unlock(true))
+        {
+            return;
+        }
+
         lv2_obj::notify_all_t notify;
 
         std::lock_guard lock(mutex.mutex);
 
-        if (const auto cpu = mutex.schedule<ppu_thread>(mutex.sq, mutex.protocol))
+        if (const auto cpu = mutex.reown<ppu_thread>(true))
        {
             if (static_cast<ppu_thread*>(cpu)->state & cpu_flag::again)
             {
@@ -343,8 +351,6 @@ error_code _sys_lwmutex_unlock2(ppu_thread& ppu, u32 lwmutex_id)
             mutex.awake(cpu, true);
             return;
         }
-
-        mutex.signaled |= smin;
     });
 
     if (!mutex)
diff --git a/rpcs3/Emu/Cell/lv2/sys_lwmutex.h b/rpcs3/Emu/Cell/lv2/sys_lwmutex.h
index ae31fa2fb4..75ed396856 100644
--- a/rpcs3/Emu/Cell/lv2/sys_lwmutex.h
+++ b/rpcs3/Emu/Cell/lv2/sys_lwmutex.h
@@ -60,10 +60,17 @@ struct lv2_lwmutex final : lv2_obj
     const be_t<u64> name;
 
     shared_mutex mutex;
-    atomic_t<s32> signaled{0};
-    atomic_t<ppu_thread*> sq{};
     atomic_t<s32> lwcond_waiters{0};
 
+    struct alignas(16) control_data_t
+    {
+        s32 signaled{0};
+        u32 reserved{};
+        ppu_thread* sq{};
+    };
+
+    atomic_t<control_data_t> lv2_control{};
+
     lv2_lwmutex(u32 protocol, vm::ptr<sys_lwmutex_t> control, u64 name) noexcept
         : protocol{static_cast<lv2_protocol>(protocol)}
         , control(control)
@@ -74,10 +81,28 @@ struct lv2_lwmutex final : lv2_obj
     lv2_lwmutex(utils::serial& ar);
     void save(utils::serial& ar);
 
-    // Add a waiter
-    template <typename T>
-    void add_waiter(T* cpu)
+    ppu_thread* load_sq() const
     {
+        return atomic_storage<ppu_thread*>::load(lv2_control.raw().sq);
+    }
+
+    template <typename T>
+    s32 try_own(T* cpu, bool wait_only = false)
+    {
+        const s32 signal = lv2_control.fetch_op([&](control_data_t& data)
+        {
+            if (!data.signaled)
+            {
+                cpu->next_cpu = data.sq;
+                data.sq = cpu;
+            }
+            else
+            {
+                ensure(!wait_only);
+                data.signaled = 0;
+            }
+        }).signaled;
+
         const bool notify = lwcond_waiters.fetch_op([](s32& val)
         {
             if (val + 0u <= 1u << 31)
@@ -92,13 +117,67 @@ struct lv2_lwmutex final : lv2_obj
             return true;
         }).second;
 
-        lv2_obj::emplace(sq, cpu);
-
         if (notify)
         {
             // Notify lwmutex destroyer (may cause EBUSY to be returned for it)
             lwcond_waiters.notify_all();
         }
+
+        return signal;
+    }
+
+    bool try_unlock(bool unlock2)
+    {
+        if (!load_sq())
+        {
+            control_data_t old{};
+            old.signaled = atomic_storage<s32>::load(lv2_control.raw().signaled);
+            control_data_t store = old;
+            store.signaled |= (unlock2 ? s32{smin} : 1);
+
+            if (lv2_control.compare_and_swap_test(old, store))
+            {
+                return true;
+            }
+        }
+
+        return false;
+    }
+
+    template <typename T>
+    T* reown(bool unlock2 = false)
+    {
+        T* res{};
+        T* restore_next{};
+
+        lv2_control.fetch_op([&](control_data_t& data)
+        {
+            if (res)
+            {
+                res->next_cpu = restore_next;
+                res = nullptr;
+            }
+
+            if (auto sq = data.sq)
+            {
+                res = schedule<T>(data.sq, protocol);
+
+                if (sq == data.sq)
+                {
+                    return false;
+                }
+
+                restore_next = res->next_cpu;
+                return true;
+            }
+            else
+            {
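The alignas(16) control_data_t above is the core of the lock-free scheme: the signal value and the wait-queue head share one 16-byte word, so consuming a signal and enqueuing a waiter are a single compare-and-swap and can never disagree with each other. A standalone sketch of the same layout using std::atomic, assuming the target supports a lock-free 16-byte CAS (cmpxchg16b on x86-64; names invented):

    #include <atomic>
    #include <cstdint>

    struct node { node* next{}; };

    struct alignas(16) control
    {
        int32_t  signaled{};
        uint32_t reserved{};
        node*    head{};
    };

    // Fast unlock: leave a signal only if nobody is waiting, in one CAS.
    // With waiters present the caller must take the slow path and pick a
    // successor instead (the role of reown in the patch).
    bool try_unlock(std::atomic<control>& c)
    {
        control old = c.load();

        while (!old.head) // no waiters observed
        {
            control desired = old;
            desired.signaled = 1;

            if (c.compare_exchange_weak(old, desired))
            {
                return true;
            }
            // CAS failed: 'old' was refreshed; a waiter may have appeared
        }

        return false;
    }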
+                data.signaled |= (unlock2 ? s32{smin} : 1);
+                return true;
+            }
+        });
+
+        return res;
+    }
 };
diff --git a/rpcs3/Emu/Cell/lv2/sys_mutex.cpp b/rpcs3/Emu/Cell/lv2/sys_mutex.cpp
index 5fc5c297af..97f211ee56 100644
--- a/rpcs3/Emu/Cell/lv2/sys_mutex.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_mutex.cpp
@@ -1,5 +1,4 @@
 #include "stdafx.h"
-#include "sys_mutex.h"
 
 #include "Emu/IdManager.h"
 #include "Emu/IPC.h"
@@ -9,6 +8,8 @@
 
 #include "util/asm.hpp"
 
+#include "sys_mutex.h"
+
 LOG_CHANNEL(sys_mutex);
 
 lv2_mutex::lv2_mutex(utils::serial& ar)
@@ -18,7 +19,10 @@ lv2_mutex::lv2_mutex(utils::serial& ar)
     , key(ar)
     , name(ar)
 {
-    ar(lock_count, owner);
+    ar(lock_count, control.raw().owner);
+
+    // For backwards compatibility
+    control.raw().owner >>= 1;
 }
 
 std::shared_ptr<void> lv2_mutex::load(utils::serial& ar)
@@ -29,7 +33,7 @@ std::shared_ptr<void> lv2_mutex::load(utils::serial& ar)
 
 void lv2_mutex::save(utils::serial& ar)
 {
-    ar(protocol, recursive, adaptive, key, name, lock_count, owner & -2);
+    ar(protocol, recursive, adaptive, key, name, lock_count, control.raw().owner << 1);
 }
 
 error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
@@ -102,7 +106,7 @@ error_code sys_mutex_destroy(ppu_thread& ppu, u32 mutex_id)
     {
         std::lock_guard lock(mutex.mutex);
 
-        if (mutex.owner || mutex.lock_count)
+        if (atomic_storage<u32>::load(mutex.control.raw().owner))
         {
             return CELL_EBUSY;
         }
@@ -137,15 +141,28 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 
     const auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
     {
-        CellError result = mutex.try_lock(ppu.id);
+        CellError result = mutex.try_lock(ppu);
+
+        if (result == CELL_EBUSY && !atomic_storage<ppu_thread*>::load(mutex.control.raw().sq))
+        {
+            // Try busy waiting a bit if advantageous
+            for (u32 i = 0, end = lv2_obj::has_ppus_in_running_state() ? 3 : 10; id_manager::g_mutex.is_lockable() && i < end; i++)
+            {
+                busy_wait(300);
+                result = mutex.try_lock(ppu);
+
+                if (!result || atomic_storage<ppu_thread*>::load(mutex.control.raw().sq))
+                {
+                    break;
+                }
+            }
+        }
 
         if (result == CELL_EBUSY)
         {
             lv2_obj::notify_all_t notify(ppu);
 
-            std::lock_guard lock(mutex.mutex);
-
-            if (mutex.try_own(ppu, ppu.id))
+            if (mutex.try_own(ppu))
             {
                 result = {};
             }
@@ -188,7 +205,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
         {
             std::lock_guard lock(mutex->mutex);
 
-            for (auto cpu = +mutex->sq; cpu; cpu = cpu->next_cpu)
+            for (auto cpu = atomic_storage<ppu_thread*>::load(mutex->control.raw().sq); cpu; cpu = cpu->next_cpu)
             {
                 if (cpu == &ppu)
                 {
@@ -200,7 +217,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
             break;
         }
 
-        for (usz i = 0; cpu_flag::signal - ppu.state && i < 50; i++)
+        for (usz i = 0; cpu_flag::signal - ppu.state && i < 40; i++)
         {
             busy_wait(500);
         }
@@ -222,7 +239,7 @@ error_code sys_mutex_lock(ppu_thread& ppu, u32 mutex_id, u64 timeout)
 
             std::lock_guard lock(mutex->mutex);
 
-            if (!mutex->unqueue(mutex->sq, &ppu))
+            if (!mutex->unqueue(mutex->control.raw().sq, &ppu))
             {
                 break;
             }
@@ -248,7 +265,7 @@ error_code sys_mutex_trylock(ppu_thread& ppu, u32 mutex_id)
 
     const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex)
     {
-        return mutex.try_lock(ppu.id);
+        return mutex.try_lock(ppu);
     });
 
     if (!mutex)
@@ -277,7 +294,7 @@ error_code sys_mutex_unlock(ppu_thread& ppu, u32 mutex_id)
 
     const auto mutex = idm::check<lv2_obj, lv2_mutex>(mutex_id, [&](lv2_mutex& mutex) -> CellError
     {
-        CellError result = mutex.try_unlock(ppu.id);
+        auto result = mutex.try_unlock(ppu);
 
         if (result == CELL_EBUSY)
         {
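The new loop in sys_mutex_lock spins briefly before committing to a real sleep, and spins less (3 rounds instead of 10) when other PPU threads could use the core, as reported by has_ppus_in_running_state(). The general spin-then-block shape, as a hedged sketch (the yield loop is only a crude stand-in for the calibrated busy_wait(300) in the patch; names invented):

    #include <thread>

    // Try a bounded number of cheap retries before the expensive blocking path.
    template <typename TryLock>
    bool lock_with_spin(TryLock&& try_lock, int rounds)
    {
        for (int i = 0; i < rounds; i++)
        {
            if (try_lock())
            {
                return true; // acquired without ever sleeping
            }

            for (int j = 0; j < 30; j++)
            {
                std::this_thread::yield(); // brief pause between attempts
            }
        }

        return false; // caller proceeds to enqueue itself and sleep
    }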
diff --git a/rpcs3/Emu/Cell/lv2/sys_mutex.h b/rpcs3/Emu/Cell/lv2/sys_mutex.h
index f12ea35405..4c668625cc 100644
--- a/rpcs3/Emu/Cell/lv2/sys_mutex.h
+++ b/rpcs3/Emu/Cell/lv2/sys_mutex.h
@@ -4,6 +4,8 @@
 
 #include "Emu/Memory/vm_ptr.h"
 
+#include "Emu/Cell/PPUThread.h"
+
 struct sys_mutex_attribute_t
 {
     be_t<u32> protocol; // SYS_SYNC_FIFO, SYS_SYNC_PRIORITY or SYS_SYNC_PRIORITY_INHERIT
@@ -21,6 +23,8 @@ struct sys_mutex_attribute_t
     };
 };
 
+class ppu_thread;
+
 struct lv2_mutex final : lv2_obj
 {
     static const u32 id_base = 0x85000000;
@@ -33,9 +37,16 @@ struct lv2_mutex final : lv2_obj
     u32 cond_count = 0; // Condition Variables
 
     shared_mutex mutex;
-    atomic_t<u32> owner{0};
     atomic_t<u32> lock_count{0}; // Recursive Locks
-    atomic_t<ppu_thread*> sq{};
+
+    struct alignas(16) control_data_t
+    {
+        u32 owner{};
+        u32 reserved{};
+        ppu_thread* sq{};
+    };
+
+    atomic_t<control_data_t> control{};
 
     lv2_mutex(u32 protocol, u32 recursive, u32 adaptive, u64 key, u64 name) noexcept
         : protocol{static_cast<lv2_protocol>(protocol)}
@@ -50,11 +61,24 @@ struct lv2_mutex final : lv2_obj
     static std::shared_ptr<void> load(utils::serial& ar);
     void save(utils::serial& ar);
 
-    CellError try_lock(u32 id)
+    template <typename T>
+    CellError try_lock(T& cpu)
     {
-        const u32 value = owner;
+        auto it = control.load();
 
-        if (value >> 1 == id)
+        if (!it.owner)
+        {
+            auto store = it;
+            store.owner = cpu.id;
+
+            if (!control.compare_and_swap_test(it, store))
+            {
+                return CELL_EBUSY;
+            }
+
+            return {};
+        }
+
+        if (it.owner == cpu.id)
         {
             // Recursive locking
             if (recursive == SYS_SYNC_RECURSIVE)
@@ -71,44 +95,34 @@ struct lv2_mutex final : lv2_obj
             return CELL_EDEADLK;
         }
 
-        if (value == 0)
-        {
-            if (owner.compare_and_swap_test(0, id << 1))
-            {
-                return {};
-            }
-        }
-
         return CELL_EBUSY;
     }
 
     template <typename T>
-    bool try_own(T& cpu, u32 id)
+    bool try_own(T& cpu)
     {
-        if (owner.fetch_op([&](u32& val)
+        return control.atomic_op([&](control_data_t& data)
         {
-            if (val == 0)
+            if (data.owner)
             {
-                val = id << 1;
+                cpu.next_cpu = data.sq;
+                data.sq = &cpu;
+                return false;
             }
             else
             {
-                val |= 1;
+                data.owner = cpu.id;
+                return true;
             }
-        }))
-        {
-            lv2_obj::emplace(sq, &cpu);
-            return false;
-        }
-
-        return true;
+        });
     }
 
-    CellError try_unlock(u32 id)
+    template <typename T>
+    CellError try_unlock(T& cpu)
     {
-        const u32 value = owner;
+        auto it = control.load();
 
-        if (value >> 1 != id)
+        if (it.owner != cpu.id)
         {
             return CELL_EPERM;
         }
 
         if (lock_count)
         {
             lock_count--;
             return {};
         }
 
-        if (value == id << 1)
+        if (!it.sq)
         {
-            if (owner.compare_and_swap_test(value, 0))
+            auto store = it;
+            store.owner = 0;
+
+            if (control.compare_and_swap_test(it, store))
             {
                 return {};
             }
         }
 
         return CELL_EBUSY;
     }
 
     template <typename T>
     T* reown()
     {
-        if (auto cpu = schedule<T>(sq, protocol))
+        T* res{};
+        T* restore_next{};
+
+        control.fetch_op([&](control_data_t& data)
         {
-            if (cpu->state & cpu_flag::again)
+            if (res)
             {
-                return static_cast<T*>(cpu);
+                res->next_cpu = restore_next;
+                res = nullptr;
             }
 
-            owner = cpu->id << 1 | !!sq;
-            return static_cast<T*>(cpu);
-        }
-        else
-        {
-            owner = 0;
-            return nullptr;
-        }
+            if (auto sq = data.sq)
+            {
+                res = schedule<T>(data.sq, protocol);
+
+                if (sq == data.sq)
+                {
+                    atomic_storage<u32>::release(control.raw().owner, res->id);
+                    return false;
+                }
+
+                restore_next = res->next_cpu;
+                data.owner = res->id;
+                return true;
+            }
+            else
+            {
+                data.owner = 0;
+                return true;
+            }
+        });
+
+        return res;
     }
 };
 
-class ppu_thread;
-
 // Syscalls
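reown above transfers ownership directly to a waiter inside a fetch_op loop. fetch_op re-runs its lambda whenever the underlying CAS fails, and popping a waiter can write through a live node's link, so every retry must first undo the previous attempt's pop; that is what restore_next is for. A simplified standalone model of the same retry-with-rollback loop, assuming (as in the patch) that all queue mutations funnel through the same control word (FIFO pop from the tail; invented names):

    #include <atomic>
    #include <cstdint>

    struct node
    {
        uint32_t id{};
        node* next{};
    };

    struct alignas(16) control
    {
        uint32_t owner{};
        uint32_t reserved{};
        node* head{};
    };

    // Hand the lock to the oldest waiter, or clear the owner if none wait.
    node* reown(std::atomic<control>& c)
    {
        node* res = nullptr;
        node* parent_of_res = nullptr;

        control old = c.load();

        while (true)
        {
            if (parent_of_res)
            {
                parent_of_res->next = res; // roll back a failed attempt's unlink
            }

            res = nullptr;
            parent_of_res = nullptr;

            control desired = old;

            if (node* it = old.head)
            {
                while (it->next) // the oldest waiter sits at the tail
                {
                    parent_of_res = it;
                    it = it->next;
                }

                res = it;
                desired.owner = it->id;

                if (parent_of_res)
                {
                    parent_of_res->next = nullptr; // writes a live node's link
                }
                else
                {
                    desired.head = nullptr; // the only waiter was the head
                }
            }
            else
            {
                desired.owner = 0;
            }

            if (c.compare_exchange_weak(old, desired))
            {
                return res;
            }
        }
    }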
diff --git a/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp b/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp
index ef0afe46ed..f393b4e620 100644
--- a/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp
@@ -444,14 +444,12 @@ error_code sys_rwlock_wlock(ppu_thread& ppu, u32 rw_lock_id, u64 timeout)
             s64 size = 0;
 
             // Protocol doesn't matter here since they are all enqueued anyways
-            for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu)
+            while (auto cpu = rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO))
             {
                 size++;
                 rwlock->append(cpu);
             }
 
-            rwlock->rq.release(nullptr);
-
             rwlock->owner.atomic_op([&](s64& owner)
             {
                 owner -= 2 * size; // Add readers to value
@@ -564,13 +562,12 @@ error_code sys_rwlock_wunlock(ppu_thread& ppu, u32 rw_lock_id)
             s64 size = 0;
 
             // Protocol doesn't matter here since they are all enqueued anyways
-            for (auto cpu = +rwlock->rq; cpu; cpu = cpu->next_cpu)
+            while (auto cpu = rwlock->schedule<ppu_thread>(rwlock->rq, SYS_SYNC_FIFO))
             {
                 size++;
                 rwlock->append(cpu);
             }
 
-            rwlock->rq.release(nullptr);
             rwlock->owner.release(-2 * static_cast<s64>(size));
             lv2_obj::awake_all();
         }
diff --git a/rpcs3/Emu/Cell/lv2/sys_rwlock.h b/rpcs3/Emu/Cell/lv2/sys_rwlock.h
index e16f036e14..c3016af5ea 100644
--- a/rpcs3/Emu/Cell/lv2/sys_rwlock.h
+++ b/rpcs3/Emu/Cell/lv2/sys_rwlock.h
@@ -29,8 +29,8 @@ struct lv2_rwlock final : lv2_obj
     shared_mutex mutex;
     atomic_t<s64> owner{0};
-    atomic_t<ppu_thread*> rq{};
-    atomic_t<ppu_thread*> wq{};
+    ppu_thread* rq{};
+    ppu_thread* wq{};
 
     lv2_rwlock(u32 protocol, u64 key, u64 name) noexcept
         : protocol{static_cast<lv2_protocol>(protocol)}
diff --git a/rpcs3/Emu/Cell/lv2/sys_semaphore.h b/rpcs3/Emu/Cell/lv2/sys_semaphore.h
index e12f20d3f3..9267c633c4 100644
--- a/rpcs3/Emu/Cell/lv2/sys_semaphore.h
+++ b/rpcs3/Emu/Cell/lv2/sys_semaphore.h
@@ -30,7 +30,7 @@ struct lv2_sema final : lv2_obj
     shared_mutex mutex;
     atomic_t<s32> val;
-    atomic_t<ppu_thread*> sq{};
+    ppu_thread* sq{};
 
     lv2_sema(u32 protocol, u64 key, u64 name, s32 max, s32 value) noexcept
         : protocol{static_cast<lv2_protocol>(protocol)}
diff --git a/rpcs3/Emu/Cell/lv2/sys_sync.h b/rpcs3/Emu/Cell/lv2/sys_sync.h
index be631488a8..88a24893b5 100644
--- a/rpcs3/Emu/Cell/lv2/sys_sync.h
+++ b/rpcs3/Emu/Cell/lv2/sys_sync.h
@@ -118,14 +118,14 @@ public:
     // Find and remove the object from the linked list
     template <typename T>
-    static T* unqueue(atomic_t<T*>& first, T* object, atomic_t<T*> T::* mem_ptr = &T::next_cpu)
+    static T* unqueue(T*& first, T* object, T* T::* mem_ptr = &T::next_cpu)
     {
         auto it = +first;
 
         if (it == object)
         {
-            first.release(+it->*mem_ptr);
-            (it->*mem_ptr).release(nullptr);
+            atomic_storage<T*>::release(first, it->*mem_ptr);
+            atomic_storage<T*>::release(it->*mem_ptr, nullptr);
             return it;
         }
 
@@ -135,8 +135,8 @@ public:
 
             if (next == object)
             {
-                (it->*mem_ptr).release(+next->*mem_ptr);
-                (next->*mem_ptr).release(nullptr);
+                atomic_storage<T*>::release(it->*mem_ptr, next->*mem_ptr);
+                atomic_storage<T*>::release(next->*mem_ptr, nullptr);
                 return next;
             }
 
@@ -146,8 +146,9 @@ public:
         return {};
     }
 
+    // Remove an object from the linked set according to the protocol
     template <typename E, typename T>
-    static E* schedule(atomic_t<T>& first, u32 protocol)
+    static E* schedule(T& first, u32 protocol)
     {
         auto it = static_cast<E*>(first);
 
@@ -156,20 +157,32 @@ public:
             return it;
         }
 
+        auto parent_found = &first;
+
         if (protocol == SYS_SYNC_FIFO)
         {
-            if (it && cpu_flag::again - it->state)
+            while (true)
             {
-                first.release(+it->next_cpu);
-                it->next_cpu.release(nullptr);
-            }
+                const auto next = +it->next_cpu;
 
-            return it;
+                if (next)
+                {
+                    parent_found = &it->next_cpu;
+                    it = next;
+                    continue;
+                }
+
+                if (it && cpu_flag::again - it->state)
+                {
+                    atomic_storage<T>::release(*parent_found, nullptr);
+                }
+
+                return it;
+            }
         }
 
         s32 prio = it->prio;
         auto found = it;
-        auto parent_found = &first;
 
         while (true)
         {
@@ -183,7 +196,8 @@ public:
 
             const s32 _prio = static_cast<E*>(next)->prio;
 
-            if (_prio < prio)
+            // This condition tests for equality as well so the earliest element to be pushed is popped
+            if (_prio <= prio)
             {
                 found = next;
                 parent_found = &node;
                 prio = _prio;
             }
 
             it = next;
         }
 
@@ -195,27 +209,18 @@ public:
         if (cpu_flag::again - found->state)
         {
-            parent_found->release(+found->next_cpu);
-            found->next_cpu.release(nullptr);
+            atomic_storage<T>::release(*parent_found, found->next_cpu);
+            atomic_storage<T>::release(found->next_cpu, nullptr);
         }
 
         return found;
     }
 
     template <typename T>
-    static auto emplace(atomic_t<T>& first, T object)
+    static void emplace(T& first, T object)
     {
-        auto it = &first;
-
-        while (auto ptr = static_cast<T>(+*it))
-        {
-            it = &ptr->next_cpu;
-        }
-
-        it->release(object);
-
-        // Return parent
-        return it;
+        atomic_storage<T>::release(object->next_cpu, first);
+        atomic_storage<T>::release(first, object);
     }
 
 private:
@@ -258,6 +263,9 @@ public:
     static void set_future_sleep(cpu_thread* cpu);
     static bool is_scheduler_ready();
 
+    // Must be called under IDM lock
+    static bool has_ppus_in_running_state();
+
     static void cleanup();
 
     template <typename T>
@@ -538,7 +546,7 @@ private:
     static thread_local std::vector<class cpu_thread*> g_to_awake;
 
     // Scheduler queue for active PPU threads
-    static atomic_t<ppu_thread*> g_ppu;
+    static class ppu_thread* g_ppu;
 
     // Waiting for the response from
     static u32 g_pending;
diff --git a/rpcs3/Emu/Cell/lv2/sys_usbd.cpp b/rpcs3/Emu/Cell/lv2/sys_usbd.cpp
index c2eddd79c9..97d002820b 100644
--- a/rpcs3/Emu/Cell/lv2/sys_usbd.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_usbd.cpp
@@ -109,7 +109,7 @@ public:
     // sys_usbd_receive_event PPU Threads
     shared_mutex mutex_sq;
-    atomic_t<ppu_thread*> sq{};
+    ppu_thread* sq{};
 
     static constexpr auto thread_name = "Usb Manager Thread"sv;
 
@@ -642,7 +642,7 @@ error_code sys_usbd_finalize(ppu_thread& ppu, u32 handle)
     usbh.is_init = false;
 
     // Forcefully awake all waiters
-    for (auto cpu = +usbh.sq; cpu; cpu = cpu->next_cpu)
+    while (auto cpu = lv2_obj::schedule<ppu_thread>(usbh.sq, SYS_SYNC_FIFO))
     {
         // Special termination signal value
         cpu->gpr[4] = 4;
 
         lv2_obj::awake(cpu);
     }
 
-    usbh.sq.release(nullptr);
-
     // TODO
     return CELL_OK;
 }
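Two sys_sync.h details above cooperate: emplace now pushes new waiters at the head in O(1), and the priority branch of schedule() compares with <= so that, among waiters of equal priority, the match nearest the tail (the earliest pushed) wins, preserving FIFO order within a priority class. A minimal sketch of that selection, where a lower value means higher priority as in the LV2 scheduler (invented names):

    struct node
    {
        int prio{};
        node* next{};
    };

    // Pick the best-priority waiter; '<=' keeps scanning so the last match,
    // i.e. the earliest-pushed node of that priority, is the one unlinked.
    node* pop_prio(node*& head)
    {
        if (!head)
        {
            return nullptr;
        }

        node** parent = &head;
        node** best_parent = &head;
        node* best = head;

        for (node* it = head; it; it = it->next)
        {
            if (it->prio <= best->prio) // note: <=, not <
            {
                best = it;
                best_parent = parent;
            }

            parent = &it->next;
        }

        *best_parent = best->next; // unlink the chosen waiter
        best->next = nullptr;
        return best;
    }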
" Recursive," : "", control.owner, +mutex.lock_count, mutex.key, mutex.cond_count))), control.sq); break; } case SYS_COND_OBJECT: @@ -488,6 +489,7 @@ void kernel_explorer::update() auto& lwm = static_cast(obj); std::string owner_str = "unknown"; // Either invalid state or the lwmutex control data was moved from sys_lwmutex_t lwm_data{}; + auto lv2_control = lwm.lv2_control.load(); if (lwm.control.try_read(lwm_data) && lwm_data.sleep_queue == id) { @@ -513,12 +515,12 @@ void kernel_explorer::update() } else { - show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s, Signal: %#x (unmapped/invalid control data at *0x%x)", id, lv2_obj::name64(lwm.name), lwm.protocol, +lwm.signaled, lwm.control))), lwm.sq); + show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s, Signal: %#x (unmapped/invalid control data at *0x%x)", id, lv2_obj::name64(lwm.name), lwm.protocol, +lv2_control.signaled, lwm.control))), lv2_control.sq); break; } show_waiters(add_solid_node(node, qstr(fmt::format(u8"LWMutex 0x%08x: “%s”, %s,%s Owner: %s, Locks: %u, Signal: %#x, Control: *0x%x", id, lv2_obj::name64(lwm.name), lwm.protocol, - (lwm_data.attribute & SYS_SYNC_RECURSIVE) ? " Recursive," : "", owner_str, lwm_data.recursive_count, +lwm.signaled, lwm.control))), lwm.sq); + (lwm_data.attribute & SYS_SYNC_RECURSIVE) ? " Recursive," : "", owner_str, lwm_data.recursive_count, +lv2_control.signaled, lwm.control))), lv2_control.sq); break; } case SYS_TIMER_OBJECT: