diff --git a/rpcs3/Emu/Cell/PPUThread.cpp b/rpcs3/Emu/Cell/PPUThread.cpp
index e84ff28cd7..b54779bdd1 100644
--- a/rpcs3/Emu/Cell/PPUThread.cpp
+++ b/rpcs3/Emu/Cell/PPUThread.cpp
@@ -67,25 +67,18 @@ extern atomic_t<const char*> g_progr;
 extern atomic_t<u32> g_progr_ptotal;
 extern atomic_t<u32> g_progr_pdone;
 
-enum class join_status : u32
-{
-	joinable = 0,
-	detached = 0u-1,
-	exited = 0u-2,
-	zombie = 0u-3,
-};
-
 template <>
-void fmt_class_string<join_status>::format(std::string& out, u64 arg)
+void fmt_class_string<ppu_join_status>::format(std::string& out, u64 arg)
 {
-	format_enum(out, arg, [](join_status js)
+	format_enum(out, arg, [](ppu_join_status js)
 	{
 		switch (js)
 		{
-		case join_status::joinable: return "";
-		case join_status::detached: return "detached";
-		case join_status::zombie: return "zombie";
-		case join_status::exited: return "exited";
+		case ppu_join_status::joinable: return "";
+		case ppu_join_status::detached: return "detached";
+		case ppu_join_status::zombie: return "zombie";
+		case ppu_join_status::exited: return "exited";
+		case ppu_join_status::max: return "invalid";
 		}
 
 		return unknown;
@@ -429,7 +422,7 @@ std::string ppu_thread::dump() const
 	std::string ret = cpu_thread::dump();
 	fmt::append(ret, "Priority: %d\n", +prio);
 	fmt::append(ret, "Stack: 0x%x..0x%x\n", stack_addr, stack_addr + stack_size - 1);
-	fmt::append(ret, "Joiner: %s\n", join_status(joiner.load()));
+	fmt::append(ret, "Joiner: %s\n", joiner.load());
 	fmt::append(ret, "Commands: %u\n", cmd_queue.size());
 
 	const char* _func = current_function;
@@ -705,7 +698,7 @@ ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u3
 	, prio(prio)
 	, stack_size(param.stack_size)
 	, stack_addr(param.stack_addr)
-	, joiner(-!!detached)
+	, joiner(detached != 0 ? ppu_join_status::detached : ppu_join_status::joinable)
 	, start_time(get_guest_system_time())
 	, ppu_tname(stx::shared_cptr<std::string>::make(name))
 {
diff --git a/rpcs3/Emu/Cell/PPUThread.h b/rpcs3/Emu/Cell/PPUThread.h
index dbcd3b5874..418e8fe246 100644
--- a/rpcs3/Emu/Cell/PPUThread.h
+++ b/rpcs3/Emu/Cell/PPUThread.h
@@ -21,6 +21,15 @@ enum class ppu_cmd : u32
 	reset_stack, // resets stack address
 };
 
+enum class ppu_join_status : u32
+{
+	joinable = 0,
+	detached = 1,
+	zombie = 2,
+	exited = 3,
+	max = 4, // Values above it indicate PPU id of joining thread
+};
+
 // Formatting helper
 enum class ppu_syscall_code : u64
 {
@@ -169,7 +178,7 @@ public:
 	const u32 stack_size; // Stack size
 	const u32 stack_addr; // Stack address
 
-	atomic_t<u32> joiner{~0u}; // Joining thread (-1 if detached)
+	atomic_t<ppu_join_status> joiner; // Joining thread or status
 
 	lf_fifo<atomic_be_t<u64>, 127> cmd_queue; // Command queue for asynchronous operations.
 
@@ -194,6 +203,8 @@ public:
 	static void stack_pop_verbose(u32 addr, u32 size) noexcept;
 };
 
+static_assert(ppu_join_status::max < ppu_join_status{ppu_thread::id_base});
+
 template <typename T, typename = void>
 struct ppu_gpr_cast_impl
 {
diff --git a/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp b/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp
index 7320cebbc8..b99a8b6f59 100644
--- a/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp
@@ -42,40 +42,40 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
 
 	ppu.state += cpu_flag::exit;
 
-	// Get joiner ID
-	const u32 jid = ppu.joiner.fetch_op([](u32& value)
-	{
-		if (value == 0)
-		{
-			// Joinable, not joined
-			value = -3;
-		}
-		else if (value != umax)
-		{
-			// Joinable, joined
-			value = -2;
-		}
-
-		// Detached otherwise
-	});
-
-	if (jid == umax)
-	{
-		g_fxo->get<ppu_thread_cleaner>()->clean(ppu.id);
-	}
-	else if (jid != 0)
+	ppu_join_status old_status;
 	{
 		std::lock_guard lock(id_manager::g_mutex);
 
-		// Schedule joiner and unqueue
-		lv2_obj::awake(idm::check_unlocked<named_thread<ppu_thread>>(jid));
+		// Get joiner ID
+		old_status = ppu.joiner.fetch_op([](ppu_join_status& status)
+		{
+			if (status == ppu_join_status::joinable)
+			{
+				// Joinable, not joined
+				status = ppu_join_status::zombie;
+				return;
+			}
+
+			// Set deleted thread status
+			status = ppu_join_status::exited;
+		});
+
+		if (old_status > ppu_join_status::max)
+		{
+			lv2_obj::append(idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(old_status)));
+		}
+
+		// Unqueue
+		lv2_obj::sleep(ppu);
+
+		// Remove suspend state (TODO)
+		ppu.state -= cpu_flag::suspend;
 	}
 
-	// Unqueue
-	lv2_obj::sleep(ppu);
-
-	// Remove suspend state (TODO)
-	ppu.state -= cpu_flag::suspend;
+	if (old_status == ppu_join_status::detached)
+	{
+		g_fxo->get<ppu_thread_cleaner>()->clean(ppu.id);
+	}
 }
 
 void sys_ppu_thread_yield(ppu_thread& ppu)
@@ -94,33 +94,32 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
 	// Clean some detached thread (hack)
 	g_fxo->get<ppu_thread_cleaner>()->clean(0);
 
-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
+	auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
 	{
-		CellError result = thread.joiner.atomic_op([&](u32& value) -> CellError
+		if (&ppu == &thread)
 		{
-			if (value == 0u - 3)
+			return CELL_EDEADLK;
+		}
+
+		CellError result = thread.joiner.atomic_op([&](ppu_join_status& value) -> CellError
+		{
+			if (value == ppu_join_status::zombie)
 			{
-				value = -2;
+				value = ppu_join_status::exited;
 				return CELL_EBUSY;
 			}
 
-			if (value == 0u - 2)
+			if (value == ppu_join_status::exited)
 			{
 				return CELL_ESRCH;
 			}
 
-			if (value)
+			if (value > ppu_join_status::max)
 			{
 				return CELL_EINVAL;
 			}
 
-			// TODO: check precedence?
-			if (&ppu == &thread)
-			{
-				return CELL_EDEADLK;
-			}
-
-			value = ppu.id;
+			value = ppu_join_status{ppu.id};
 			return {};
 		});
 
@@ -150,16 +149,18 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
 		return 0;
 	}
 
+	// Get the exit status from the register
+	const u64 vret = thread->gpr[3];
+
 	// Cleanup
-	idm::remove<named_thread<ppu_thread>>(thread->id);
+	verify(HERE), idm::remove_verify<named_thread<ppu_thread>>(thread_id, std::move(thread.ptr));
 
 	if (!vptr)
 	{
 		return not_an_error(CELL_EFAULT);
 	}
 
-	// Get the exit status from the register
-	*vptr = thread->gpr[3];
+	*vptr = vret;
 
 	return CELL_OK;
 }
@@ -172,30 +173,30 @@ error_code sys_ppu_thread_detach(u32 thread_id)
 
 	const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
 	{
-		return thread.joiner.atomic_op([&](u32& value) -> CellError
+		return thread.joiner.atomic_op([](ppu_join_status& value) -> CellError
 		{
-			if (value == 0u - 3)
+			if (value == ppu_join_status::zombie)
 			{
-				value = -2;
+				value = ppu_join_status::exited;
 				return CELL_EAGAIN;
 			}
 
-			if (value == 0u - 2)
+			if (value == ppu_join_status::exited)
 			{
 				return CELL_ESRCH;
 			}
 
-			if (value == umax)
+			if (value == ppu_join_status::detached)
 			{
 				return CELL_EINVAL;
 			}
 
-			if (value)
+			if (value > ppu_join_status::max)
 			{
 				return CELL_EBUSY;
 			}
 
-			value = -1;
+			value = ppu_join_status::detached;
 			return {};
 		});
 	});
@@ -212,7 +213,7 @@ error_code sys_ppu_thread_detach(u32 thread_id)
 
 	if (thread.ret == CELL_EAGAIN)
 	{
-		idm::remove<named_thread<ppu_thread>>(thread_id);
+		verify(HERE), idm::remove<named_thread<ppu_thread>>(thread_id);
 	}
 
 	return CELL_OK;
@@ -227,7 +228,7 @@ error_code sys_ppu_thread_get_join_state(ppu_thread& ppu, vm::ptr<s32> isjoinabl
 		return CELL_EFAULT;
 	}
 
-	*isjoinable = ppu.joiner != umax;
+	*isjoinable = ppu.joiner != ppu_join_status::detached;
 
 	return CELL_OK;
 }
@@ -245,13 +246,20 @@ error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio)
 
 	const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
 	{
+		if (thread.joiner == ppu_join_status::exited)
+		{
+			return false;
+		}
+
 		if (thread.prio != prio)
 		{
 			lv2_obj::set_priority(thread, prio);
 		}
+
+		return true;
 	});
 
-	if (!thread)
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
@@ -268,10 +276,16 @@ error_code sys_ppu_thread_get_priority(u32 thread_id, vm::ptr<s32> priop)
 
 	const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
 	{
+		if (thread.joiner == ppu_join_status::exited)
+		{
+			return false;
+		}
+
 		*priop = thread.prio;
+
+		return true;
 	});
 
-	if (!thread)
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
@@ -293,9 +307,17 @@ error_code sys_ppu_thread_stop(u32 thread_id)
 {
 	sys_ppu_thread.todo("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);
 
-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
+	if (!g_ps3_process_info.has_root_perm())
+	{
+		return CELL_ENOSYS;
+	}
 
-	if (!thread)
+	const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
+	{
+		return thread.joiner != ppu_join_status::exited;
+	});
+
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
@@ -303,15 +325,13 @@ error_code sys_ppu_thread_stop(u32 thread_id)
 	return CELL_OK;
 }
 
-error_code sys_ppu_thread_restart(u32 thread_id)
+error_code sys_ppu_thread_restart()
 {
-	sys_ppu_thread.todo("sys_ppu_thread_restart(thread_id=0x%x)", thread_id);
+	sys_ppu_thread.todo("sys_ppu_thread_restart()");
 
-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
-
-	if (!thread)
+	if (!g_ps3_process_info.has_root_perm())
 	{
-		return CELL_ESRCH;
+		return CELL_ENOSYS;
 	}
 	return CELL_OK;
 }
@@ -401,10 +421,16 @@ error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
 
 	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
 	{
+		if (thread.joiner == ppu_join_status::exited)
+		{
+			return false;
+		}
+
 		lv2_obj::awake(&thread);
+		return true;
 	});
 
-	if (!thread)
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
@@ -451,9 +477,12 @@ error_code sys_ppu_thread_rename(u32 thread_id, vm::cptr<char> name)
 {
 	sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);
 
-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
+	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
+	{
+		return thread.joiner != ppu_join_status::exited;
+	});
 
-	if (!thread)
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
@@ -479,9 +508,12 @@ error_code sys_ppu_thread_recover_page_fault(u32 thread_id)
 {
 	sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)", thread_id);
 
-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
+	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
+	{
+		return thread.joiner != ppu_join_status::exited;
+	});
 
-	if (!thread)
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
@@ -493,9 +525,12 @@ error_code sys_ppu_thread_get_page_fault_context(u32 thread_id, vm::ptr<sys_ppu
 {
 	sys_ppu_thread.todo("sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)", thread_id, ctxt);
 
-	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
+	const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
+	{
+		return thread.joiner != ppu_join_status::exited;
+	});
 
-	if (!thread)
+	if (!thread || !thread.ret)
 	{
 		return CELL_ESRCH;
 	}
diff --git a/rpcs3/Emu/Cell/lv2/sys_ppu_thread.h b/rpcs3/Emu/Cell/lv2/sys_ppu_thread.h
index 880b9f5182..4ab89b0ef6 100644
--- a/rpcs3/Emu/Cell/lv2/sys_ppu_thread.h
+++ b/rpcs3/Emu/Cell/lv2/sys_ppu_thread.h
@@ -64,7 +64,7 @@ error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio)
 error_code sys_ppu_thread_get_priority(u32 thread_id, vm::ptr<s32> priop);
 error_code sys_ppu_thread_get_stack_information(ppu_thread& ppu, vm::ptr<sys_ppu_thread_stack_t> sp);
 error_code sys_ppu_thread_stop(u32 thread_id);
-error_code sys_ppu_thread_restart(u32 thread_id);
+error_code sys_ppu_thread_restart();
 error_code _sys_ppu_thread_create(vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> param, u64 arg, u64 arg4, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname);
 error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id);
 error_code sys_ppu_thread_rename(u32 thread_id, vm::cptr<char> name);
diff --git a/rpcs3/Emu/IdManager.h b/rpcs3/Emu/IdManager.h
index f8c5088ffa..3463ef6fc9 100644
--- a/rpcs3/Emu/IdManager.h
+++ b/rpcs3/Emu/IdManager.h
@@ -195,6 +195,11 @@ class idm
 			return ptr.operator bool();
 		}
 
+		T& operator*() const
+		{
+			return *ptr;
+		}
+
 		T* operator->() const
 		{
 			return ptr.get();
 		}
@@ -213,6 +218,11 @@ class idm
 			return ptr != nullptr;
 		}
 
+		T& operator*() const
+		{
+			return *ptr;
+		}
+
 		T* operator->() const
 		{
 			return ptr;
 		}
@@ -532,6 +542,28 @@ public:
 		return true;
 	}
 
+	// Remove the ID if matches the weak/shared ptr
+	template <typename T, typename Get = void, typename Ptr>
+	static inline bool remove_verify(u32 id, Ptr sptr)
+	{
+		std::shared_ptr<void> ptr;
+		{
+			std::lock_guard lock(id_manager::g_mutex);
+
+			if (const auto found = find_id<T, Get>(id); found &&
+				(!found->second.owner_before(sptr) && !sptr.owner_before(found->second)))
+			{
+				ptr = std::move(found->second);
+			}
+			else
+			{
+				return false;
+			}
+		}
+
+		return true;
+	}
+
 	// Remove the ID and return the object
 	template <typename T, typename Get = T>
 	static inline std::shared_ptr<Get> withdraw(u32 id)
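Note on the new joiner encoding: the single atomic joiner field now holds either one of the small ppu_join_status constants or, for any value above ppu_join_status::max, the raw PPU id of the joining thread. The static_assert added in PPUThread.h (ppu_join_status::max < ppu_join_status{ppu_thread::id_base}) keeps the two ranges disjoint, which is what lets sys_ppu_thread_join store ppu_join_status{ppu.id} and _sys_ppu_thread_exit recover the joiner via static_cast<u32>(old_status). Below is a minimal standalone C++17 sketch of that encoding, not RPCS3 code: the id_base constant and the describe() helper are illustrative assumptions; only the enumerators mirror the patch.

// Standalone sketch (not RPCS3 code): one 32-bit value carries either a
// ppu_join_status constant or the id of the joining thread. All real PPU ids
// are assumed to start at id_base, so they can never collide with the status
// constants (this mirrors the static_assert added in PPUThread.h).
#include <cstdint>
#include <cstdio>

enum class ppu_join_status : std::uint32_t
{
	joinable = 0,
	detached = 1,
	zombie = 2,
	exited = 3,
	max = 4, // values above this are interpreted as a joiner's PPU id
};

// Assumed lower bound for PPU thread ids (placeholder value for illustration)
constexpr std::uint32_t id_base = 0x01000000;

static_assert(static_cast<std::uint32_t>(ppu_join_status::max) < id_base);

// Hypothetical helper: decode the packed value the same way the formatter in
// PPUThread.cpp does, treating anything above ppu_join_status::max as a joiner id.
const char* describe(ppu_join_status s)
{
	switch (s)
	{
	case ppu_join_status::joinable: return "joinable";
	case ppu_join_status::detached: return "detached";
	case ppu_join_status::zombie: return "zombie";
	case ppu_join_status::exited: return "exited";
	case ppu_join_status::max: return "invalid";
	default: return "joined (value holds the joiner's PPU id)";
	}
}

int main()
{
	std::printf("%s\n", describe(ppu_join_status::detached));
	std::printf("%s\n", describe(ppu_join_status{id_base + 5})); // a joiner id
}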