sys_ppu_thread: Make PPU id removal after exit atomic with descheduling

* Make PPU id removal after exit atomic with descheduling
* Make joining thread scheduling atomic with thread exit sleep.
* Update sys_ppu_thread_stop/restart.
* Add idm::remove_verify.
This commit is contained in:
Eladash 2020-03-03 22:39:40 +02:00
parent c3d36940c7
commit efe6e1eb0a
5 changed files with 160 additions and 89 deletions

View File

@@ -67,25 +67,18 @@ extern atomic_t<const char*> g_progr;
extern atomic_t<u32> g_progr_ptotal;
extern atomic_t<u32> g_progr_pdone;
enum class join_status : u32
{
joinable = 0,
detached = 0u-1,
exited = 0u-2,
zombie = 0u-3,
};
template <>
void fmt_class_string<join_status>::format(std::string& out, u64 arg)
void fmt_class_string<ppu_join_status>::format(std::string& out, u64 arg)
{
format_enum(out, arg, [](join_status js)
format_enum(out, arg, [](ppu_join_status js)
{
switch (js)
{
case join_status::joinable: return "";
case join_status::detached: return "detached";
case join_status::zombie: return "zombie";
case join_status::exited: return "exited";
case ppu_join_status::joinable: return "";
case ppu_join_status::detached: return "detached";
case ppu_join_status::zombie: return "zombie";
case ppu_join_status::exited: return "exited";
case ppu_join_status::max: return "invalid";
}
return unknown;
@@ -429,7 +422,7 @@ std::string ppu_thread::dump() const
std::string ret = cpu_thread::dump();
fmt::append(ret, "Priority: %d\n", +prio);
fmt::append(ret, "Stack: 0x%x..0x%x\n", stack_addr, stack_addr + stack_size - 1);
fmt::append(ret, "Joiner: %s\n", join_status(joiner.load()));
fmt::append(ret, "Joiner: %s\n", joiner.load());
fmt::append(ret, "Commands: %u\n", cmd_queue.size());
const char* _func = current_function;
@@ -705,7 +698,7 @@ ppu_thread::ppu_thread(const ppu_thread_params& param, std::string_view name, u3
, prio(prio)
, stack_size(param.stack_size)
, stack_addr(param.stack_addr)
, joiner(-!!detached)
, joiner(detached != 0 ? ppu_join_status::detached : ppu_join_status::joinable)
, start_time(get_guest_system_time())
, ppu_tname(stx::shared_cptr<std::string>::make(name))
{

View File

@@ -21,6 +21,15 @@ enum class ppu_cmd : u32
reset_stack, // resets stack address
};
enum class ppu_join_status : u32
{
joinable = 0,
detached = 1,
zombie = 2,
exited = 3,
max = 4, // Values above it indicate PPU id of joining thread
};
// Formatting helper
enum class ppu_syscall_code : u64
{
@@ -169,7 +178,7 @@ public:
const u32 stack_size; // Stack size
const u32 stack_addr; // Stack address
atomic_t<u32> joiner{~0u}; // Joining thread (-1 if detached)
atomic_t<ppu_join_status> joiner; // Joining thread or status
lf_fifo<atomic_t<cmd64>, 127> cmd_queue; // Command queue for asynchronous operations.
@@ -194,6 +203,8 @@ public:
static void stack_pop_verbose(u32 addr, u32 size) noexcept;
};
static_assert(ppu_join_status::max < ppu_join_status{ppu_thread::id_base});
template<typename T, typename = void>
struct ppu_gpr_cast_impl
{

View File

@@ -42,40 +42,40 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
ppu.state += cpu_flag::exit;
// Get joiner ID
const u32 jid = ppu.joiner.fetch_op([](u32& value)
{
if (value == 0)
{
// Joinable, not joined
value = -3;
}
else if (value != umax)
{
// Joinable, joined
value = -2;
}
// Detached otherwise
});
if (jid == umax)
{
g_fxo->get<ppu_thread_cleaner>()->clean(ppu.id);
}
else if (jid != 0)
ppu_join_status old_status;
{
std::lock_guard lock(id_manager::g_mutex);
// Schedule joiner and unqueue
lv2_obj::awake(idm::check_unlocked<named_thread<ppu_thread>>(jid));
// Get joiner ID
old_status = ppu.joiner.fetch_op([](ppu_join_status& status)
{
if (status == ppu_join_status::joinable)
{
// Joinable, not joined
status = ppu_join_status::zombie;
return;
}
// Set deleted thread status
status = ppu_join_status::exited;
});
if (old_status > ppu_join_status::max)
{
lv2_obj::append(idm::check_unlocked<named_thread<ppu_thread>>(static_cast<u32>(old_status)));
}
// Unqueue
lv2_obj::sleep(ppu);
// Remove suspend state (TODO)
ppu.state -= cpu_flag::suspend;
}
// Unqueue
lv2_obj::sleep(ppu);
// Remove suspend state (TODO)
ppu.state -= cpu_flag::suspend;
if (old_status == ppu_join_status::detached)
{
g_fxo->get<ppu_thread_cleaner>()->clean(ppu.id);
}
}
void sys_ppu_thread_yield(ppu_thread& ppu)
@@ -94,33 +94,32 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
// Clean some detached thread (hack)
g_fxo->get<ppu_thread_cleaner>()->clean(0);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
{
CellError result = thread.joiner.atomic_op([&](u32& value) -> CellError
if (&ppu == &thread)
{
if (value == 0u - 3)
return CELL_EDEADLK;
}
CellError result = thread.joiner.atomic_op([&](ppu_join_status& value) -> CellError
{
if (value == ppu_join_status::zombie)
{
value = -2;
value = ppu_join_status::exited;
return CELL_EBUSY;
}
if (value == 0u - 2)
if (value == ppu_join_status::exited)
{
return CELL_ESRCH;
}
if (value)
if (value > ppu_join_status::max)
{
return CELL_EINVAL;
}
// TODO: check precedence?
if (&ppu == &thread)
{
return CELL_EDEADLK;
}
value = ppu.id;
value = ppu_join_status{ppu.id};
return {};
});
@@ -150,16 +149,18 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr<u64> vptr
return 0;
}
// Get the exit status from the register
const u64 vret = thread->gpr[3];
// Cleanup
idm::remove<named_thread<ppu_thread>>(thread->id);
verify(HERE), idm::remove_verify<named_thread<ppu_thread>>(thread_id, std::move(thread.ptr));
if (!vptr)
{
return not_an_error(CELL_EFAULT);
}
// Get the exit status from the register
*vptr = thread->gpr[3];
*vptr = vret;
return CELL_OK;
}
@@ -172,30 +173,30 @@ error_code sys_ppu_thread_detach(u32 thread_id)
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread) -> CellError
{
return thread.joiner.atomic_op([&](u32& value) -> CellError
return thread.joiner.atomic_op([](ppu_join_status& value) -> CellError
{
if (value == 0u - 3)
if (value == ppu_join_status::zombie)
{
value = -2;
value = ppu_join_status::exited;
return CELL_EAGAIN;
}
if (value == 0u - 2)
if (value == ppu_join_status::exited)
{
return CELL_ESRCH;
}
if (value == umax)
if (value == ppu_join_status::detached)
{
return CELL_EINVAL;
}
if (value)
if (value > ppu_join_status::max)
{
return CELL_EBUSY;
}
value = -1;
value = ppu_join_status::detached;
return {};
});
});
@@ -212,7 +213,7 @@ error_code sys_ppu_thread_detach(u32 thread_id)
if (thread.ret == CELL_EAGAIN)
{
idm::remove<named_thread<ppu_thread>>(thread_id);
verify(HERE), idm::remove<named_thread<ppu_thread>>(thread_id);
}
return CELL_OK;
@@ -227,7 +228,7 @@ error_code sys_ppu_thread_get_join_state(ppu_thread& ppu, vm::ptr<s32> isjoinabl
return CELL_EFAULT;
}
*isjoinable = ppu.joiner != umax;
*isjoinable = ppu.joiner != ppu_join_status::detached;
return CELL_OK;
}
@@ -245,13 +246,20 @@ error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio)
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
if (thread.joiner == ppu_join_status::exited)
{
return false;
}
if (thread.prio != prio)
{
lv2_obj::set_priority(thread, prio);
}
return true;
});
if (!thread)
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}
@@ -268,10 +276,16 @@ error_code sys_ppu_thread_get_priority(u32 thread_id, vm::ptr<s32> priop)
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
if (thread.joiner == ppu_join_status::exited)
{
return false;
}
*priop = thread.prio;
return true;
});
if (!thread)
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}
@@ -293,9 +307,17 @@ error_code sys_ppu_thread_stop(u32 thread_id)
{
sys_ppu_thread.todo("sys_ppu_thread_stop(thread_id=0x%x)", thread_id);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
if (!g_ps3_process_info.has_root_perm())
{
return CELL_ENOSYS;
}
if (!thread)
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
return thread.joiner != ppu_join_status::exited;
});
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}
@@ -303,15 +325,13 @@
return CELL_OK;
}
error_code sys_ppu_thread_restart(u32 thread_id)
error_code sys_ppu_thread_restart()
{
sys_ppu_thread.todo("sys_ppu_thread_restart(thread_id=0x%x)", thread_id);
sys_ppu_thread.todo("sys_ppu_thread_restart()");
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
if (!thread)
if (!g_ps3_process_info.has_root_perm())
{
return CELL_ESRCH;
return CELL_ENOSYS;
}
return CELL_OK;
@@ -401,10 +421,16 @@ error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id)
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
if (thread.joiner == ppu_join_status::exited)
{
return false;
}
lv2_obj::awake(&thread);
return true;
});
if (!thread)
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}
@@ -451,9 +477,12 @@ error_code sys_ppu_thread_rename(u32 thread_id, vm::cptr<char> name)
{
sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
return thread.joiner != ppu_join_status::exited;
});
if (!thread)
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}
@@ -479,9 +508,12 @@ error_code sys_ppu_thread_recover_page_fault(u32 thread_id)
{
sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)", thread_id);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
return thread.joiner != ppu_join_status::exited;
});
if (!thread)
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}
@@ -493,9 +525,12 @@ error_code sys_ppu_thread_get_page_fault_context(u32 thread_id, vm::ptr<sys_ppu_
{
sys_ppu_thread.todo("sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)", thread_id, ctxt);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id, [&](ppu_thread& thread)
{
return thread.joiner != ppu_join_status::exited;
});
if (!thread)
if (!thread || !thread.ret)
{
return CELL_ESRCH;
}

View File

@@ -64,7 +64,7 @@ error_code sys_ppu_thread_set_priority(ppu_thread& ppu, u32 thread_id, s32 prio)
error_code sys_ppu_thread_get_priority(u32 thread_id, vm::ptr<s32> priop);
error_code sys_ppu_thread_get_stack_information(ppu_thread& ppu, vm::ptr<sys_ppu_thread_stack_t> sp);
error_code sys_ppu_thread_stop(u32 thread_id);
error_code sys_ppu_thread_restart(u32 thread_id);
error_code sys_ppu_thread_restart();
error_code _sys_ppu_thread_create(vm::ptr<u64> thread_id, vm::ptr<ppu_thread_param_t> param, u64 arg, u64 arg4, s32 prio, u32 stacksize, u64 flags, vm::cptr<char> threadname);
error_code sys_ppu_thread_start(ppu_thread& ppu, u32 thread_id);
error_code sys_ppu_thread_rename(u32 thread_id, vm::cptr<char> name);

View File

@@ -195,6 +195,11 @@ class idm
return ptr.operator bool();
}
T& operator*() const
{
return *ptr;
}
T* operator->() const
{
return ptr.get();
@@ -213,6 +218,11 @@ class idm
return ptr != nullptr;
}
T& operator*() const
{
return *ptr;
}
T* operator->() const
{
return ptr;
@@ -532,6 +542,28 @@ public:
return true;
}
// Remove the ID if matches the weak/shared ptr
template <typename T, typename Get = T, typename Ptr>
static inline bool remove_verify(u32 id, Ptr sptr)
{
std::shared_ptr<void> ptr;
{
std::lock_guard lock(id_manager::g_mutex);
if (const auto found = find_id<T, Get>(id); found &&
(!found->second.owner_before(sptr) && !sptr.owner_before(found->second)))
{
ptr = std::move(found->second);
}
else
{
return false;
}
}
return true;
}
// Remove the ID and return the object
template <typename T, typename Get = T>
static inline std::shared_ptr<Get> withdraw(u32 id)