LV2: Make _sys_lwcond_destroy wait for lwmutex lock
This commit is contained in:
parent 1c36156594
commit 5d4e87373f
@@ -64,24 +64,77 @@ error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
 
 	sys_lwcond.warning("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);
 
-	const auto cond = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
+	std::shared_ptr<lv2_lwcond> _cond;
+
+	while (true)
 	{
-		if (atomic_storage<ppu_thread*>::load(cond.sq))
+		s32 old_val = 0;
+
+		auto [ptr, ret] = idm::withdraw<lv2_obj, lv2_lwcond>(lwcond_id, [&](lv2_lwcond& cond) -> CellError
 		{
-			return CELL_EBUSY;
+			// Ignore check on first iteration
+			if (_cond && std::addressof(cond) != _cond.get())
+			{
+				// Other thread has destroyed the lwcond earlier
+				return CELL_ESRCH;
+			}
+
+			std::lock_guard lock(cond.mutex);
+
+			if (atomic_storage<ppu_thread*>::load(cond.sq))
+			{
+				return CELL_EBUSY;
+			}
+
+			old_val = cond.lwmutex_waiters.or_fetch(smin);
+
+			if (old_val != smin)
+			{
+				// De-schedule if waiters were found
+				lv2_obj::sleep(ppu);
+
+				// Repeat loop: there are lwmutex waiters inside _sys_lwcond_queue_wait
+				return CELL_EAGAIN;
+			}
+
+			return {};
+		});
+
+		if (!ptr)
+		{
+			return CELL_ESRCH;
 		}
 
-		return {};
-	});
+		if (ret)
+		{
+			if (ret != CELL_EAGAIN)
+			{
+				return ret;
+			}
+		}
+		else
+		{
+			break;
+		}
 
-	if (!cond)
-	{
-		return CELL_ESRCH;
-	}
+		_cond = std::move(ptr);
 
-	if (cond.ret)
-	{
-		return cond.ret;
+		// Wait for all lwcond waiters to quit
+		while (old_val + 0u > 1u << 31)
+		{
+			thread_ctrl::wait_on(_cond->lwmutex_waiters, old_val);
+
+			if (ppu.is_stopped())
+			{
+				ppu.state += cpu_flag::again;
+				return {};
+			}
+
+			old_val = _cond->lwmutex_waiters;
+		}
+
+		// Wake up from sleep
+		ppu.check_state();
 	}
 
 	return CELL_OK;
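Note on the destroy path above: the lwmutex_waiters counter is tagged with its sign bit (smin) via or_fetch, after which the destroying thread blocks until the counter drains back to exactly that mark. Below is a minimal standalone sketch of that destroyer-side idea, written against C++20 std::atomic wait/notify rather than RPCS3's atomic_t/thread_ctrl API; the names destroy_mark, waiters and destroyer_drain are illustrative, not from the source.

#include <atomic>
#include <cstdint>

constexpr int32_t destroy_mark = INT32_MIN; // stands in for smin
std::atomic<int32_t> waiters{0};            // stands in for lwmutex_waiters

// Destroyer side: set the sign bit so that a value of exactly destroy_mark
// means "no waiters left", then block until every registered waiter has gone.
void destroyer_drain()
{
	int32_t old = waiters.fetch_or(destroy_mark) | destroy_mark; // like or_fetch(smin)

	while (old != destroy_mark)
	{
		waiters.wait(old);     // like thread_ctrl::wait_on(lwmutex_waiters, old_val)
		old = waiters.load();
	}
}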
@@ -341,6 +394,8 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 
 		std::lock_guard lock(cond.mutex);
 
+		cond.lwmutex_waiters++;
+
 		const bool mutex_sleep = sstate.try_read<bool>().second;
 		sstate.clear();
 
@@ -510,6 +565,12 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
 		mutex->lwcond_waiters.notify_all();
 	}
 
+	if (--cond->lwmutex_waiters == smin)
+	{
+		// Notify the thread destroying lwcond on last waiter
+		cond->lwmutex_waiters.notify_all();
+	}
+
 	// Return cause
 	return not_an_error(ppu.gpr[3]);
 }
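The two hunks above are the waiter-side counterpart: _sys_lwcond_queue_wait increments lwmutex_waiters on entry, and on the way out the waiter that brings the counter back down to smin wakes the destroying thread. Continuing the sketch from the destroy hunk (same assumed waiters/destroy_mark globals; waiter_enter and waiter_leave are illustrative names):

// Waiter side: register before waiting, unregister afterwards, and wake the
// destroyer when the last waiter leaves (the counter returns to destroy_mark).
void waiter_enter()
{
	waiters++; // like cond.lwmutex_waiters++
}

void waiter_leave()
{
	if (waiters.fetch_sub(1) == destroy_mark + 1) // like --cond->lwmutex_waiters == smin
	{
		waiters.notify_all(); // like cond->lwmutex_waiters.notify_all()
	}
}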
@@ -33,6 +33,8 @@ struct lv2_lwcond final : lv2_obj
 	shared_mutex mutex;
 	ppu_thread* sq{};
 
+	atomic_t<s32> lwmutex_waiters = 0;
+
 	lv2_lwcond(u64 name, u32 lwid, u32 protocol, vm::ptr<sys_lwcond_t> control) noexcept
 		: name(std::bit_cast<be_t<u64>>(name))
 		, lwid(lwid)