Mirror of https://github.com/RPCS3/rpcs3.git
sys_cond/sys_mutex improved
Commit 8175630619 (parent 43d3ccce95)
@@ -75,7 +75,7 @@ public:
     bool is_current() const;
 
     // get internal thread pointer
-    const thread_ctrl_t* get_ctrl() const { return m_thread.get(); }
+    const thread_ctrl_t* get_thread_ctrl() const { return m_thread.get(); }
 };
 
 class autojoin_thread_t final : private thread_t
@@ -249,7 +249,7 @@ bool CPUThread::Signal()
     }
 }
 
-bool CPUThread::Signaled()
+bool CPUThread::Unsignal()
{
     // remove SIGNAL and return its old value
     return (m_state._and_not(CPU_STATE_SIGNAL) & CPU_STATE_SIGNAL) != 0;
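Note: the renamed Unsignal() makes the destructive read explicit: it atomically clears CPU_STATE_SIGNAL and reports whether the flag was set, so a pending signal is consumed exactly once. A minimal standalone sketch of the same test-and-clear idiom with std::atomic (toy names, not the emulator's types):

#include <atomic>
#include <cassert>
#include <cstdint>

constexpr std::uint32_t STATE_SIGNAL = 1u << 0;

struct toy_thread_state
{
    std::atomic<std::uint32_t> state{ 0 };

    // set the signal flag
    void signal() { state.fetch_or(STATE_SIGNAL); }

    // atomically clear the signal flag and return whether it was set before
    bool unsignal() { return (state.fetch_and(~STATE_SIGNAL) & STATE_SIGNAL) != 0; }
};

int main()
{
    toy_thread_state t;
    assert(!t.unsignal()); // nothing pending yet
    t.signal();
    assert(t.unsignal());  // the pending signal is consumed...
    assert(!t.unsignal()); // ...exactly once
}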
@@ -51,7 +51,7 @@ public:
     using thread_t::mutex;
     using thread_t::cv;
     using thread_t::is_current;
-    using thread_t::get_ctrl;
+    using thread_t::get_thread_ctrl;
 
 protected:
     CPUThread(CPUThreadType type, const std::string& name, std::function<std::string()> thread_name);
@@ -108,7 +108,7 @@ public:
     bool Signal();
 
     // test SIGNAL and reset
-    bool Signaled();
+    bool Unsignal();
 
     // process m_state flags, returns true if the checker must return
     bool CheckStatus();
@@ -535,7 +535,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
 u32 SPUThread::get_events(bool waiting)
 {
     // check reservation status and set SPU_EVENT_LR if lost
-    if (last_raddr != 0 && !vm::reservation_test(get_ctrl()))
+    if (last_raddr != 0 && !vm::reservation_test(get_thread_ctrl()))
     {
         ch_event_stat |= SPU_EVENT_LR;
@@ -63,7 +63,7 @@ enum : u32
     SPU_EVENT_SN = 0x2, // MFC List Command stall-and-notify event
     SPU_EVENT_TG = 0x1, // MFC Tag Group status update event
 
-    SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR, // Mask of implemented events
+    SPU_EVENT_IMPLEMENTED = SPU_EVENT_LR, // Mask of implemented events
 
     SPU_EVENT_WAITING = 0x80000000, // Originally unused, set when SPU thread starts waiting on ch_event_stat
     //SPU_EVENT_AVAILABLE = 0x40000000, // Originally unused, channel count of the SPU_RdEventStat channel
@@ -259,7 +259,7 @@ namespace vm
 
     void waiter_lock_t::wait()
     {
-        while (!m_waiter->thread->Signaled())
+        while (!m_waiter->thread->Unsignal())
         {
             if (m_waiter->pred())
             {
@@ -536,7 +536,10 @@ namespace vm
     {
         std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
 
-        g_tls_did_break_reservation = _reservation_break(g_reservation_addr);
+        if (g_reservation_owner && g_reservation_owner == get_current_thread_ctrl())
+        {
+            g_tls_did_break_reservation = _reservation_break(g_reservation_addr);
+        }
     }
 }
 
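Note: the reservation is now only broken when the calling thread actually owns it, so one thread can no longer clobber another thread's active reservation. A toy illustration of that ownership check under a lock (hypothetical names, not the emulator's vm API):

#include <cstdint>
#include <mutex>

namespace toy
{
    std::mutex g_res_mutex;
    const void* g_res_owner = nullptr; // thread that currently holds the reservation
    std::uint32_t g_res_addr = 0;

    bool break_reservation()
    {
        g_res_owner = nullptr;
        g_res_addr = 0;
        return true;
    }

    // drop the reservation only if the calling thread actually owns it
    bool reservation_free(const void* current_thread)
    {
        std::lock_guard<std::mutex> lock(g_res_mutex);

        if (g_res_owner && g_res_owner == current_thread)
        {
            return break_reservation();
        }

        return false; // no reservation, or it belongs to another thread
    }
}

int main()
{
    int thread_a = 0, thread_b = 0; // stand-ins for thread control blocks

    toy::g_res_owner = &thread_a;
    toy::g_res_addr = 0x10000;

    bool broke = toy::reservation_free(&thread_b); // false: not the owner
    broke = toy::reservation_free(&thread_a);      // true: owner frees its own reservation
    return broke ? 0 : 1;
}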
@@ -556,7 +559,10 @@ namespace vm
     // check and possibly break previous reservation
     if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
     {
-        _reservation_break(g_reservation_addr);
+        if (g_reservation_owner)
+        {
+            _reservation_break(g_reservation_addr);
+        }
 
         g_tls_did_break_reservation = true;
     }
@@ -6,31 +6,13 @@
 #include "Emu/CPU/CPUThread.h"
 #include "sleep_queue.h"
 
-sleep_queue_entry_t::sleep_queue_entry_t(CPUThread& cpu, sleep_queue_t& queue)
-    : m_queue(queue)
-    , m_thread(cpu)
+void sleep_queue_entry_t::add_entry()
 {
-    m_queue.emplace_back(cpu.shared_from_this());
-
-    m_thread.Sleep();
+    m_queue.emplace_back(m_thread.shared_from_this());
 }
 
-sleep_queue_entry_t::~sleep_queue_entry_t() noexcept(false)
+void sleep_queue_entry_t::remove_entry()
 {
-    m_thread.Awake();
-
-    if (m_queue.front().get() == &m_thread)
-    {
-        m_queue.pop_front();
-        return;
-    }
-
-    if (m_queue.back().get() == &m_thread)
-    {
-        m_queue.pop_back();
-        return;
-    }
-
     for (auto it = m_queue.begin(); it != m_queue.end(); it++)
     {
         if (it->get() == &m_thread)
@@ -39,9 +21,38 @@ sleep_queue_entry_t::~sleep_queue_entry_t() noexcept(false)
             return;
         }
     }
-
-    if (!std::uncaught_exception())
-    {
-        throw EXCEPTION("Thread not found");
-    }
 }
+
+bool sleep_queue_entry_t::find() const
+{
+    for (auto it = m_queue.begin(); it != m_queue.end(); it++)
+    {
+        if (it->get() == &m_thread)
+        {
+            return true;
+        }
+    }
+
+    return false;
+}
+
+sleep_queue_entry_t::sleep_queue_entry_t(CPUThread& cpu, sleep_queue_t& queue)
+    : m_thread(cpu)
+    , m_queue(queue)
+{
+    add_entry();
+    cpu.Sleep();
+}
+
+sleep_queue_entry_t::sleep_queue_entry_t(CPUThread& cpu, sleep_queue_t& queue, const defer_sleep_t&)
+    : m_thread(cpu)
+    , m_queue(queue)
+{
+    cpu.Sleep();
+}
+
+sleep_queue_entry_t::~sleep_queue_entry_t() noexcept(false)
+{
+    remove_entry();
+    m_thread.Awake();
+}
@@ -43,16 +43,43 @@ enum
 
 using sleep_queue_t = std::deque<std::shared_ptr<CPUThread>>;
 
-// automatic object handling adding threads to the sleep queue
+static struct defer_sleep_t{} const defer_sleep;
+
+// automatic object handling a thread entry in the sleep queue
 class sleep_queue_entry_t final
 {
     CPUThread& m_thread;
     sleep_queue_t& m_queue;
 
+    void add_entry();
+    void remove_entry();
+    bool find() const;
+
 public:
-    // adds specified thread to the sleep queue
+    // add specified thread to the sleep queue
     sleep_queue_entry_t(CPUThread& cpu, sleep_queue_t& queue);
 
-    // removes specified thread from the sleep queue
+    // don't add specified thread to the sleep queue
+    sleep_queue_entry_t(CPUThread& cpu, sleep_queue_t& queue, const defer_sleep_t&);
+
+    // removes specified thread from the sleep queue if added
     ~sleep_queue_entry_t() noexcept(false);
+
+    // add thread to the sleep queue
+    inline void enter()
+    {
+        add_entry();
+    }
+
+    // remove thread from the sleep queue
+    inline void leave()
+    {
+        remove_entry();
+    }
+
+    // check whether the thread exists in the sleep queue
+    inline explicit operator bool() const
+    {
+        return find();
+    }
 };
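Note: the reworked sleep_queue_entry_t is an RAII guard whose queue membership can be deferred (defer_sleep) and toggled mid-scope with enter()/leave(); sys_cond_wait below relies on this to migrate a timed-out waiter from the condition variable's queue to the mutex's queue. A simplified standalone sketch of the same pattern over a plain std::deque (toy types, not the emulator's CPUThread):

#include <algorithm>
#include <deque>
#include <memory>

struct toy_thread : std::enable_shared_from_this<toy_thread> {};

using toy_queue = std::deque<std::shared_ptr<toy_thread>>;

static struct defer_enqueue_t {} const defer_enqueue;

class toy_queue_entry
{
    toy_thread& m_thread;
    toy_queue& m_queue;

    void add_entry() { m_queue.emplace_back(m_thread.shared_from_this()); }

    void remove_entry()
    {
        const auto it = std::find_if(m_queue.begin(), m_queue.end(),
            [&](const std::shared_ptr<toy_thread>& p) { return p.get() == &m_thread; });
        if (it != m_queue.end()) m_queue.erase(it);
    }

public:
    // enqueue immediately
    toy_queue_entry(toy_thread& t, toy_queue& q) : m_thread(t), m_queue(q) { add_entry(); }

    // construct without enqueuing; enter() adds the thread later
    toy_queue_entry(toy_thread& t, toy_queue& q, const defer_enqueue_t&) : m_thread(t), m_queue(q) {}

    void enter() { add_entry(); }
    void leave() { remove_entry(); }

    // check whether the thread is currently queued
    explicit operator bool() const
    {
        return std::any_of(m_queue.begin(), m_queue.end(),
            [&](const std::shared_ptr<toy_thread>& p) { return p.get() == &m_thread; });
    }

    // always remove on destruction (no-op if not present)
    ~toy_queue_entry() { remove_entry(); }
};

int main()
{
    toy_queue q;
    auto t = std::make_shared<toy_thread>();

    toy_queue_entry entry(*t, q, defer_enqueue); // constructed, but not queued yet
    entry.enter();                               // now present in q
    const bool queued = static_cast<bool>(entry);
    entry.leave();                               // removed again

    return queued ? 0 : 1;
}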
@@ -12,6 +12,28 @@ SysCallBase sys_cond("sys_cond");
 
 extern u64 get_system_time();
 
+void lv2_cond_t::notify(lv2_lock_t& lv2_lock, sleep_queue_t::iterator it)
+{
+    CHECK_LV2_LOCK(lv2_lock);
+
+    auto& thread = *it;
+
+    if (mutex->owner)
+    {
+        // add thread to the mutex sleep queue if cannot lock immediately
+        mutex->sq.emplace_back(thread);
+    }
+    else
+    {
+        mutex->owner = thread;
+
+        if (!thread->Signal())
+        {
+            throw EXCEPTION("Thread already signaled");
+        }
+    }
+}
+
 s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribute_t> attr)
 {
     sys_cond.Warning("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)", cond_id, mutex_id, attr);
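Note: lv2_cond_t::notify() performs a direct hand-off: when the associated mutex is free the woken waiter becomes its owner and is signalled immediately, otherwise the waiter is parked on the mutex sleep queue and will be woken by a later unlock. A rough standalone sketch of that decision (simplified stand-in types, not the emulator's API):

#include <deque>
#include <memory>
#include <stdexcept>
#include <utility>

struct toy_thread
{
    bool signalled = false;

    // returns false if a signal was already pending
    bool signal() { return !std::exchange(signalled, true); }
};

struct toy_mutex
{
    std::shared_ptr<toy_thread> owner;          // current owner, empty if unlocked
    std::deque<std::shared_ptr<toy_thread>> sq; // threads waiting for the mutex
};

// wake one condition-variable waiter: either grant the mutex or queue it on the mutex
void notify_one(toy_mutex& mutex, const std::shared_ptr<toy_thread>& waiter)
{
    if (mutex.owner)
    {
        // mutex is held: the waiter must queue up for it instead of running
        mutex.sq.emplace_back(waiter);
    }
    else
    {
        // mutex is free: hand ownership straight to the waiter and wake it
        mutex.owner = waiter;

        if (!waiter->signal())
        {
            throw std::runtime_error("waiter already signalled");
        }
    }
}

int main()
{
    toy_mutex m;
    auto a = std::make_shared<toy_thread>();
    auto b = std::make_shared<toy_thread>();

    notify_one(m, a); // mutex free: a owns it and is signalled
    notify_one(m, b); // mutex now held by a: b is parked on m.sq
}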
@@ -54,7 +76,7 @@ s32 sys_cond_destroy(u32 cond_id)
         return CELL_ESRCH;
     }
 
-    if (!cond->sq.empty())
+    if (!cond->sq.empty() || cond.use_count() > 2)
     {
         return CELL_EBUSY;
     }
@@ -82,13 +104,11 @@ s32 sys_cond_signal(u32 cond_id)
         return CELL_ESRCH;
     }
 
-    for (auto& thread : cond->sq)
+    // signal one waiting thread; protocol is ignored in current implementation
+    if (!cond->sq.empty())
     {
-        // signal one waiting thread; protocol is ignored in current implementation
-        if (thread->Signal())
-        {
-            return CELL_OK;
-        }
+        cond->notify(lv2_lock, cond->sq.begin());
+        cond->sq.pop_front();
     }
 
     return CELL_OK;
@@ -107,15 +127,14 @@ s32 sys_cond_signal_all(u32 cond_id)
         return CELL_ESRCH;
     }
 
-    for (auto& thread : cond->sq)
+    // signal all waiting threads; protocol is ignored in current implementation
+    for (auto it = cond->sq.begin(); it != cond->sq.end(); it++)
     {
-        // signal all waiting threads; protocol is ignored in current implementation
-        if (thread->Signal())
-        {
-            ;
-        }
+        cond->notify(lv2_lock, it);
     }
 
+    cond->sq.clear();
+
     return CELL_OK;
 }
 
@@ -134,11 +153,13 @@ s32 sys_cond_signal_to(u32 cond_id, u32 thread_id)
 
     // TODO: check if CELL_ESRCH is returned if thread_id is invalid
 
-    for (auto& thread : cond->sq)
+    // signal specified thread (protocol is not required)
+    for (auto it = cond->sq.begin(); it != cond->sq.end(); it++)
     {
-        // signal specified thread
-        if (thread->GetId() == thread_id && thread->Signal())
+        if ((*it)->GetId() == thread_id)
         {
+            cond->notify(lv2_lock, it);
+            cond->sq.erase(it);
             return CELL_OK;
         }
     }
@@ -173,58 +194,54 @@ s32 sys_cond_wait(PPUThread& ppu, u32 cond_id, u64 timeout)
     // unlock the mutex
     cond->mutex->unlock(lv2_lock);
 
+    // add waiter; protocol is ignored in current implementation
+    sleep_queue_entry_t waiter(ppu, cond->sq);
+
+    // add empty mutex waiter (may be actually set later)
+    sleep_queue_entry_t mutex_waiter(ppu, cond->mutex->sq, defer_sleep);
+
+    while (!ppu.Unsignal())
     {
-        // add waiter; protocol is ignored in current implementation
-        sleep_queue_entry_t waiter(ppu, cond->sq);
-
-        while (!ppu.Signaled())
+        // timeout is ignored if waiting on the cond var is already dropped
+        if (timeout && waiter)
         {
-            if (timeout)
-            {
-                const u64 passed = get_system_time() - start_time;
+            const u64 passed = get_system_time() - start_time;
 
-                if (passed >= timeout || ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)) == std::cv_status::timeout)
+            if (passed >= timeout)
             {
+                // try to reown mutex and exit if timed out
+                if (!cond->mutex->owner)
+                {
+                    cond->mutex->owner = ppu.shared_from_this();
                     break;
                 }
-            }
-            else
-            {
-                ppu.cv.wait(lv2_lock);
+
+                // drop condition variable and start waiting on the mutex queue
+                mutex_waiter.enter();
+                waiter.leave();
+                continue;
             }
 
-            CHECK_EMU_STATUS;
+            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
        }
-    }
-
-    // reown the mutex (could be set when notified)
-    if (!cond->mutex->owner)
-    {
-        cond->mutex->owner = ppu.shared_from_this();
-    }
-
-    if (cond->mutex->owner.get() != &ppu)
-    {
-        // add waiter; protocol is ignored in current implementation
-        sleep_queue_entry_t waiter(ppu, cond->mutex->sq);
-
-        while (!ppu.Signaled())
+        else
         {
            ppu.cv.wait(lv2_lock);
-
-            CHECK_EMU_STATUS;
        }
 
-        if (cond->mutex->owner.get() != &ppu)
-        {
-            throw EXCEPTION("Unexpected mutex owner");
-        }
+        CHECK_EMU_STATUS;
     }
 
+    // mutex owner is restored after notification or unlocking
+    if (cond->mutex->owner.get() != &ppu)
+    {
+        throw EXCEPTION("Unexpected mutex owner");
+    }
+
     // restore the recursive value
     cond->mutex->recursive_count = recursive_value;
 
-    // check timeout
+    // check timeout (unclear)
     if (timeout && get_system_time() - start_time > timeout)
     {
         return CELL_ETIMEDOUT;
@@ -31,6 +31,8 @@ struct lv2_cond_t
         , name(name)
     {
     }
+
+    void notify(lv2_lock_t& lv2_lock, sleep_queue_t::iterator it);
 };
 
 REG_ID_TYPE(lv2_cond_t, 0x86); // SYS_COND_OBJECT
@@ -45,7 +45,12 @@ s32 sys_mutex_create(vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
     case SYS_SYNC_FIFO: break;
     case SYS_SYNC_PRIORITY: break;
     case SYS_SYNC_PRIORITY_INHERIT: break;
-    default: sys_mutex.Error("sys_mutex_create(): unknown protocol (0x%x)", protocol); return CELL_EINVAL;
+
+    default:
+    {
+        sys_mutex.Error("sys_mutex_create(): unknown protocol (0x%x)", protocol);
+        return CELL_EINVAL;
+    }
     }
 
     const bool recursive = attr->recursive == SYS_SYNC_RECURSIVE;
@@ -135,7 +140,7 @@ s32 sys_mutex_lock(PPUThread& ppu, u32 mutex_id, u64 timeout)
     // add waiter; protocol is ignored in current implementation
     sleep_queue_entry_t waiter(ppu, mutex->sq);
 
-    while (!ppu.Signaled())
+    while (!ppu.Unsignal())
     {
         CHECK_EMU_STATUS;
 
@@ -143,10 +148,12 @@ s32 sys_mutex_lock(PPUThread& ppu, u32 mutex_id, u64 timeout)
         {
             const u64 passed = get_system_time() - start_time;
 
-            if (passed >= timeout || ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed)) == std::cv_status::timeout)
+            if (passed >= timeout)
             {
                 return CELL_ETIMEDOUT;
             }
+
+            ppu.cv.wait_for(lv2_lock, std::chrono::microseconds(timeout - passed));
         }
         else
         {
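Note: both sys_mutex_lock and sys_cond_wait now treat a wakeup from wait_for as just another iteration: the elapsed time is recomputed from the saved start time on every pass, and only an elapsed deadline ends the wait, so spurious wakeups no longer count as timeouts. A minimal standalone sketch of that loop shape with std::condition_variable (illustrative helper, not the emulator's code):

#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

// Wait until `ready` becomes true or `timeout_us` microseconds elapse (0 = wait forever).
// Returns true on success, false on timeout.
bool timed_wait(std::mutex& m, std::condition_variable& cv, bool& ready, std::uint64_t timeout_us)
{
    std::unique_lock<std::mutex> lock(m);

    const auto start = std::chrono::steady_clock::now();

    while (!ready)
    {
        const auto passed = static_cast<std::uint64_t>(
            std::chrono::duration_cast<std::chrono::microseconds>(
                std::chrono::steady_clock::now() - start).count());

        if (timeout_us && passed >= timeout_us)
        {
            return false; // deadline elapsed; only now do we give up
        }

        if (timeout_us)
        {
            // a timeout here is not final: the loop re-checks the predicate and the deadline
            cv.wait_for(lock, std::chrono::microseconds(timeout_us - passed));
        }
        else
        {
            cv.wait(lock);
        }
    }

    return true;
}

int main()
{
    std::mutex m;
    std::condition_variable cv;
    bool ready = false;

    std::thread t([&] {
        std::this_thread::sleep_for(std::chrono::milliseconds(10));
        { std::lock_guard<std::mutex> lock(m); ready = true; }
        cv.notify_one();
    });

    const bool ok = timed_wait(m, cv, ready, 1000000); // 1 second budget
    t.join();
    return ok ? 0 : 1;
}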
@@ -28,8 +28,8 @@ struct lv2_mutex_t
     const u64 name;
 
     std::atomic<u32> cond_count{ 0 }; // count of condition variables associated
-    std::atomic<u32> recursive_count{ 0 };
-    std::shared_ptr<CPUThread> owner;
+    std::atomic<u32> recursive_count{ 0 }; // count of recursive locks
+    std::shared_ptr<CPUThread> owner; // current mutex owner
 
     sleep_queue_t sq;
 