sys_mmapper: rewrite page fault thread notifications

* Fix a corner case where an SPU thread has the same ID as a PPU thread.
* Fix a potential deadlock on Emu.Stop() while sending an event in the EBUSY retry loop.
* Use thread-specific notifications instead of a shared condition variable.
Eladash 2020-05-21 19:47:47 +03:00 committed by Ivan
parent 249686708c
commit d86c9a2549
5 changed files with 39 additions and 31 deletions
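
Before the diffs, a minimal standalone sketch of the first bullet's fix: the page-fault table is now keyed by the faulting thread's object pointer rather than its numeric ID, so a PPU thread and an SPU thread that happen to share an ID can no longer collide in the table. The type and helpers below are illustrative stand-ins, not RPCS3's actual classes, and the shared_mutex guarding the real table is omitted.

#include <cstdint>
#include <unordered_map>

// Hypothetical stand-in for cpu_thread; not RPCS3's actual class.
struct toy_cpu_thread { std::uint32_t id; };

// Old scheme: std::unordered_map<u32, u32> keyed by thread ID, so a PPU thread and
// an SPU thread sharing an ID collide. New scheme: key by the object's address,
// which is unique per live thread object regardless of overlapping ID spaces.
std::unordered_map<toy_cpu_thread*, std::uint32_t> page_fault_events;

void record_page_fault(toy_cpu_thread* faulting_thread, std::uint32_t fault_addr)
{
	page_fault_events.emplace(faulting_thread, fault_addr);
}

bool recover_page_fault(toy_cpu_thread* thread)
{
	// Erasing by pointer can only ever hit the intended thread's entry.
	return page_fault_events.erase(thread) != 0;
}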


@@ -1442,13 +1442,19 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context) noexcept
 			}
 		}
 
+		// Deschedule
+		if (cpu->id_type() == 1)
+		{
+			lv2_obj::sleep(*cpu);
+		}
+
 		// Now, place the page fault event onto table so that other functions [sys_mmapper_free_address and pagefault recovery funcs etc]
 		// know that this thread is page faulted and where.
 		auto pf_events = g_fxo->get<page_fault_event_entries>();
 		{
 			std::lock_guard pf_lock(pf_events->pf_mutex);
-			pf_events->events.emplace(static_cast<u32>(data2), addr);
+			pf_events->events.emplace(cpu, addr);
 		}
 
 		sig_log.warning("Page_fault %s location 0x%x because of %s memory", is_writing ? "writing" : "reading",
@@ -1467,38 +1473,32 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context) noexcept
 		// If we fail due to being busy, wait a bit and try again.
 		while (static_cast<u32>(sending_error) == CELL_EBUSY)
 		{
-			if (cpu->id_type() == 1)
+			if (cpu->is_stopped())
 			{
-				lv2_obj::sleep(*cpu, 1000);
+				sending_error = {};
+				break;
 			}
 
 			thread_ctrl::wait_for(1000);
 			sending_error = sys_event_port_send(pf_port_id, data1, data2, data3);
 		}
 
-		if (cpu->id_type() == 1)
-		{
-			// Deschedule
-			lv2_obj::sleep(*cpu);
-		}
-
 		if (sending_error)
 		{
-			vm_log.fatal("Unknown error %x while trying to pass page fault.", +sending_error);
+			vm_log.fatal("Unknown error 0x%x while trying to pass page fault.", +sending_error);
+			cpu->state += cpu_flag::dbg_pause;
 		}
-
-		// Wait until the thread is recovered
-		for (std::shared_lock pf_lock(pf_events->pf_mutex);
-			pf_events->events.count(static_cast<u32>(data2)) && !sending_error;)
+		else
 		{
-			if (cpu->is_stopped())
+			// Wait until the thread is recovered
+			while (!cpu->state.test_and_reset(cpu_flag::signal))
 			{
-				break;
-			}
+				if (cpu->is_stopped())
+				{
+					break;
+				}
 
-			// Timeout in case the emulator is stopping
-			pf_events->cond.wait(pf_lock, 10000);
+				thread_ctrl::wait();
+			}
 		}
 
 		// Reschedule, test cpu state and try recovery if stopped
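
A condensed sketch of the control flow this hunk converges on: keep retrying while the event port reports EBUSY, but bail out as soon as the thread is told to stop (the Emu.Stop() deadlock fix), then park on a per-thread state flag instead of a shared condition variable. Everything below uses placeholder names and C++20 atomics, not RPCS3's cpu_thread / thread_ctrl / sys_event_port_send APIs.

#include <atomic>
#include <chrono>
#include <cstdint>
#include <thread>

// Placeholder per-thread state bits, loosely modelled on cpu_flag::signal / stop.
enum state_bits : std::uint32_t { flag_signal = 1u << 0, flag_stop = 1u << 1 };

std::atomic<std::uint32_t> state{0};

enum class send_status { ok, busy, failed };

// Fake event-port send that reports "busy" a few times before succeeding.
send_status try_send_page_fault_event()
{
	static std::atomic<int> attempts{0};
	return attempts++ < 3 ? send_status::busy : send_status::ok;
}

// Recovery path: wake exactly this thread.
void signal_recovered()
{
	state.fetch_or(flag_signal);
	state.notify_one();
}

// Emulator shutdown path: the parked thread must observe this and leave.
void request_stop()
{
	state.fetch_or(flag_stop);
	state.notify_one();
}

void notify_page_fault_and_wait()
{
	send_status st = try_send_page_fault_event();

	// If sending fails with "busy", back off and retry -- but bail out as soon as
	// a stop is requested, so shutdown is never blocked by this loop.
	while (st == send_status::busy)
	{
		if (state.load() & flag_stop)
		{
			st = send_status::ok;   // treat as handled; do not deadlock shutdown
			break;
		}

		std::this_thread::sleep_for(std::chrono::microseconds(1000));
		st = try_send_page_fault_event();
	}

	if (st != send_status::ok)
		return;   // the real code logs a fatal error and pauses the thread here

	// Park until the recovery path sets the signal bit (or a stop arrives).
	for (;;)
	{
		const std::uint32_t s = state.load();

		if (s & flag_stop)
			break;

		if (s & flag_signal)
		{
			state.fetch_and(~static_cast<std::uint32_t>(flag_signal));  // consume, like test_and_reset
			break;
		}

		state.wait(s);   // C++20 atomic wait: sleep until the state word changes
	}
}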


@@ -739,13 +739,13 @@ error_code sys_mmapper_enable_page_fault_notification(ppu_thread& ppu, u32 start_addr, u32 event_queue_id)
 	return CELL_OK;
 }
 
-error_code mmapper_thread_recover_page_fault(u32 id)
+error_code mmapper_thread_recover_page_fault(cpu_thread* cpu)
 {
 	// We can only wake a thread if it is being suspended for a page fault.
 	auto pf_events = g_fxo->get<page_fault_event_entries>();
 	{
 		std::lock_guard pf_lock(pf_events->pf_mutex);
-		auto pf_event_ind = pf_events->events.find(id);
+		const auto pf_event_ind = pf_events->events.find(cpu);
 
 		if (pf_event_ind == pf_events->events.end())
 		{
@@ -756,6 +756,15 @@ error_code mmapper_thread_recover_page_fault(u32 id)
 		pf_events->events.erase(pf_event_ind);
 	}
 
-	pf_events->cond.notify_all();
+	if (cpu->id_type() == 1u)
+	{
+		lv2_obj::awake(cpu);
+	}
+	else
+	{
+		cpu->state += cpu_flag::signal;
+		cpu->notify();
+	}
+
 	return CELL_OK;
 }
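
A compressed sketch of what the recovery path above now does: drop the entry from the pointer-keyed table under the lock, then wake exactly the faulting thread, a PPU thread via the scheduler and anything else via a signal flag plus a direct notify. Names and types are placeholders, not the real cpu_thread / lv2_obj API.

#include <atomic>
#include <cstdint>
#include <mutex>
#include <unordered_map>

// Hypothetical stand-ins; the real code uses cpu_thread, lv2_obj::awake() and cpu_flag::signal.
struct toy_thread
{
	bool is_ppu = false;
	std::atomic<bool> signal{false};
};

std::mutex table_mutex;
std::unordered_map<toy_thread*, std::uint32_t> page_fault_events;

// PPU path placeholder: hand the thread back to the scheduler.
void scheduler_awake(toy_thread*) {}

// Returns false when the thread was not suspended on a page fault
// (the real syscall returns CELL_EINVAL in that case).
bool recover_page_fault(toy_thread* thread)
{
	{
		std::lock_guard lock(table_mutex);

		if (page_fault_events.erase(thread) == 0)
			return false;
	}

	if (thread->is_ppu)
	{
		scheduler_awake(thread);      // PPU threads resume through the scheduler
	}
	else
	{
		thread->signal = true;        // SPU / raw SPU threads: set the flag...
		thread->signal.notify_one();  // ...and wake only this thread
	}

	return true;
}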


@@ -58,10 +58,9 @@ struct page_fault_notification_entries
 struct page_fault_event_entries
 {
-	// First = thread id, second = addr
-	std::unordered_map<u32, u32> events;
+	// First = thread, second = addr
+	std::unordered_map<class cpu_thread*, u32> events;
 	shared_mutex pf_mutex;
-	cond_variable cond;
 };
 
 struct mmapper_unk_entry_struct0
@@ -76,7 +75,7 @@ struct mmapper_unk_entry_struct0
 // Aux
 class ppu_thread;
 
-error_code mmapper_thread_recover_page_fault(u32 id);
+error_code mmapper_thread_recover_page_fault(cpu_thread* cpu);
 
 // SysCalls
 error_code sys_mmapper_allocate_address(ppu_thread&, u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr);


@@ -551,7 +551,7 @@ error_code sys_ppu_thread_recover_page_fault(u32 thread_id)
 		return CELL_ESRCH;
 	}
 
-	return mmapper_thread_recover_page_fault(thread_id);
+	return mmapper_thread_recover_page_fault(thread.ptr.get());
 }
 
 error_code sys_ppu_thread_get_page_fault_context(u32 thread_id, vm::ptr<sys_ppu_thread_icontext_t> ctxt)
@@ -572,7 +572,7 @@ error_code sys_ppu_thread_get_page_fault_context(u32 thread_id, vm::ptr<sys_ppu_thread_icontext_t> ctxt)
 	auto pf_events = g_fxo->get<page_fault_event_entries>();
 	std::shared_lock lock(pf_events->pf_mutex);
-	const auto evt = pf_events->events.find(thread_id);
+	const auto evt = pf_events->events.find(thread.ptr.get());
 
 	if (evt == pf_events->events.end())
 	{
 		return CELL_EINVAL;


@@ -1790,7 +1790,7 @@ error_code sys_spu_thread_recover_page_fault(ppu_thread& ppu, u32 id)
 		return CELL_ESRCH;
 	}
 
-	return mmapper_thread_recover_page_fault(id);
+	return mmapper_thread_recover_page_fault(thread);
 }
 
 error_code sys_raw_spu_recover_page_fault(ppu_thread& ppu, u32 id)
@@ -1806,7 +1806,7 @@ error_code sys_raw_spu_recover_page_fault(ppu_thread& ppu, u32 id)
 		return CELL_ESRCH;
 	}
 
-	return mmapper_thread_recover_page_fault(id);
+	return mmapper_thread_recover_page_fault(thread.get());
 }
 
 error_code sys_raw_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<void> attr)