Implement SPU page fault notifications

* Implement both RawSPU and threaded SPU page fault recovery
* Guard page_fault_notification_entries access with a mutex
* Add missing lock in sys_ppu_thread_recover_page_fault/get_page_fault_context
* Fix EINVAL check in sys_ppu_thread_recover_page_fault; previously, when the event was not found, begin() was erased and
CELL_OK was returned.
* Fixed page fault recovery waiting logic:
- Do not rely on a single thread_ctrl notification (unsafe)
- Avoided a race where ::awake(ppu) can be called before ::sleep(ppu) therefore nop-ing out the notification
* Avoid inconsistencies with vm flags on page fault cause detection
* Fix sys_mmapper_enable_page_fault_notification EBUSY check:
from RE, it is allowed to register the same queue twice (on a different area), but not to enable page fault notifications twice
This commit is contained in:
eladash 2019-02-17 17:53:38 +02:00 committed by Ivan
parent 1875dc3f18
commit 4a28319edf
7 changed files with 212 additions and 99 deletions

View File

@ -1,7 +1,8 @@
#include "stdafx.h"
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/SPUThread.h"
#include "Emu/Cell/RawSPUThread.h"
#include "Emu/Cell/lv2/sys_mmapper.h"
#include "Emu/Cell/lv2/sys_event.h"
@ -1273,59 +1274,126 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)
if (cpu)
{
// TODO: Only PPU thread page fault notifications are supported
if (cpu->id_type() == 1 && fxm::check<page_fault_notification_entries>())
u32 pf_port_id = 0;
if (auto pf_entries = fxm::get<page_fault_notification_entries>())
{
for (const auto& entry : fxm::get<page_fault_notification_entries>()->entries)
std::shared_lock lock(pf_entries->mutex);
for (const auto& entry : pf_entries->entries)
{
auto mem = vm::get(vm::any, entry.start_addr);
if (!mem)
if (auto mem = vm::get(vm::any, entry.start_addr))
{
continue;
}
if (entry.start_addr <= addr && addr <= addr + mem->size - 1)
{
// Place the page fault event onto table so that other functions [sys_mmapper_free_address and ppu pagefault funcs]
// know that this thread is page faulted and where.
auto pf_entries = fxm::get_always<page_fault_event_entries>();
if (entry.start_addr <= addr && addr <= addr + mem->size - 1)
{
std::lock_guard pf_lock(pf_entries->pf_mutex);
page_fault_event pf_event{ cpu->id, addr };
pf_entries->events.emplace_back(pf_event);
pf_port_id = entry.port_id;
break;
}
// Now, we notify the game that a page fault occurred so it can rectify it.
// Note, for data3, were the memory readable AND we got a page fault, it must be due to a write violation since reads are allowed.
u64 data1 = addr;
u64 data2 = ((u64)SYS_MEMORY_PAGE_FAULT_TYPE_PPU_THREAD << 32) + cpu->id;
u64 data3 = vm::check_addr(addr, a_size, vm::page_readable) ? SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY : SYS_MEMORY_PAGE_FAULT_CAUSE_NON_MAPPED;
LOG_ERROR(MEMORY, "Page_fault %s location 0x%x because of %s memory", is_writing ? "writing" : "reading",
addr, data3 == SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY ? "writing read-only" : "using unmapped");
error_code sending_error = sys_event_port_send(entry.port_id, data1, data2, data3);
// If we fail due to being busy, wait a bit and try again.
while (sending_error == CELL_EBUSY)
{
lv2_obj::sleep(*cpu, 1000);
thread_ctrl::wait_for(1000);
sending_error = sys_event_port_send(entry.port_id, data1, data2, data3);
}
if (sending_error)
{
fmt::throw_exception("Unknown error %x while trying to pass page fault.", sending_error.value);
}
lv2_obj::sleep(*cpu);
thread_ctrl::wait();
return true;
}
}
}
if (pf_port_id)
{
// We notify the game that a page fault occurred so it can rectify it.
// Note, for data3, were the memory readable AND we got a page fault, it must be due to a write violation since reads are allowed.
u64 data1 = addr;
u64 data2;
if (cpu->id_type() == 1)
{
data2 = (SYS_MEMORY_PAGE_FAULT_TYPE_PPU_THREAD << 32) | cpu->id;
}
else if (static_cast<spu_thread*>(cpu)->group)
{
data2 = (SYS_MEMORY_PAGE_FAULT_TYPE_SPU_THREAD << 32) | cpu->id;
}
else
{
// Index is the correct ID in RawSPU
data2 = (SYS_MEMORY_PAGE_FAULT_TYPE_RAW_SPU << 32) | static_cast<spu_thread*>(cpu)->index;
}
u64 data3;
{
vm::reader_lock rlock;
if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), vm::page_allocated | (is_writing ? vm::page_writable : vm::page_readable)))
{
// Memory was allocated inbetween, retry
return true;
}
else if (vm::check_addr(addr, std::max<std::size_t>(1, d_size), vm::page_allocated | vm::page_readable))
{
data3 = SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY; // TODO
}
else
{
data3 = SYS_MEMORY_PAGE_FAULT_CAUSE_NON_MAPPED;
}
}
// Now, place the page fault event onto table so that other functions [sys_mmapper_free_address and pagefault recovery funcs etc]
// know that this thread is page faulted and where.
auto pf_events = fxm::get_always<page_fault_event_entries>();
{
std::lock_guard pf_lock(pf_events->pf_mutex);
pf_events->events.emplace(static_cast<u32>(data2), addr);
}
LOG_ERROR(MEMORY, "Page_fault %s location 0x%x because of %s memory", is_writing ? "writing" : "reading",
addr, data3 == SYS_MEMORY_PAGE_FAULT_CAUSE_READ_ONLY ? "writing read-only" : "using unmapped");
error_code sending_error = sys_event_port_send(pf_port_id, data1, data2, data3);
// If we fail due to being busy, wait a bit and try again.
while (sending_error == CELL_EBUSY)
{
if (cpu->id_type() == 1)
{
lv2_obj::sleep(*cpu, 1000);
}
thread_ctrl::wait_for(1000);
sending_error = sys_event_port_send(pf_port_id, data1, data2, data3);
}
if (sending_error)
{
fmt::throw_exception("Unknown error %x while trying to pass page fault.", sending_error.value);
}
if (cpu->id_type() == 1)
{
// Deschedule
lv2_obj::sleep(*cpu);
}
// Wait until the thread is recovered
for (std::shared_lock pf_lock(pf_events->pf_mutex);
pf_events->events.find(static_cast<u32>(data2)) != pf_events->events.end();)
{
if (Emu.IsStopped())
{
break;
}
// Timeout in case the emulator is stopping
pf_events->cond.wait(pf_lock, 10000);
}
// Reschedule
cpu->test_stopped();
if (Emu.IsStopped())
{
// Hack: allocate memory in case the emulator is stopping
vm::falloc(addr & -0x10000, 0x10000);
}
return true;
}
vm::temporary_unlock(*cpu);
}

View File

@ -257,8 +257,8 @@ const std::array<ppu_function_t, 1024> s_ppu_syscall_table
uns_func, //195 (0x0C3) UNS
BIND_FUNC(sys_raw_spu_set_spu_cfg), //196 (0x0C4)
BIND_FUNC(sys_raw_spu_get_spu_cfg), //197 (0x0C5)
null_func,//BIND_FUNC(sys_spu_thread_recover_page_fault)//198 (0x0C6)
null_func,//BIND_FUNC(sys_raw_spu_recover_page_fault) //199 (0x0C7)
BIND_FUNC(sys_spu_thread_recover_page_fault), //198 (0x0C6)
BIND_FUNC(sys_raw_spu_recover_page_fault), //199 (0x0C7)
null_func, null_func, null_func, null_func, null_func, //204 UNS?
null_func, null_func, null_func, null_func, null_func, //209 UNS?

View File

@ -1,4 +1,4 @@
#include "stdafx.h"
#include "stdafx.h"
#include "Emu/Cell/PPUThread.h"
#include "sys_ppu_thread.h"
#include "Emu/Cell/lv2/sys_event.h"
@ -202,7 +202,7 @@ error_code sys_mmapper_free_address(u32 addr)
for (const auto& ev : pf_events->events)
{
auto mem = vm::get(vm::any, addr);
if (mem && addr <= ev.fault_addr && ev.fault_addr <= addr + mem->size - 1)
if (mem && addr <= ev.second && ev.second <= addr + mem->size - 1)
{
return CELL_EBUSY;
}
@ -223,6 +223,8 @@ error_code sys_mmapper_free_address(u32 addr)
// If a memory block is freed, remove it from page notification table.
auto pf_entries = fxm::get_always<page_fault_notification_entries>();
std::lock_guard lock(pf_entries->mutex);
auto ind_to_remove = pf_entries->entries.begin();
for (; ind_to_remove != pf_entries->entries.end(); ++ind_to_remove)
{
@ -403,9 +405,9 @@ error_code sys_mmapper_enable_page_fault_notification(u32 start_addr, u32 event_
sys_mmapper.warning("sys_mmapper_enable_page_fault_notification(start_addr=0x%x, event_queue_id=0x%x)", start_addr, event_queue_id);
auto mem = vm::get(vm::any, start_addr);
if (!mem)
if (!mem || start_addr != mem->addr || start_addr < 0x20000000 || start_addr >= 0xC0000000)
{
return CELL_EINVAL;
return {CELL_EINVAL, start_addr};
}
// TODO: Check memory region's flags to make sure the memory can be used for page faults.
@ -417,17 +419,6 @@ error_code sys_mmapper_enable_page_fault_notification(u32 start_addr, u32 event_
return CELL_ESRCH;
}
auto pf_entries = fxm::get_always<page_fault_notification_entries>();
// We're not allowed to have the same queue registered more than once for page faults.
for (const auto& entry : pf_entries->entries)
{
if (entry.event_queue_id == event_queue_id)
{
return CELL_ESRCH;
}
}
vm::var<u32> port_id(0);
error_code res = sys_event_port_create(port_id, SYS_EVENT_PORT_LOCAL, SYS_MEMORY_PAGE_FAULT_EVENT_KEY);
sys_event_port_connect_local(port_id->value(), event_queue_id);
@ -437,8 +428,44 @@ error_code sys_mmapper_enable_page_fault_notification(u32 start_addr, u32 event_
return CELL_EAGAIN;
}
auto pf_entries = fxm::get_always<page_fault_notification_entries>();
std::unique_lock lock(pf_entries->mutex);
// Return error code if page fault notifications are already enabled
for (const auto& entry : pf_entries->entries)
{
if (entry.start_addr == start_addr)
{
lock.unlock();
sys_event_port_disconnect(port_id->value());
sys_event_port_destroy(port_id->value());
return CELL_EBUSY;
}
}
page_fault_notification_entry entry{ start_addr, event_queue_id, port_id->value() };
pf_entries->entries.emplace_back(entry);
return CELL_OK;
}
CellError mmapper_thread_recover_page_fault(u32 id)
{
	// A thread may only be woken up if it is currently suspended on a page fault.
	const auto pf_events = fxm::get_always<page_fault_event_entries>();

	{
		std::lock_guard pf_lock(pf_events->pf_mutex);

		const auto found = pf_events->events.find(id);

		if (found == pf_events->events.end())
		{
			// No pending page fault event registered for this thread id
			return CELL_EINVAL;
		}

		// Remove the entry; the faulting thread polls this table to know it was recovered
		pf_events->events.erase(found);
	}

	// Wake all waiters outside the lock; each one rechecks the event table
	pf_events->cond.notify_all();
	return CellError(CELL_OK);
}

View File

@ -1,4 +1,4 @@
#pragma once
#pragma once
#include "sys_sync.h"
#include <vector>
@ -45,20 +45,20 @@ struct page_fault_notification_entry
struct page_fault_notification_entries
{
std::vector<page_fault_notification_entry> entries;
};
struct page_fault_event
{
u32 thread_id;
u32 fault_addr;
shared_mutex mutex;
};
struct page_fault_event_entries
{
std::vector<page_fault_event> events;
// First = thread id, second = addr
std::unordered_map<u32, u32> events;
shared_mutex pf_mutex;
cond_variable cond;
};
// helper function
CellError mmapper_thread_recover_page_fault(u32 id);
// SysCalls
error_code sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32> alloc_addr);
error_code sys_mmapper_allocate_fixed_address();

View File

@ -1,4 +1,4 @@
#include "stdafx.h"
#include "stdafx.h"
#include "Emu/Memory/vm.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
@ -420,27 +420,11 @@ error_code sys_ppu_thread_recover_page_fault(u32 thread_id)
return CELL_ESRCH;
}
// We can only wake a thread if it is being suspended for a page fault.
auto pf_events = fxm::get_always<page_fault_event_entries>();
auto pf_event_ind = pf_events->events.begin();
for (auto event_ind = pf_events->events.begin(); event_ind != pf_events->events.end(); ++event_ind)
if (auto res = mmapper_thread_recover_page_fault(thread_id))
{
if (event_ind->thread_id == thread_id)
{
pf_event_ind = event_ind;
break;
}
return res;
}
if (pf_event_ind == pf_events->events.end())
{ // if not found...
return CELL_EINVAL;
}
pf_events->events.erase(pf_event_ind);
lv2_obj::awake(*thread);
return CELL_OK;
}
@ -457,17 +441,10 @@ error_code sys_ppu_thread_get_page_fault_context(u32 thread_id, vm::ptr<sys_ppu_
// We can only get a context if the thread is being suspended for a page fault.
auto pf_events = fxm::get_always<page_fault_event_entries>();
std::shared_lock lock(pf_events->pf_mutex);
bool found = false;
for (const auto& ev : pf_events->events)
{
if (ev.thread_id == thread_id)
{
found = true;
break;
}
}
if (!found)
const auto evt = pf_events->events.find(thread_id);
if (evt == pf_events->events.end())
{
return CELL_EINVAL;
}

View File

@ -12,6 +12,7 @@
#include "Emu/Cell/PPUThread.h"
#include "Emu/Cell/RawSPUThread.h"
#include "sys_interrupt.h"
#include "sys_mmapper.h"
#include "sys_event.h"
#include "sys_spu.h"
@ -1253,6 +1254,44 @@ error_code sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
return CELL_OK;
}
error_code sys_spu_thread_recover_page_fault(u32 id)
{
	sys_spu.warning("sys_spu_thread_recover_page_fault(id=0x%x)", id);

	// A threaded SPU must exist and belong to a thread group
	const auto thread = idm::get<named_thread<spu_thread>>(id);

	if (UNLIKELY(!thread || !thread->group))
	{
		return CELL_ESRCH;
	}

	// Forward to the shared recovery helper; propagate its error (e.g. EINVAL) on failure
	const auto res = mmapper_thread_recover_page_fault(id);

	if (res)
	{
		return res;
	}

	return CELL_OK;
}
error_code sys_raw_spu_recover_page_fault(u32 id)
{
	sys_spu.warning("sys_raw_spu_recover_page_fault(id=0x%x)", id);

	// Translate the RawSPU index into the internal thread id; a RawSPU has no group
	const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));

	if (UNLIKELY(!thread || thread->group))
	{
		return CELL_ESRCH;
	}

	// Forward to the shared recovery helper; propagate its error (e.g. EINVAL) on failure
	const auto res = mmapper_thread_recover_page_fault(id);

	if (res)
	{
		return res;
	}

	return CELL_OK;
}
error_code sys_raw_spu_create(vm::ptr<u32> id, vm::ptr<void> attr)
{
sys_spu.warning("sys_raw_spu_create(id=*0x%x, attr=*0x%x)", id, attr);

View File

@ -317,6 +317,7 @@ error_code sys_spu_thread_disconnect_event(u32 id, u32 event_type, u8 spup);
error_code sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num);
error_code sys_spu_thread_unbind_queue(u32 id, u32 spuq_num);
error_code sys_spu_thread_get_exit_status(u32 id, vm::ptr<u32> status);
error_code sys_spu_thread_recover_page_fault(u32 id);
error_code sys_raw_spu_create(vm::ptr<u32> id, vm::ptr<void> attr);
error_code sys_raw_spu_destroy(ppu_thread& ppu, u32 id);
@ -328,3 +329,4 @@ error_code sys_raw_spu_get_int_stat(u32 id, u32 class_id, vm::ptr<u64> stat);
error_code sys_raw_spu_read_puint_mb(u32 id, vm::ptr<u32> value);
error_code sys_raw_spu_set_spu_cfg(u32 id, u32 value);
error_code sys_raw_spu_get_spu_cfg(u32 id, vm::ptr<u32> value);
error_code sys_raw_spu_recover_page_fault(u32 id);