RSX: move g_dma_manager to g_fxo

Nekotekina 2020-02-23 13:12:31 +03:00
parent fa0bf6a92c
commit 7069e7265f
7 changed files with 15 additions and 19 deletions
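
The commit deletes the file-scope global rsx::g_dma_manager and makes the DMA manager an object owned by the emulator's fixed-object map, so every call site below now reads g_fxo->get<rsx::dma_manager>()->... instead of touching a global. As a rough, hypothetical sketch of that access pattern (RPCS3's real g_fxo is more elaborate, with construction and teardown tied to emulator start/stop), a fixed-object map boils down to one owned slot per type:

// Hypothetical sketch of a fixed-object map with a g_fxo-style get<T>() accessor;
// not RPCS3's implementation, only the shape of the call sites introduced here.
#include <memory>
#include <typeindex>
#include <typeinfo>
#include <unordered_map>
#include <utility>

class fixed_objects
{
	std::unordered_map<std::type_index, std::shared_ptr<void>> m_slots;

public:
	// Create the single instance of T owned by the map (done once during emu startup)
	template <typename T, typename... Args>
	T* init(Args&&... args)
	{
		auto obj = std::make_shared<T>(std::forward<Args>(args)...);
		m_slots[typeid(T)] = obj;
		return obj.get();
	}

	// Fetch the instance of T; nullptr if init<T>() was never called
	template <typename T>
	T* get() const
	{
		const auto found = m_slots.find(typeid(T));
		return found != m_slots.end() ? static_cast<T*>(found->second.get()) : nullptr;
	}
};

// Stand-in for the emulator-owned pointer, matching the g_fxo->get<...>() syntax below
inline fixed_objects* g_fxo = new fixed_objects;

With such a map, the manager's lifetime follows the emulator's init/stop sequence rather than static initialization order, which is presumably the point of moving it off a plain global.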

View File

@@ -85,6 +85,4 @@ namespace rsx
// Fault recovery
utils::address_range get_fault_range(bool writing) const;
};
-extern dma_manager g_dma_manager;
}

View File

@@ -40,8 +40,6 @@ namespace rsx
{
std::function<bool(u32 addr, bool is_writing)> g_access_violation_handler;
-dma_manager g_dma_manager;
u32 get_address(u32 offset, u32 location, const char* from)
{
const auto render = get_current_renderer();
@@ -481,7 +479,7 @@ namespace rsx
rsx::overlays::reset_performance_overlay();
-g_dma_manager.init();
+g_fxo->get<rsx::dma_manager>()->init();
on_init_thread();
method_registers.init();
@@ -664,7 +662,7 @@ namespace rsx
capture_current_frame = false;
m_rsx_thread_exiting = true;
-g_dma_manager.join();
+g_fxo->get<rsx::dma_manager>()->join();
}
void thread::fill_scale_offset_data(void *buffer, bool flip_y) const
@@ -2091,7 +2089,7 @@ namespace rsx
const u32 data_size = range.second * block.attribute_stride;
const u32 vertex_base = range.first * block.attribute_stride;
-g_dma_manager.copy(persistent, vm::_ptr<char>(block.real_offset_address) + vertex_base, data_size);
+g_fxo->get<rsx::dma_manager>()->copy(persistent, vm::_ptr<char>(block.real_offset_address) + vertex_base, data_size);
persistent += data_size;
}
}
@@ -2249,7 +2247,7 @@ namespace rsx
m_graphics_state |= rsx::pipeline_state::fragment_constants_dirty;
// DMA sync; if you need this, don't use MTRSX
-// g_dma_manager.sync();
+// g_fxo->get<rsx::dma_manager>()->sync();
//TODO: On sync every sub-unit should finish any pending tasks
//Might cause zcull lockup due to zombie 'unclaimed reports' which are not forcefully removed currently
@@ -2489,7 +2487,7 @@ namespace rsx
{
if (g_cfg.video.multithreaded_rsx)
{
-g_dma_manager.sync();
+g_fxo->get<rsx::dma_manager>()->sync();
}
external_interrupt_ack.store(true);
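
The hunks above route the manager's whole lifecycle through the fixed-object map: init() when the RSX thread comes up, copy() while uploading vertex data, sync() before an external interrupt is acknowledged under multithreaded_rsx, and join() when the thread exits. A minimal, hypothetical sketch of a worker-thread DMA manager exposing that surface could look like the following; the class name and the plain job queue are assumptions for illustration, not RPCS3's rsx::dma_manager:

// Hypothetical worker-thread DMA manager with the init/copy/sync/join surface used above.
// This is an illustrative sketch, not RPCS3's rsx::dma_manager.
#include <condition_variable>
#include <cstddef>
#include <cstring>
#include <deque>
#include <mutex>
#include <thread>

class dma_manager_sketch
{
	struct job { void* dst; const void* src; std::size_t size; };

	std::deque<job> m_queue;
	std::mutex m_mutex;
	std::condition_variable m_cv;
	std::thread m_worker;
	bool m_exit = false;

public:
	// Spawn the offload thread (the render thread calls this once on startup)
	void init()
	{
		m_worker = std::thread([this]
		{
			std::unique_lock<std::mutex> lock(m_mutex);
			while (true)
			{
				m_cv.wait(lock, [this] { return m_exit || !m_queue.empty(); });

				if (m_queue.empty())
				{
					return; // exit requested and nothing left to do
				}

				const job j = m_queue.front();

				lock.unlock();
				std::memcpy(j.dst, j.src, j.size); // the transfer runs off the render thread
				lock.lock();

				m_queue.pop_front(); // only removed once complete, so sync() is accurate
				m_cv.notify_all();   // wake any sync() waiter
			}
		});
	}

	// Queue an asynchronous copy (the real manager copies inline when MTRSX is off)
	void copy(void* dst, const void* src, std::size_t size)
	{
		{
			std::lock_guard<std::mutex> lock(m_mutex);
			m_queue.push_back({dst, src, size});
		}
		m_cv.notify_all();
	}

	// Block until every queued transfer has fully completed
	bool sync()
	{
		std::unique_lock<std::mutex> lock(m_mutex);
		m_cv.wait(lock, [this] { return m_queue.empty(); });
		return true; // always succeeds in this sketch
	}

	// Drain remaining work and stop the offload thread
	void join()
	{
		{
			std::lock_guard<std::mutex> lock(m_mutex);
			m_exit = true;
		}
		m_cv.notify_all();

		if (m_worker.joinable())
		{
			m_worker.join();
		}
	}
};

The same object is also asked to emulate_as_indexed index data and to run backend_ctrl packets in the Vulkan hunks further down, which is why the real class is considerably larger than this sketch.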

View File

@@ -21,7 +21,7 @@ namespace vk
if (!flush && g_cfg.video.multithreaded_rsx)
{
auto packet = new submit_packet(queue, pfence, info);
-rsx::g_dma_manager.backend_ctrl(rctrl_queue_submit, packet);
+g_fxo->get<rsx::dma_manager>()->backend_ctrl(rctrl_queue_submit, packet);
}
else
{

View File

@@ -720,15 +720,15 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
if (result.num_flushable > 0)
{
-if (rsx::g_dma_manager.is_current_thread())
+if (g_fxo->get<rsx::dma_manager>()->is_current_thread())
{
// The offloader thread cannot handle flush requests
verify(HERE), m_queue_status.load() == flush_queue_state::ok;
-m_offloader_fault_range = rsx::g_dma_manager.get_fault_range(is_writing);
+m_offloader_fault_range = g_fxo->get<rsx::dma_manager>()->get_fault_range(is_writing);
m_offloader_fault_cause = (is_writing) ? rsx::invalidation_cause::write : rsx::invalidation_cause::read;
-rsx::g_dma_manager.set_mem_fault_flag();
+g_fxo->get<rsx::dma_manager>()->set_mem_fault_flag();
m_queue_status |= flush_queue_state::deadlock;
// Wait for deadlock to clear
@@ -737,7 +737,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
_mm_pause();
}
-rsx::g_dma_manager.clear_mem_fault_flag();
+g_fxo->get<rsx::dma_manager>()->clear_mem_fault_flag();
return true;
}
@@ -2668,7 +2668,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
{
// Workaround for deadlock occuring during RSX offloader fault
// TODO: Restructure command submission infrastructure to avoid this condition
-const bool sync_success = rsx::g_dma_manager.sync();
+const bool sync_success = g_fxo->get<rsx::dma_manager>()->sync();
const VkBool32 force_flush = !sync_success;
if (vk::test_status_interrupt(vk::heap_dirty))
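
The on_access_violation hunks above encode a small handshake for the case where the offloader thread itself triggers the fault: it records the fault range, sets the manager's memory-fault flag, raises a deadlock bit in m_queue_status, and spins with _mm_pause() until the render thread has serviced the flush and cleared the bit. Stripped of the surrounding machinery, and with placeholder enum values since they are not visible in this diff, the handshake is roughly:

// Illustrative reduction of the handshake; flush_queue_state values are placeholders.
#include <atomic>
#include <cstdint>
#include <immintrin.h>

enum flush_queue_state : std::uint32_t
{
	ok       = 0,
	deadlock = 1 << 0, // placeholder bit, not taken from the real header
};

std::atomic<std::uint32_t> m_queue_status{flush_queue_state::ok}; // stands in for the VKGSRender member

// Offloader thread: it cannot service the flush itself, so it publishes the fault
// (fault range + mem-fault flag in the real code) and waits to be released.
void offloader_fault_wait()
{
	m_queue_status |= flush_queue_state::deadlock;

	while (m_queue_status & flush_queue_state::deadlock)
	{
		_mm_pause(); // spin until the render thread clears the bit
	}
}

// Render thread: notices the deadlock bit, performs the flush on the offloader's
// behalf, then releases it.
void render_thread_service()
{
	if (m_queue_status & flush_queue_state::deadlock)
	{
		// ... flush the recorded fault range here ...
		m_queue_status &= ~std::uint32_t(flush_queue_state::deadlock);
	}
}

The set_mem_fault_flag()/clear_mem_fault_flag() calls bracket the window in which the offloader is known to be blocked, presumably so other code can avoid waiting on it during that time.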

View File

@@ -147,7 +147,7 @@ namespace vk
}
// Wait for DMA activity to end
-rsx::g_dma_manager.sync();
+g_fxo->get<rsx::dma_manager>()->sync();
if (mapped)
{

View File

@@ -68,7 +68,7 @@ namespace
VkDeviceSize offset_in_index_buffer = m_index_buffer_ring_info.alloc<256>(upload_size);
void* buf = m_index_buffer_ring_info.map(offset_in_index_buffer, upload_size);
-rsx::g_dma_manager.emulate_as_indexed(buf, clause.primitive, vertex_count);
+g_fxo->get<rsx::dma_manager>()->emulate_as_indexed(buf, clause.primitive, vertex_count);
m_index_buffer_ring_info.unmap();
return std::make_tuple(

View File

@@ -229,7 +229,7 @@ namespace rsx
void texture_read_semaphore_release(thread* rsx, u32 _reg, u32 arg)
{
// Pipeline barrier seems to be equivalent to a SHADER_READ stage barrier
-rsx::g_dma_manager.sync();
+g_fxo->get<rsx::dma_manager>()->sync();
if (g_cfg.video.strict_rendering_mode)
{
rsx->sync();
@@ -249,7 +249,7 @@ namespace rsx
void back_end_write_semaphore_release(thread* rsx, u32 _reg, u32 arg)
{
// Full pipeline barrier
-rsx::g_dma_manager.sync();
+g_fxo->get<rsx::dma_manager>()->sync();
rsx->sync();
const u32 offset = method_registers.semaphore_offset_4097() & -16;