vk: Reimplement access violation sync handling
parent 1b1c300aad
commit 2400210144
@@ -737,26 +737,55 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 	if (g_cfg.video.write_color_buffers || g_cfg.video.write_depth_buffer)
 	{
 		bool flushable, synchronized;
-		std::tie(flushable, synchronized) = m_texture_cache.address_is_flushable(address);
+		u64 sync_timestamp;
+		std::tie(flushable, synchronized, sync_timestamp) = m_texture_cache.address_is_flushable(address);
 
 		if (!flushable)
 			return false;
 
+		const bool is_rsxthr = std::this_thread::get_id() == rsx_thread;
+
 		if (synchronized)
 		{
-			if (m_last_flushable_cb >= 0)
+			//Wait for any cb submitted after the sync timestamp to finish
+			while (true)
 			{
-				if (m_primary_cb_list[m_last_flushable_cb].pending)
-					m_primary_cb_list[m_last_flushable_cb].wait();
+				u32 pending = 0;
+
+				if (m_last_flushable_cb < 0)
+					break;
+
+				for (auto &cb : m_primary_cb_list)
+				{
+					if (!cb.pending && cb.last_sync >= sync_timestamp)
+					{
+						pending = 0;
+						break;
+					}
+
+					if (cb.pending)
+					{
+						pending++;
+
+						if (is_rsxthr)
+							cb.poke();
+					}
+				}
+
+				if (!pending)
+					break;
+
+				std::this_thread::yield();
 			}
 
-			m_last_flushable_cb = -1;
+			if (is_rsxthr)
+				m_last_flushable_cb = -1;
 		}
 		else
 		{
 			//This region is buffered, but no previous sync point has been put in place to start sync efforts
 			//Just stall and get what we have at this point
-			if (std::this_thread::get_id() != rsx_thread)
+			if (!is_rsxthr)
 			{
 				{
 					std::lock_guard<std::mutex> lock(m_flush_queue_mutex);
@@ -765,7 +794,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 					m_queued_threads++;
 				}
 
-				//This is awful!
+				//Wait for the RSX thread to process
 				while (m_flush_commands)
 				{
 					_mm_lfence();
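Taken together, the two hunks above change how a faulting thread proves that flushed data is ready: instead of waiting on one tracked command buffer, it scans every primary command buffer and spins until a completed buffer carries a last_sync stamp at least as new as the section's sync_timestamp (or until nothing is pending at all), and only the RSX thread pokes the Vulkan fences while doing so. A condensed sketch of that loop, using hypothetical stand-in names (chunk, wait_for_sync) rather than the real command_buffer_chunk and m_primary_cb_list:

#include <array>
#include <atomic>
#include <cstdint>
#include <thread>

// Hypothetical, trimmed-down stand-in for command_buffer_chunk.
struct chunk
{
	std::atomic_bool pending{ false };          // submitted, fence not yet observed as signalled
	std::atomic<std::uint64_t> last_sync{ 0 };  // timestamp taken just before submission
	void poke() {}                              // non-blocking fence poll; body omitted in this sketch
};

// Spin until some completed chunk proves that work submitted at or after
// sync_timestamp has finished, or until nothing is pending at all.
template <std::size_t N>
void wait_for_sync(std::array<chunk, N>& cbs, std::uint64_t sync_timestamp, bool is_rsxthr)
{
	while (true)
	{
		unsigned pending = 0;

		for (auto& cb : cbs)
		{
			if (!cb.pending && cb.last_sync >= sync_timestamp)
			{
				pending = 0;    // proof found: a buffer tagged after our snapshot has completed
				break;
			}

			if (cb.pending)
			{
				++pending;

				if (is_rsxthr)
					cb.poke();  // presumably only the owning thread may touch the fence
			}
		}

		if (!pending)
			break;

		std::this_thread::yield();
	}
}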
@@ -1430,6 +1459,9 @@ void VKGSRender::flush_command_queue(bool hard_sync)
 		}
 
 		m_current_command_buffer->reset();
+
+		if (m_last_flushable_cb == m_current_cb_index)
+			m_last_flushable_cb = -1;
 	}
 
 	open_command_buffer();
@@ -1564,6 +1596,15 @@ void VKGSRender::do_local_task()
 			_mm_pause();
 		}
 	}
+
+	if (m_last_flushable_cb > -1)
+	{
+		auto cb = &m_primary_cb_list[m_last_flushable_cb];
+		cb->poke();
+
+		if (!cb->pending)
+			m_last_flushable_cb = -1;
+	}
 }
 
 bool VKGSRender::do_method(u32 cmd, u32 arg)
@@ -1919,6 +1960,7 @@ void VKGSRender::close_and_submit_command_buffer(const std::vector<VkSemaphore>
 	infos.waitSemaphoreCount = static_cast<uint32_t>(semaphores.size());
 	infos.sType = VK_STRUCTURE_TYPE_SUBMIT_INFO;
 
+	m_current_command_buffer->tag();
 	CHECK_RESULT(vkQueueSubmit(m_swap_chain->get_present_queue(), 1, &infos, fence));
 }
 
@@ -34,12 +34,15 @@ namespace vk
 #define VK_MAX_ASYNC_CB_COUNT 64
 #define VK_MAX_ASYNC_FRAMES 2
 
+extern u64 get_system_time();
+
 struct command_buffer_chunk: public vk::command_buffer
 {
 	VkFence submit_fence = VK_NULL_HANDLE;
 	VkDevice m_device = VK_NULL_HANDLE;
 
-	bool pending = false;
+	std::atomic_bool pending = { false };
+	std::atomic<u64> last_sync = { 0 };
 
 	command_buffer_chunk()
 	{}
@@ -61,6 +64,11 @@ struct command_buffer_chunk: public vk::command_buffer
 		vkDestroyFence(m_device, submit_fence, nullptr);
 	}
 
+	void tag()
+	{
+		last_sync = get_system_time();
+	}
+
 	void reset()
 	{
 		if (pending)
@@ -227,13 +235,14 @@ private:
 	rsx::gcm_framebuffer_info m_depth_surface_info;
 
 	bool m_flush_draw_buffers = false;
-	s32 m_last_flushable_cb = -1;
+	std::atomic<int> m_last_flushable_cb = {-1 };
 
 	std::mutex m_flush_queue_mutex;
 	std::atomic<bool> m_flush_commands = { false };
 	std::atomic<int> m_queued_threads = { 0 };
 
 	std::thread::id rsx_thread;
+	std::atomic<u64> m_last_sync_event = { 0 };
 
 	bool render_pass_open = false;
 
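The header hunks above turn the chunk's bookkeeping into atomics (pending, last_sync) so other threads can read it safely, add a tag() helper that stamps the chunk with get_system_time(), and track the buffer to watch through an atomic m_last_flushable_cb; close_and_submit_command_buffer() calls tag() immediately before vkQueueSubmit, and do_local_task() keeps poking the tracked buffer on the RSX thread. poke() and wait() themselves are not part of this diff, so the bodies below are only an assumption about how such a slot could pair its fence with the atomic flag, not the project's actual implementation:

#include <atomic>
#include <cstdint>
#include <vulkan/vulkan.h>

extern std::uint64_t get_system_time(); // same monotonic clock the texture cache stamps with

// Hypothetical sketch of one command-buffer slot; only tag() and the two
// atomics mirror the diff above, the poke()/wait() bodies are assumed.
struct cb_slot
{
	VkDevice device = VK_NULL_HANDLE;
	VkFence submit_fence = VK_NULL_HANDLE;

	std::atomic_bool pending{ false };
	std::atomic<std::uint64_t> last_sync{ 0 };

	void tag()                       // called just before the queue submission
	{
		last_sync = get_system_time();
	}

	void poke()                      // non-blocking: has the GPU finished this submission?
	{
		if (pending && vkGetFenceStatus(device, submit_fence) == VK_SUCCESS)
		{
			vkResetFences(device, 1, &submit_fence);
			pending = false;
		}
	}

	void wait()                      // blocking variant of the same check
	{
		if (!pending)
			return;

		vkWaitForFences(device, 1, &submit_fence, VK_TRUE, UINT64_MAX);
		vkResetFences(device, 1, &submit_fence);
		pending = false;
	}
};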
@@ -7,6 +7,8 @@
 #include "../rsx_utils.h"
 #include "Utilities/mutex.h"
 
+extern u64 get_system_time();
+
 namespace vk
 {
 	class cached_texture_section : public rsx::buffered_section
@@ -24,6 +26,8 @@ namespace vk
 		u16 native_pitch;
 		VkFence dma_fence = VK_NULL_HANDLE;
 		bool synchronized = false;
+		u64 sync_timestamp = 0;
+		u64 last_use_timestamp = 0;
 		vk::render_device* m_device = nullptr;
 		vk::image *vram_texture = nullptr;
 		std::unique_ptr<vk::buffer> dma_buffer;
@@ -60,6 +64,8 @@ namespace vk
 			//Even if we are managing the same vram section, we cannot guarantee contents are static
 			//The create method is only invoked when a new mangaged session is required
 			synchronized = false;
+			sync_timestamp = 0ull;
+			last_use_timestamp = get_system_time();
 		}
 
 		void release_dma_resources()
@@ -208,6 +214,7 @@ namespace vk
 			}
 
 			synchronized = true;
+			sync_timestamp = get_system_time();
 		}
 
 		template<typename T>
@@ -289,6 +296,16 @@ namespace vk
 		{
 			return synchronized;
 		}
+
+		bool sync_valid() const
+		{
+			return (sync_timestamp > last_use_timestamp);
+		}
+
+		u64 get_sync_timestamp() const
+		{
+			return sync_timestamp;
+		}
 	};
 
 	class texture_cache
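On the texture-cache side the handshake is a pair of timestamps per section: last_use_timestamp is taken when the section is (re)created, sync_timestamp when its contents are copied out for flushing (the hunk around line 208, presumably the readback path), and sync_valid() reports whether the snapshot postdates the last (re)creation. A condensed sketch of just that bookkeeping, with hypothetical names standing in for the much larger cached_texture_section:

#include <cstdint>

extern std::uint64_t get_system_time();

// Hypothetical, reduced view of the timestamp state added to cached_texture_section.
struct section_sync_state
{
	bool synchronized = false;
	std::uint64_t sync_timestamp = 0;      // when contents were last copied out for flushing
	std::uint64_t last_use_timestamp = 0;  // when the section was last (re)created

	void on_create()        // mirrors the create() hunk: a fresh section has no valid snapshot
	{
		synchronized = false;
		sync_timestamp = 0;
		last_use_timestamp = get_system_time();
	}

	void on_copied_out()    // mirrors the readback hunk: snapshot taken, stamp it
	{
		synchronized = true;
		sync_timestamp = get_system_time();
	}

	bool sync_valid() const // snapshot is only trustworthy if it postdates the last (re)creation
	{
		return sync_timestamp > last_use_timestamp;
	}
};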
@@ -710,11 +727,11 @@ namespace vk
 			return true;
 		}
 
-		std::tuple<bool, bool> address_is_flushable(u32 address)
+		std::tuple<bool, bool, u64> address_is_flushable(u32 address)
 		{
 			if (address < no_access_range.first ||
 				address > no_access_range.second)
-				return std::make_tuple(false, false);
+				return std::make_tuple(false, false, 0ull);
 
 			reader_lock lock(m_cache_mutex);
 
@@ -728,7 +745,7 @@ namespace vk
 					if (!tex.is_flushable()) continue;
 
 					if (tex.overlaps(address))
-						return std::make_tuple(true, tex.is_synchronized());
+						return std::make_tuple(true, tex.is_synchronized(), tex.get_sync_timestamp());
 				}
 			}
 
@@ -752,11 +769,11 @@ namespace vk
 					if (!tex.is_flushable()) continue;
 
 					if (tex.overlaps(address))
-						return std::make_tuple(true, tex.is_synchronized());
+						return std::make_tuple(true, tex.is_synchronized(), tex.get_sync_timestamp());
 				}
 			}
 
-			return std::make_tuple(false, false);
+			return std::make_tuple(false, false, 0ull);
 		}
 
 		bool flush_address(u32 address, vk::render_device& dev, vk::command_buffer& cmd, vk::memory_type_mapping& memory_types, VkQueue submit_queue)
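For completeness, a minimal sketch of how a caller is meant to consume the widened tuple; it mirrors the on_access_violation hunk at the top of this diff, with `cache` standing in for the texture cache object:

#include <cstdint>
#include <tuple>

// `Cache` is assumed to expose the new
// std::tuple<bool, bool, u64> address_is_flushable(u32) signature shown above.
template <typename Cache>
bool handle_fault(Cache& cache, std::uint32_t address)
{
	bool flushable, synchronized;
	std::uint64_t sync_timestamp;
	std::tie(flushable, synchronized, sync_timestamp) = cache.address_is_flushable(address);

	if (!flushable)
		return false;            // address is not covered by a flushable section

	if (synchronized)
	{
		// A snapshot already exists; sync_timestamp would be handed to the wait
		// loop sketched earlier to prove the copy has finished on the GPU.
		(void)sync_timestamp;
	}
	else
	{
		// No snapshot yet; the faulting thread has to ask the RSX thread to
		// flush and then stall until it does.
	}

	return true;
}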