vk: Do not hard-sync on first sign of fragmentation
- It is very likely that the resource would be available if we just waited a while for the GPU queue to clear.
- Instead of also discarding the current work, first check if we can get by without a hard sync.
This commit is contained in:
parent 29f3eec957 · commit 49c6c2c529

@@ -1202,8 +1202,15 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
     return any_cache_relieved;
 }
 
-void VKGSRender::on_descriptor_pool_fragmentation()
+void VKGSRender::on_descriptor_pool_fragmentation(bool is_fatal)
 {
+    if (!is_fatal)
+    {
+        // It is very likely that the release is simply in progress (enqueued)
+        m_primary_cb_list.wait_all();
+        return;
+    }
+
     // Just flush everything. Unless the hardware is very deficient, this should happen very rarely.
     flush_command_queue(true, true);
 }
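The hunk above splits recovery into a soft tier and a hard tier. As a rough standalone sketch of that policy (gpu_queue, wait_for_pending and hard_flush are illustrative stand-ins, not RPCS3 code):

#include <functional>

// Sketch of the soft-vs-hard recovery split: a non-fatal fragmentation report
// only waits for in-flight GPU work to retire; a fatal one pays for the flush.
struct gpu_queue
{
    std::function<void()> wait_for_pending; // block until submitted work retires
    std::function<void()> hard_flush;       // flush everything + hard sync (expensive)
};

inline void on_fragmentation(gpu_queue& q, bool is_fatal)
{
    if (!is_fatal)
    {
        // Cheap path: the freed descriptors are very likely just still enqueued,
        // so draining the queue is usually enough for the allocation to succeed.
        q.wait_for_pending();
        return;
    }

    // Last resort: flush the whole command queue. Should be rare on capable hardware.
    q.hard_flush();
}

The design point is that the cheap wait almost always suffices, so the expensive flush-and-hard-sync is reserved for genuinely exhausted pools.
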
@@ -264,7 +264,7 @@ public:
     bool on_vram_exhausted(rsx::problem_severity severity);
 
     // Handle pool creation failure due to fragmentation
-    void on_descriptor_pool_fragmentation();
+    void on_descriptor_pool_fragmentation(bool is_fatal);
 
     // Conditional rendering
     void begin_conditional_rendering(const std::vector<rsx::reports::occlusion_query_info*>& sources) override;
@@ -345,6 +345,14 @@ namespace vk
         }
     }
 
+    void wait_all()
+    {
+        for (auto& cb : m_cb_list)
+        {
+            cb.wait();
+        }
+    }
+
     inline command_buffer_chunk* next()
     {
         const auto result_id = ++m_current_index % Count;
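The new wait_all() helper simply blocks on every chunk of the command buffer ring in turn. A self-contained sketch of that shape, with an atomic flag standing in for the real fence wait (command_buffer_chunk here is an assumed stand-in for the vkutils type):

#include <array>
#include <atomic>
#include <cstddef>
#include <thread>

struct command_buffer_chunk
{
    std::atomic<bool> pending{false};

    void wait()
    {
        // Spin until the GPU signals completion (stand-in for a VkFence wait).
        while (pending.load(std::memory_order_acquire))
            std::this_thread::yield();
    }
};

template <std::size_t Count>
struct command_buffer_chain
{
    std::array<command_buffer_chunk, Count> m_cb_list{};

    // Blocks until every chunk in the ring has retired, mirroring the helper above.
    void wait_all()
    {
        for (auto& cb : m_cb_list)
        {
            cb.wait();
        }
    }
};
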
@@ -269,11 +269,11 @@ namespace vk
         renderer->emergency_query_cleanup(&cmd);
     }
 
-    void on_descriptor_pool_fragmentation()
+    void on_descriptor_pool_fragmentation(bool is_fatal)
     {
         if (auto vkthr = dynamic_cast<VKGSRender*>(rsx::get_current_renderer()))
         {
-            vkthr->on_descriptor_pool_fragmentation();
+            vkthr->on_descriptor_pool_fragmentation(is_fatal);
         }
     }
 }
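This free function is the backend-neutral entry point; dynamic_cast forwards the callback only when the active renderer is the Vulkan one. A condensed sketch of that dispatch pattern (gs_renderer and vk_renderer are hypothetical stand-ins, not the real rsx interfaces):

// Minimal model of routing a backend-agnostic error callback to the active renderer.
struct gs_renderer { virtual ~gs_renderer() = default; };

struct vk_renderer : gs_renderer
{
    void on_descriptor_pool_fragmentation(bool is_fatal)
    {
        (void)is_fatal; // soft wait or hard flush, as in the handler above
    }
};

static vk_renderer g_renderer;                                    // stub for the sketch
gs_renderer* get_current_renderer() { return &g_renderer; }      // stub for the sketch

void on_descriptor_pool_fragmentation(bool is_fatal)
{
    // dynamic_cast yields nullptr for non-Vulkan backends, making this a no-op there.
    if (auto* vkthr = dynamic_cast<vk_renderer*>(get_current_renderer()))
    {
        vkthr->on_descriptor_pool_fragmentation(is_fatal);
    }
}
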
@@ -5,7 +5,7 @@
 namespace vk
 {
     // Error handler callback
-    extern void on_descriptor_pool_fragmentation();
+    extern void on_descriptor_pool_fragmentation(bool fatal);
 
     namespace descriptors
     {
@@ -228,38 +228,32 @@ namespace vk
         m_current_subpool_offset = 0;
         m_current_subpool_index = umax;
 
-        // Only attempt recovery once. Can be bumped up if we have a more complex setup in future.
-        int retries = 1;
+        const int max_retries = 2;
+        int retries = max_retries;
 
-        while (m_current_subpool_index == umax)
+        do
         {
             for (u32 index = 0; index < m_device_subpools.size(); ++index)
             {
                 if (!m_device_subpools[index].busy)
                 {
                     m_current_subpool_index = index;
-                    break;
+                    goto done; // Nested break
                 }
             }
 
-            if (m_current_subpool_index != umax)
-            {
-                // We found something, exit early
-                break;
-            }
-
             VkDescriptorPool subpool = VK_NULL_HANDLE;
             if (VkResult result = vkCreateDescriptorPool(*m_owner, &m_create_info, nullptr, &subpool))
             {
                 if (retries-- && (result == VK_ERROR_FRAGMENTATION_EXT))
                 {
                     rsx_log.warning("Descriptor pool creation failed with fragmentation error. Will attempt to recover.");
-                    vk::on_descriptor_pool_fragmentation();
+                    vk::on_descriptor_pool_fragmentation(!retries);
                     continue;
                 }
 
                 vk::die_with_error(result);
-                break;
+                fmt::throw_exception("Unreachable");
             }
 
             // New subpool created successfully
@@ -272,8 +266,10 @@ namespace vk
             });
 
             m_current_subpool_index = m_device_subpools.size() - 1;
-        }
 
+        } while (m_current_subpool_index == umax);
+
+    done:
         m_device_subpools[m_current_subpool_index].busy = VK_TRUE;
         m_current_pool_handle = m_device_subpools[m_current_subpool_index].handle;
     }
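Taken together, the two hunks turn the allocation path into a single retry loop: reuse an idle subpool if one exists (the goto done exits both loops at once), otherwise create a new one, and on VK_ERROR_FRAGMENTATION_EXT escalate from the soft recovery to the fatal one on the last retry, which is what passing !retries encodes. A condensed, runnable sketch of that control flow with stubbed-out pool operations (assumptions, not the exact RPCS3 code):

#include <stdexcept>

// Stub pool operations standing in for the real subpool bookkeeping.
static int  g_idle_subpools = 0;
static bool try_reuse_subpool()  { return g_idle_subpools-- > 0; }
static bool try_create_subpool() { return false; } // simulate persistent fragmentation

static void on_descriptor_pool_fragmentation(bool is_fatal)
{
    (void)is_fatal; // soft: wait for the GPU queue; fatal: flush + hard sync
}

void next_subpool()
{
    int retries = 2; // mirrors max_retries in the diff above

    while (true)
    {
        if (try_reuse_subpool() || try_create_subpool())
            return; // acquired a subpool

        if (retries-- == 0)
            throw std::runtime_error("descriptor pool exhausted"); // out of options

        // Only the final allowed retry escalates to the fatal (hard-sync) recovery.
        on_descriptor_pool_fragmentation(retries == 0);
    }
}

As in the diff, the first failure triggers the cheap wait, the second forces the hard flush, and only a third consecutive failure is treated as unrecoverable.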