diff --git a/rpcs3/Emu/RSX/Common/simple_array.hpp b/rpcs3/Emu/RSX/Common/simple_array.hpp
index 610df86752..4082447deb 100644
--- a/rpcs3/Emu/RSX/Common/simple_array.hpp
+++ b/rpcs3/Emu/RSX/Common/simple_array.hpp
@@ -5,10 +5,7 @@
 namespace rsx
 {
-	template <typename Ty>
-	concept is_simple_pod_v = (std::is_trivially_constructible_v<Ty>) && (std::is_trivially_destructible_v<Ty>);
-
-	template <typename Ty> requires is_simple_pod_v<Ty>
+	template <typename Ty> requires std::is_trivially_destructible_v<Ty>
 	struct simple_array
 	{
 	public:
diff --git a/rpcs3/Emu/RSX/VK/VKCompute.cpp b/rpcs3/Emu/RSX/VK/VKCompute.cpp
index 13b2928c94..e70b0a7dff 100644
--- a/rpcs3/Emu/RSX/VK/VKCompute.cpp
+++ b/rpcs3/Emu/RSX/VK/VKCompute.cpp
@@ -16,13 +16,13 @@ namespace vk
 	void compute_task::init_descriptors()
 	{
-		std::vector<VkDescriptorPoolSize> descriptor_pool_sizes;
-		std::vector<VkDescriptorSetLayoutBinding> bindings;
+		rsx::simple_array<VkDescriptorPoolSize> descriptor_pool_sizes;
+		rsx::simple_array<VkDescriptorSetLayoutBinding> bindings;
 
 		const auto layout = get_descriptor_layout();
 		for (const auto &e : layout)
 		{
-			descriptor_pool_sizes.push_back({e.first, u32(VK_MAX_COMPUTE_TASKS * e.second)});
+			descriptor_pool_sizes.push_back({e.first, e.second});
 
 			for (unsigned n = 0; n < e.second; ++n)
 			{
@@ -38,7 +38,7 @@ namespace vk
 		}
 
 		// Reserve descriptor pools
-		m_descriptor_pool.create(*g_render_device, descriptor_pool_sizes.data(), ::size32(descriptor_pool_sizes), VK_MAX_COMPUTE_TASKS, 3);
+		m_descriptor_pool.create(*g_render_device, descriptor_pool_sizes);
 
 		m_descriptor_layout = vk::descriptors::create_layout(bindings);
 
 		VkPipelineLayoutCreateInfo layout_info = {};
@@ -146,7 +146,7 @@ namespace vk
 
 		ensure(m_used_descriptors < VK_MAX_COMPUTE_TASKS);
 
-		m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout, VK_TRUE, m_used_descriptors++);
+		m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout, VK_TRUE);
 
 		bind_resources();
diff --git a/rpcs3/Emu/RSX/VK/VKDraw.cpp b/rpcs3/Emu/RSX/VK/VKDraw.cpp
index c92b41e609..a5b12473bd 100644
--- a/rpcs3/Emu/RSX/VK/VKDraw.cpp
+++ b/rpcs3/Emu/RSX/VK/VKDraw.cpp
@@ -993,7 +993,6 @@ void VKGSRender::end()
 	}
 
 	// Allocate descriptor set
-	check_descriptors();
 	m_current_frame->descriptor_set = allocate_descriptor_set();
 
 	// Load program execution environment
diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.cpp b/rpcs3/Emu/RSX/VK/VKGSRender.cpp
index 7be922985a..2740595b90 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRender.cpp
+++ b/rpcs3/Emu/RSX/VK/VKGSRender.cpp
@@ -395,9 +395,9 @@ namespace
 	std::tuple<VkPipelineLayout, VkDescriptorSetLayout> get_shared_pipeline_layout(VkDevice dev)
 	{
 		const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
-		std::vector<VkDescriptorSetLayoutBinding> bindings(binding_table.total_descriptor_bindings);
+		rsx::simple_array<VkDescriptorSetLayoutBinding> bindings(binding_table.total_descriptor_bindings);
 
-		usz idx = 0;
+		u32 idx = 0;
 
 		// Vertex stream, one stream for cacheable data, one stream for transient data
 		for (int i = 0; i < 3; i++)
@@ -595,7 +595,7 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
 	m_secondary_cb_list.create(m_secondary_command_buffer_pool, vk::command_buffer::access_type_hint::all);
 
 	//Precalculated stuff
-	std::tie(pipeline_layout, descriptor_layouts) = get_shared_pipeline_layout(*m_device);
+	std::tie(m_pipeline_layout, m_descriptor_layouts) = get_shared_pipeline_layout(*m_device);
 
 	//Occlusion
 	m_occlusion_query_manager = std::make_unique(*m_device, VK_QUERY_TYPE_OCCLUSION, OCCLUSION_MAX_POOL_SIZE);
@@ -614,13 +614,16 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
 	const auto& binding_table = m_device->get_pipeline_binding_table();
 	const u32 num_fs_samplers = binding_table.vertex_textures_first_bind_slot - binding_table.textures_first_bind_slot;
 
-	std::vector<VkDescriptorPoolSize> sizes;
-	sizes.push_back({ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 * max_draw_calls });
-	sizes.push_back({ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 * max_draw_calls });
-	sizes.push_back({ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , (num_fs_samplers + 4) * max_draw_calls });
+	rsx::simple_array<VkDescriptorPoolSize> descriptor_type_sizes =
+	{
+		{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 },
+		{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 },
+		{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , (num_fs_samplers + 4) },
 
-	// Conditional rendering predicate slot; refactor to allow skipping this when not needed
-	sizes.push_back({ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1 * max_draw_calls });
+		// Conditional rendering predicate slot; refactor to allow skipping this when not needed
+		{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 1 }
+	};
+	m_descriptor_pool.create(*m_device, descriptor_type_sizes, max_draw_calls);
 
 	VkSemaphoreCreateInfo semaphore_info = {};
 	semaphore_info.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
@@ -665,7 +668,6 @@ VKGSRender::VKGSRender(utils::serial* ar) noexcept : GSRender(ar)
 	{
 		vkCreateSemaphore((*m_device), &semaphore_info, nullptr, &ctx.present_wait_semaphore);
 		vkCreateSemaphore((*m_device), &semaphore_info, nullptr, &ctx.acquire_signal_semaphore);
-		ctx.descriptor_pool.create(*m_device, sizes.data(), static_cast<u32>(sizes.size()), max_draw_calls, 1);
 	}
 
 	const auto& memory_map = m_device->get_memory_mapping();
@@ -920,11 +922,12 @@ VKGSRender::~VKGSRender()
 	{
 		vkDestroySemaphore((*m_device), ctx.present_wait_semaphore, nullptr);
 		vkDestroySemaphore((*m_device), ctx.acquire_signal_semaphore, nullptr);
-		ctx.descriptor_pool.destroy();
 
 		ctx.buffer_views_to_clean.clear();
 	}
 
+	m_descriptor_pool.destroy();
+
 	// Textures
 	m_rtts.destroy();
 	m_texture_cache.destroy();
@@ -935,8 +938,8 @@ VKGSRender::~VKGSRender()
 	m_text_writer.reset();
 
 	//Pipeline descriptors
-	vkDestroyPipelineLayout(*m_device, pipeline_layout, nullptr);
-	vkDestroyDescriptorSetLayout(*m_device, descriptor_layouts, nullptr);
+	vkDestroyPipelineLayout(*m_device, m_pipeline_layout, nullptr);
+	vkDestroyDescriptorSetLayout(*m_device, m_descriptor_layouts, nullptr);
 
 	// Queries
 	m_occlusion_query_manager.reset();
@@ -1318,22 +1321,11 @@ void VKGSRender::check_present_status()
 	}
 }
 
-void VKGSRender::check_descriptors()
-{
-	// Ease resource pressure if the number of draw calls becomes too high or we are running low on memory resources
-	const auto required_descriptors = rsx::method_registers.current_draw_clause.pass_count();
-	if (!m_current_frame->descriptor_pool.can_allocate(required_descriptors, 0))
-	{
-		// Should hard sync before resetting descriptors for spec compliance
-		flush_command_queue(true);
-	}
-}
-
 VkDescriptorSet VKGSRender::allocate_descriptor_set()
 {
 	if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]]
 	{
-		return m_current_frame->descriptor_pool.allocate(descriptor_layouts, VK_TRUE, 0);
+		return m_descriptor_pool.allocate(m_descriptor_layouts, VK_TRUE);
 	}
 	else
 	{
@@ -1414,7 +1406,7 @@ void VKGSRender::on_init_thread()
 		if (!m_overlay_manager)
 		{
 			m_frame->hide();
-			m_shaders_cache->load(nullptr, pipeline_layout);
+			m_shaders_cache->load(nullptr, m_pipeline_layout);
 			m_frame->show();
 		}
 		else
@@ -1422,7 +1414,7 @@ void VKGSRender::on_init_thread()
 			rsx::shader_loading_dialog_native dlg(this);
 
 			// TODO: Handle window resize messages during loading on GPUs without OUT_OF_DATE_KHR support
-			m_shaders_cache->load(&dlg, pipeline_layout);
+			m_shaders_cache->load(&dlg, m_pipeline_layout);
 		}
 	}
@@ -2009,7 +2001,7 @@ bool VKGSRender::load_program()
 
 	// Load current program from cache
 	std::tie(m_program, m_vertex_prog, m_fragment_prog) = m_prog_buffer->get_graphics_pipeline(vertex_program, fragment_program, m_pipeline_properties,
-		shadermode != shader_mode::recompiler, true, pipeline_layout);
+		shadermode != shader_mode::recompiler, true, m_pipeline_layout);
 
 	vk::leave_uninterruptible();
 
@@ -2268,7 +2260,7 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_
 		data_size = 20;
 	}
 
-	vkCmdPushConstants(*m_current_command_buffer, pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, data_size, draw_info);
+	vkCmdPushConstants(*m_current_command_buffer, m_pipeline_layout, VK_SHADER_STAGE_VERTEX_BIT, 0, data_size, draw_info);
 
 	const usz data_offset = (id * 128) + m_vertex_layout_stream_info.offset;
 	auto dst = m_vertex_layout_ring_info.map(data_offset, 128);
diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.h b/rpcs3/Emu/RSX/VK/VKGSRender.h
index f943ef9288..44be389ab8 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRender.h
+++ b/rpcs3/Emu/RSX/VK/VKGSRender.h
@@ -120,8 +120,9 @@ private:
 	volatile vk::host_data_t* m_host_data_ptr = nullptr;
 	std::unique_ptr m_host_object_data;
 
-	VkDescriptorSetLayout descriptor_layouts;
-	VkPipelineLayout pipeline_layout;
+	vk::descriptor_pool m_descriptor_pool;
+	VkDescriptorSetLayout m_descriptor_layouts;
+	VkPipelineLayout m_pipeline_layout;
 
 	vk::framebuffer_holder* m_draw_fbo = nullptr;
 
@@ -229,7 +230,6 @@ private:
 	void check_heap_status(u32 flags = VK_HEAP_CHECK_ALL);
 	void check_present_status();
-	void check_descriptors();
 	VkDescriptorSet allocate_descriptor_set();
 
 	vk::vertex_upload_info upload_vertex_data();
diff --git a/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp b/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp
index 299917e28f..0306c692e7 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp
+++ b/rpcs3/Emu/RSX/VK/VKGSRenderTypes.hpp
@@ -176,7 +176,6 @@ namespace vk
 		VkSemaphore present_wait_semaphore = VK_NULL_HANDLE;
 
 		vk::descriptor_set descriptor_set;
-		vk::descriptor_pool descriptor_pool;
 
 		rsx::flags32_t flags = 0;
 
@@ -185,7 +184,7 @@ namespace vk
 		u32 present_image = -1;
 		command_buffer_chunk* swap_command_buffer = nullptr;
 
-		//Heap pointers
+		// Heap pointers
 		s64 attrib_heap_ptr = 0;
 		s64 vtx_env_heap_ptr = 0;
 		s64 frag_env_heap_ptr = 0;
@@ -199,14 +198,12 @@ namespace vk
 
 		u64 last_frame_sync_time = 0;
 
-		//Copy shareable information
+		// Copy shareable information
 		void grab_resources(frame_context_t& other)
 		{
 			present_wait_semaphore = other.present_wait_semaphore;
 			acquire_signal_semaphore = other.acquire_signal_semaphore;
 			descriptor_set.swap(other.descriptor_set);
-			descriptor_pool = other.descriptor_pool;
-			used_descriptors = other.used_descriptors;
 
 			flags = other.flags;
 			attrib_heap_ptr = other.attrib_heap_ptr;
@@ -221,7 +218,7 @@ namespace vk
 			rasterizer_env_heap_ptr = other.rasterizer_env_heap_ptr;
 		}
 
-		//Exchange storage (non-copyable)
+		// Exchange storage (non-copyable)
 		void swap_storage(frame_context_t& other)
 		{
 			std::swap(buffer_views_to_clean, other.buffer_views_to_clean);
diff --git a/rpcs3/Emu/RSX/VK/VKHelpers.cpp b/rpcs3/Emu/RSX/VK/VKHelpers.cpp
index 5d9bf1c67d..78b297b694 100644
--- a/rpcs3/Emu/RSX/VK/VKHelpers.cpp
+++ b/rpcs3/Emu/RSX/VK/VKHelpers.cpp
@@ -35,14 +35,6 @@ namespace vk
 	u64 g_num_processed_frames = 0;
 	u64 g_num_total_frames = 0;
 
-	void reset_compute_tasks()
-	{
-		for (const auto &p : g_compute_tasks)
-		{
-			p.second->free_resources();
-		}
-	}
-
 	void reset_overlay_passes()
 	{
 		for (const auto& p : g_overlay_passes)
@@ -53,7 +45,7 @@ namespace vk
 	}
 
 	void reset_global_resources()
 	{
-		vk::reset_compute_tasks();
+		// FIXME: These two shouldn't exist
 		vk::reset_resolve_resources();
 		vk::reset_overlay_passes();
diff --git a/rpcs3/Emu/RSX/VK/VKOverlays.cpp b/rpcs3/Emu/RSX/VK/VKOverlays.cpp
index bba369e4f3..e9afe8ef15 100644
--- a/rpcs3/Emu/RSX/VK/VKOverlays.cpp
+++ b/rpcs3/Emu/RSX/VK/VKOverlays.cpp
@@ -15,8 +15,6 @@
 
 #include "util/fnv_hash.hpp"
 
-#define VK_OVERLAY_MAX_DRAW_CALLS 1024
-
 namespace vk
 {
 	overlay_pass::overlay_pass()
@@ -49,26 +47,26 @@ namespace vk
 
 	void overlay_pass::init_descriptors()
 	{
-		std::vector<VkDescriptorPoolSize> descriptor_pool_sizes =
+		rsx::simple_array<VkDescriptorPoolSize> descriptor_pool_sizes =
 		{
-			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, VK_OVERLAY_MAX_DRAW_CALLS }
+			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1 }
 		};
 
 		if (m_num_usable_samplers)
 		{
-			descriptor_pool_sizes.push_back({ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, VK_OVERLAY_MAX_DRAW_CALLS * m_num_usable_samplers });
+			descriptor_pool_sizes.push_back({ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER, m_num_usable_samplers });
 		}
 
 		if (m_num_input_attachments)
 		{
-			descriptor_pool_sizes.push_back({ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, VK_OVERLAY_MAX_DRAW_CALLS * m_num_input_attachments });
+			descriptor_pool_sizes.push_back({ VK_DESCRIPTOR_TYPE_INPUT_ATTACHMENT, m_num_input_attachments });
 		}
 
 		// Reserve descriptor pools
-		m_descriptor_pool.create(*m_device, descriptor_pool_sizes.data(), ::size32(descriptor_pool_sizes), VK_OVERLAY_MAX_DRAW_CALLS, 2);
+		m_descriptor_pool.create(*m_device, descriptor_pool_sizes);
 
 		const auto num_bindings = 1 + m_num_usable_samplers + m_num_input_attachments;
-		std::vector<VkDescriptorSetLayoutBinding> bindings(num_bindings);
+		rsx::simple_array<VkDescriptorSetLayoutBinding> bindings(num_bindings);
 
 		bindings[0].descriptorType = VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER;
 		bindings[0].descriptorCount = 1;
@@ -222,16 +220,7 @@ namespace vk
 		else
 			program = build_pipeline(key, pass);
 
-		ensure(m_used_descriptors < VK_OVERLAY_MAX_DRAW_CALLS);
-
-		VkDescriptorSetAllocateInfo alloc_info = {};
-		alloc_info.descriptorPool = m_descriptor_pool;
-		alloc_info.descriptorSetCount = 1;
-		alloc_info.pSetLayouts = &m_descriptor_layout;
-		alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
-
-		CHECK_RESULT(vkAllocateDescriptorSets(*m_device, &alloc_info, m_descriptor_set.ptr()));
-		m_used_descriptors++;
+		m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout);
 
 		if (!m_sampler && !src.empty())
 		{
@@ -288,6 +277,7 @@ namespace vk
 
 	void overlay_pass::free_resources()
 	{
+		// FIXME: Allocation sizes are known, we don't need to use a data_heap structure
 		m_vao.reset_allocation_stats();
 		m_ubo.reset_allocation_stats();
 	}
diff --git a/rpcs3/Emu/RSX/VK/VKPresent.cpp b/rpcs3/Emu/RSX/VK/VKPresent.cpp
index 85c7067ece..f2289b9927 100644
--- a/rpcs3/Emu/RSX/VK/VKPresent.cpp
+++ b/rpcs3/Emu/RSX/VK/VKPresent.cpp
@@ -199,11 +199,6 @@ void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx)
 
 	// Resource cleanup.
 	// TODO: This is some outdated crap.
 	{
-		if (m_text_writer)
-		{
-			m_text_writer->reset_descriptors();
-		}
-
 		if (m_overlay_manager && m_overlay_manager->has_dirty())
 		{
 			auto ui_renderer = vk::get_overlay_pass();
diff --git a/rpcs3/Emu/RSX/VK/VKResolveHelper.cpp b/rpcs3/Emu/RSX/VK/VKResolveHelper.cpp
index f64fb4141e..ca30abfc06 100644
--- a/rpcs3/Emu/RSX/VK/VKResolveHelper.cpp
+++ b/rpcs3/Emu/RSX/VK/VKResolveHelper.cpp
@@ -246,9 +246,6 @@ namespace vk
 
 	void reset_resolve_resources()
 	{
-		for (auto &e : g_resolve_helpers) e.second->free_resources();
-		for (auto &e : g_unresolve_helpers) e.second->free_resources();
-
 		if (g_depth_resolver) g_depth_resolver->free_resources();
 		if (g_depth_unresolver) g_depth_unresolver->free_resources();
 		if (g_stencil_resolver) g_stencil_resolver->free_resources();
diff --git a/rpcs3/Emu/RSX/VK/VKResourceManager.cpp b/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
index 02c4573bcd..3c9eff4ef3 100644
--- a/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
+++ b/rpcs3/Emu/RSX/VK/VKResourceManager.cpp
@@ -41,6 +41,11 @@ namespace vk
 		return &g_resource_manager;
 	}
 
+	garbage_collector* get_gc()
+	{
+		return &g_resource_manager;
+	}
+
 	void resource_manager::trim()
 	{
 		// For any managed resources, try to keep the number of unused/idle resources as low as possible.
diff --git a/rpcs3/Emu/RSX/VK/VKResourceManager.h b/rpcs3/Emu/RSX/VK/VKResourceManager.h
index bc9b459482..4bc914250e 100644
--- a/rpcs3/Emu/RSX/VK/VKResourceManager.h
+++ b/rpcs3/Emu/RSX/VK/VKResourceManager.h
@@ -1,5 +1,6 @@
 #pragma once
 #include "vkutils/image.h"
+#include "vkutils/garbage_collector.h"
 #include "vkutils/query_pool.hpp"
 #include "vkutils/sampler.h"
 
@@ -16,42 +17,6 @@ namespace vk
 	u64 last_completed_event_id();
 	void on_event_completed(u64 event_id, bool flush = false);
 
-	class disposable_t
-	{
-		void* ptr;
-		std::function<void(void*)> deleter;
-
-		disposable_t(void* ptr_, std::function<void(void*)> deleter_) :
-			ptr(ptr_), deleter(deleter_) {}
-	public:
-
-		disposable_t() = delete;
-		disposable_t(const disposable_t&) = delete;
-
-		disposable_t(disposable_t&& other):
-			ptr(std::exchange(other.ptr, nullptr)),
-			deleter(other.deleter)
-		{}
-
-		~disposable_t()
-		{
-			if (ptr)
-			{
-				deleter(ptr);
-				ptr = nullptr;
-			}
-		}
-
-		template <typename T>
-		static disposable_t make(T* raw)
-		{
-			return disposable_t(raw, [](void *raw)
-			{
-				delete static_cast<T*>(raw);
-			});
-		}
-	};
-
 	struct eid_scope_t
 	{
 		u64 eid;
@@ -83,7 +48,7 @@ namespace vk
 		}
 	};
 
-	class resource_manager
+	class resource_manager : public garbage_collector
 	{
 	private:
 		sampler_pool_t m_sampler_pool;
@@ -151,7 +116,7 @@ namespace vk
 			return ret;
 		}
 
-		inline void dispose(vk::disposable_t& disposable)
+		void dispose(vk::disposable_t& disposable) override
 		{
 			get_current_eid_scope().m_disposables.emplace_back(std::move(disposable));
 		}
diff --git a/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp b/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp
index 5c2726df67..00e94505ef 100644
--- a/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp
+++ b/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp
@@ -233,7 +233,7 @@ namespace vk
 	std::pair<VkDescriptorSetLayout, VkPipelineLayout> shader_interpreter::create_layout(VkDevice dev)
 	{
 		const auto& binding_table = vk::get_current_renderer()->get_pipeline_binding_table();
-		std::vector<VkDescriptorSetLayoutBinding> bindings(binding_table.total_descriptor_bindings);
+		rsx::simple_array<VkDescriptorSetLayoutBinding> bindings(binding_table.total_descriptor_bindings);
 
 		u32 idx = 0;
 
@@ -378,13 +378,15 @@
 	{
 		const auto max_draw_calls = dev.get_descriptor_max_draw_calls();
 
-		std::vector<VkDescriptorPoolSize> sizes;
-		sizes.push_back({ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 * max_draw_calls });
-		sizes.push_back({ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 * max_draw_calls });
-		sizes.push_back({ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , 68 * max_draw_calls });
-		sizes.push_back({ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3 * max_draw_calls });
+		rsx::simple_array<VkDescriptorPoolSize> sizes =
+		{
+			{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER , 6 },
+			{ VK_DESCRIPTOR_TYPE_UNIFORM_TEXEL_BUFFER , 3 },
+			{ VK_DESCRIPTOR_TYPE_COMBINED_IMAGE_SAMPLER , 68 },
+			{ VK_DESCRIPTOR_TYPE_STORAGE_BUFFER, 3 }
+		};
 
-		m_descriptor_pool.create(dev, sizes.data(), ::size32(sizes), max_draw_calls, 2);
+		m_descriptor_pool.create(dev, sizes, max_draw_calls);
 	}
 
 	void shader_interpreter::init(const vk::render_device& dev)
@@ -518,7 +520,7 @@ namespace vk
 
 	VkDescriptorSet shader_interpreter::allocate_descriptor_set()
 	{
-		return m_descriptor_pool.allocate(m_shared_descriptor_layout, VK_TRUE, 0);
+		return m_descriptor_pool.allocate(m_shared_descriptor_layout);
 	}
 
 	glsl::program* shader_interpreter::get(const vk::pipeline_props& properties, const program_hash_util::fragment_program_utils::fragment_program_metadata& metadata)
diff --git a/rpcs3/Emu/RSX/VK/VKTextOut.h b/rpcs3/Emu/RSX/VK/VKTextOut.h
index 41e0ad037b..1cb45cecd3 100644
--- a/rpcs3/Emu/RSX/VK/VKTextOut.h
+++ b/rpcs3/Emu/RSX/VK/VKTextOut.h
@@ -40,16 +40,16 @@ namespace vk
 
 		void init_descriptor_set(vk::render_device &dev)
 		{
-			VkDescriptorPoolSize descriptor_pools[1] =
+			rsx::simple_array<VkDescriptorPoolSize> descriptor_pools =
 			{
-				{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 120 },
+				{ VK_DESCRIPTOR_TYPE_UNIFORM_BUFFER, 1 },
 			};
 
 			// Reserve descriptor pools
-			m_descriptor_pool.create(dev, descriptor_pools, 1, 120, 2);
+			m_descriptor_pool.create(dev, descriptor_pools);
 
 			// Scale and offset data plus output color
-			std::vector<VkDescriptorSetLayoutBinding> bindings =
+			rsx::simple_array<VkDescriptorSetLayoutBinding> bindings =
 			{
 				{
 					.binding = 0,
@@ -205,7 +205,7 @@ namespace vk
 		{
 			ensure(m_used_descriptors < 120);
 
-			m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout, VK_TRUE, m_used_descriptors++);
+			m_descriptor_set = m_descriptor_pool.allocate(m_descriptor_layout);
 
 			float scale[] = { scale_x, scale_y };
 			float colors[] = { color[0], color[1], color[2], color[3] };
diff --git a/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp b/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp
index d7f4738f7d..96234d7cf5 100644
--- a/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp
+++ b/rpcs3/Emu/RSX/VK/vkutils/descriptors.cpp
@@ -1,5 +1,6 @@
 #include "Emu/IdManager.h"
 #include "descriptors.h"
+#include "garbage_collector.h"
 
 namespace vk
 {
@@ -63,7 +64,7 @@ namespace vk
 			g_fxo->get().flush_all();
 		}
 
-		VkDescriptorSetLayout create_layout(const std::vector<VkDescriptorSetLayoutBinding>& bindings)
+		VkDescriptorSetLayout create_layout(const rsx::simple_array<VkDescriptorSetLayoutBinding>& bindings)
 		{
 			VkDescriptorSetLayoutCreateInfo infos = {};
 			infos.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_LAYOUT_CREATE_INFO;
@@ -105,65 +106,56 @@
 		}
 	}
 
-	void descriptor_pool::create(const vk::render_device& dev, VkDescriptorPoolSize* sizes, u32 size_descriptors_count, u32 max_sets, u8 subpool_count)
+	void descriptor_pool::create(const vk::render_device& dev, const rsx::simple_array<VkDescriptorPoolSize>& pool_sizes, u32 max_sets)
 	{
-		ensure(subpool_count);
+		ensure(max_sets > 16);
+
+		auto scaled_pool_sizes = pool_sizes;
+		for (auto& size : scaled_pool_sizes)
+		{
+			ensure(size.descriptorCount < 32); // Sanity check. Remove before commit.
+			size.descriptorCount *= max_sets;
+		}
 
 		info.flags = dev.get_descriptor_update_after_bind_support() ? VK_DESCRIPTOR_POOL_CREATE_UPDATE_AFTER_BIND_BIT : 0;
 		info.maxSets = max_sets;
-		info.poolSizeCount = size_descriptors_count;
-		info.pPoolSizes = sizes;
+		info.poolSizeCount = scaled_pool_sizes.size();
+		info.pPoolSizes = scaled_pool_sizes.data();
 		info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
 
 		m_owner = &dev;
-		m_device_pools.resize(subpool_count);
-
-		for (auto& pool : m_device_pools)
-		{
-			CHECK_RESULT(vkCreateDescriptorPool(dev, &info, nullptr, &pool));
-		}
-
-		m_current_pool_handle = m_device_pools[0];
+		next_subpool();
 	}
 
 	void descriptor_pool::destroy()
 	{
-		if (m_device_pools.empty()) return;
+		if (m_device_subpools.empty()) return;
 
-		for (auto& pool : m_device_pools)
+		for (auto& pool : m_device_subpools)
 		{
-			vkDestroyDescriptorPool((*m_owner), pool, nullptr);
-			pool = VK_NULL_HANDLE;
+			vkDestroyDescriptorPool((*m_owner), pool.handle, nullptr);
+			pool.handle = VK_NULL_HANDLE;
 		}
 
 		m_owner = nullptr;
 	}
 
-	void descriptor_pool::reset(VkDescriptorPoolResetFlags flags)
+	void descriptor_pool::reset(u32 subpool_id, VkDescriptorPoolResetFlags flags)
 	{
-		m_descriptor_set_cache.clear();
-		m_current_pool_index = (m_current_pool_index + 1) % u32(m_device_pools.size());
-		m_current_pool_handle = m_device_pools[m_current_pool_index];
-		CHECK_RESULT(vkResetDescriptorPool(*m_owner, m_current_pool_handle, flags));
+		std::lock_guard lock(m_subpool_lock);
+
+		CHECK_RESULT(vkResetDescriptorPool(*m_owner, m_device_subpools[subpool_id].handle, flags));
+		m_device_subpools[subpool_id].busy = VK_FALSE;
 	}
 
-	VkDescriptorSet descriptor_pool::allocate(VkDescriptorSetLayout layout, VkBool32 use_cache, u32 used_count)
+	VkDescriptorSet descriptor_pool::allocate(VkDescriptorSetLayout layout, VkBool32 use_cache)
 	{
 		if (use_cache)
 		{
 			if (m_descriptor_set_cache.empty())
 			{
 				// For optimal cache utilization, each pool should only allocate one layout
-				if (m_cached_layout != layout)
-				{
-					m_cached_layout = layout;
-					m_allocation_request_cache.resize(max_cache_size);
-
-					for (auto& layout_ : m_allocation_request_cache)
-					{
-						layout_ = m_cached_layout;
-					}
-				}
+				m_cached_layout = layout;
 			}
 			else if (m_cached_layout != layout)
 			{
@@ -175,6 +167,11 @@
 			}
 		}
 
+		if (!can_allocate(use_cache ? 4 : 1, m_current_subpool_offset))
+		{
+			next_subpool();
+		}
+
 		VkDescriptorSet new_descriptor_set;
 		VkDescriptorSetAllocateInfo alloc_info = {};
 		alloc_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_SET_ALLOCATE_INFO;
@@ -184,8 +181,12 @@
 
 		if (use_cache)
 		{
-			ensure(used_count < info.maxSets);
-			const auto alloc_size = std::min(info.maxSets - used_count, max_cache_size);
+			const auto alloc_size = std::min(info.maxSets - m_current_subpool_offset, max_cache_size);
+			m_allocation_request_cache.resize(alloc_size);
+			for (auto& layout_ : m_allocation_request_cache)
+			{
+				layout_ = m_cached_layout;
+			}
 
 			ensure(m_descriptor_set_cache.empty());
 			alloc_info.descriptorSetCount = alloc_size;
@@ -204,6 +205,51 @@
 		return new_descriptor_set;
 	}
 
+	void descriptor_pool::next_subpool()
+	{
+		if (m_current_subpool_index != umax)
+		{
+			// Enqueue release using gc
+			auto release_func = [subpool_index=m_current_subpool_index, this]()
+			{
+				this->reset(subpool_index, 0);
+			};
+
+			auto cleanup_obj = std::make_unique<gc_wrapper_t>(release_func);
+			vk::get_gc()->dispose(cleanup_obj);
+		}
+
+		std::lock_guard lock(m_subpool_lock);
+
+		m_current_subpool_offset = 0;
+		m_current_subpool_index = umax;
+
+		for (u32 index = 0; index < m_device_subpools.size(); ++index)
+		{
+			if (!m_device_subpools[index].busy)
+			{
+				m_current_subpool_index = index;
+				break;
+			}
+		}
+
+		if (m_current_subpool_index == umax)
+		{
+			VkDescriptorPool subpool = VK_NULL_HANDLE;
+			CHECK_RESULT(vkCreateDescriptorPool(*m_owner, &info, nullptr, &subpool));
+
+			m_device_subpools.push_back(
+			{
+				.handle = subpool,
+				.busy = VK_FALSE
+			});
+
+			m_current_subpool_index = m_device_subpools.size() - 1;
+		}
+
+		m_device_subpools[m_current_subpool_index].busy = VK_TRUE;
+	}
+
 	descriptor_set::descriptor_set(VkDescriptorSet set)
 	{
 		flush();
diff --git a/rpcs3/Emu/RSX/VK/vkutils/descriptors.h b/rpcs3/Emu/RSX/VK/vkutils/descriptors.h
index 83066dbb19..e325386a32 100644
--- a/rpcs3/Emu/RSX/VK/vkutils/descriptors.h
+++ b/rpcs3/Emu/RSX/VK/vkutils/descriptors.h
@@ -10,30 +10,55 @@
 
 namespace vk
 {
+	struct gc_wrapper_t
+	{
+		std::function<void()> m_callback;
+
+		gc_wrapper_t(std::function<void()> callback)
+			: m_callback(callback)
+		{}
+
+		~gc_wrapper_t()
+		{
+			m_callback();
+		}
+	};
+
 	class descriptor_pool
 	{
 	public:
 		descriptor_pool() = default;
 		~descriptor_pool() = default;
 
-		void create(const vk::render_device& dev, VkDescriptorPoolSize* sizes, u32 size_descriptors_count, u32 max_sets, u8 subpool_count);
+		void create(const vk::render_device& dev, const rsx::simple_array<VkDescriptorPoolSize>& pool_sizes, u32 max_sets = 1024);
 		void destroy();
-		void reset(VkDescriptorPoolResetFlags flags);
 
-		VkDescriptorSet allocate(VkDescriptorSetLayout layout, VkBool32 use_cache, u32 used_count);
+		VkDescriptorSet allocate(VkDescriptorSetLayout layout, VkBool32 use_cache = VK_TRUE);
 
 		operator VkDescriptorPool() { return m_current_pool_handle; }
-		FORCE_INLINE bool valid() const { return (!m_device_pools.empty()); }
+		FORCE_INLINE bool valid() const { return (!m_device_subpools.empty()); }
 		FORCE_INLINE u32 max_sets() const { return info.maxSets; }
-		FORCE_INLINE bool can_allocate(u32 required_count, u32 used_count) const { return (used_count + required_count) <= info.maxSets; };
 
 	private:
+		FORCE_INLINE bool can_allocate(u32 required_count, u32 already_used_count = 0) const { return (required_count + already_used_count) <= info.maxSets; };
+
+		void reset(u32 subpool_id, VkDescriptorPoolResetFlags flags);
+		void next_subpool();
+
+		struct logical_subpool_t
+		{
+			VkDescriptorPool handle;
+			VkBool32 busy;
+		};
+
 		const vk::render_device* m_owner = nullptr;
 		VkDescriptorPoolCreateInfo info = {};
 
-		rsx::simple_array<VkDescriptorPool> m_device_pools;
+		rsx::simple_array<logical_subpool_t> m_device_subpools;
 		VkDescriptorPool m_current_pool_handle = VK_NULL_HANDLE;
-		u32 m_current_pool_index = 0;
+		u32 m_current_subpool_index = umax;
+		u32 m_current_subpool_offset = 0;
+
+		shared_mutex m_subpool_lock;
 
 		static constexpr size_t max_cache_size = 64;
 		VkDescriptorSetLayout m_cached_layout = VK_NULL_HANDLE;
@@ -122,6 +147,6 @@ namespace vk
 
 		void init();
 		void flush();
 
-		VkDescriptorSetLayout create_layout(const std::vector<VkDescriptorSetLayoutBinding>& bindings);
+		VkDescriptorSetLayout create_layout(const rsx::simple_array<VkDescriptorSetLayoutBinding>& bindings);
 	}
 }
diff --git a/rpcs3/Emu/RSX/VK/vkutils/garbage_collector.h b/rpcs3/Emu/RSX/VK/vkutils/garbage_collector.h
new file mode 100644
index 0000000000..6bf8150b4b
--- /dev/null
+++ b/rpcs3/Emu/RSX/VK/vkutils/garbage_collector.h
@@ -0,0 +1,56 @@
+#include <functional>
+
+#include <memory>
+
+namespace vk
+{
+	class disposable_t
+	{
+		void* ptr;
+		std::function<void(void*)> deleter;
+
+		disposable_t(void* ptr_, std::function<void(void*)> deleter_) :
+			ptr(ptr_), deleter(deleter_) {}
+	public:
+
+		disposable_t() = delete;
+		disposable_t(const disposable_t&) = delete;
+
+		disposable_t(disposable_t&& other) :
+			ptr(std::exchange(other.ptr, nullptr)),
+			deleter(other.deleter)
+		{}
+
+		~disposable_t()
+		{
+			if (ptr)
+			{
+				deleter(ptr);
+				ptr = nullptr;
+			}
+		}
+
+		template <typename T>
+		static disposable_t make(T* raw)
+		{
+			return disposable_t(raw, [](void* raw)
+			{
+				delete static_cast<T*>(raw);
+			});
+		}
+	};
+
+	struct garbage_collector
+	{
+		virtual void dispose(vk::disposable_t& object) = 0;
+
+		template <typename T>
+		void dispose(std::unique_ptr<T>& object)
+		{
+			auto ptr = vk::disposable_t::make(object.release());
+			dispose(ptr);
+		}
+	};
+
+	garbage_collector* get_gc();
+}
diff --git a/rpcs3/VKGSRender.vcxproj b/rpcs3/VKGSRender.vcxproj
index 56a8603dc8..e0080c18d2 100644
--- a/rpcs3/VKGSRender.vcxproj
+++ b/rpcs3/VKGSRender.vcxproj
@@ -45,6 +45,7 @@
+    <ClInclude Include="Emu\RSX\VK\vkutils\garbage_collector.h" />
diff --git a/rpcs3/VKGSRender.vcxproj.filters b/rpcs3/VKGSRender.vcxproj.filters
index 153b6bc569..f0f31f894a 100644
--- a/rpcs3/VKGSRender.vcxproj.filters
+++ b/rpcs3/VKGSRender.vcxproj.filters
@@ -171,6 +171,9 @@
       <Filter>upscalers</Filter>
     </ClInclude>
+    <ClInclude Include="Emu\RSX\VK\vkutils\garbage_collector.h">
+      <Filter>vkutils</Filter>
+    </ClInclude>