diff --git a/gfx/common/vulkan_common.c b/gfx/common/vulkan_common.c
index 0b09324f8c..11fa6104dd 100644
--- a/gfx/common/vulkan_common.c
+++ b/gfx/common/vulkan_common.c
@@ -482,7 +482,9 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
 
       if ((format_properties.linearTilingFeatures & required) != required)
       {
-         RARCH_LOG("[Vulkan]: GPU does not support using linear images as textures. Falling back to copy path.\n");
+#ifdef VULKAN_DEBUG
+         RARCH_DBG("[Vulkan]: GPU does not support using linear images as textures. Falling back to copy path.\n");
+#endif
         type = VULKAN_TEXTURE_STAGING;
      }
   }
@@ -584,7 +586,7 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
   {
      /* Recreate texture but for STAGING this time ... */
 #ifdef VULKAN_DEBUG
-      RARCH_LOG("[Vulkan]: GPU supports linear images as textures, but not DEVICE_LOCAL. Falling back to copy path.\n");
+      RARCH_DBG("[Vulkan]: GPU supports linear images as textures, but not DEVICE_LOCAL. Falling back to copy path.\n");
 #endif
      type = VULKAN_TEXTURE_STAGING;
      vkDestroyImage(device, tex.image, NULL);
@@ -2299,7 +2301,7 @@ bool vulkan_surface_create(gfx_ctx_vulkan_data_t *vk,
       enum vulkan_wsi_type type,
       void *display, void *surface,
       unsigned width, unsigned height,
-      unsigned swap_interval)
+      int8_t swap_interval)
 {
    switch (type)
    {
@@ -2928,7 +2930,7 @@ retry:
 
 bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
       unsigned width, unsigned height,
-      unsigned swap_interval)
+      int8_t swap_interval)
 {
    unsigned i;
    uint32_t format_count;
@@ -2978,7 +2980,7 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
   {
      /* Do not bother creating a swapchain redundantly. */
 #ifdef VULKAN_DEBUG
-      RARCH_LOG("[Vulkan]: Do not need to re-create swapchain.\n");
+      RARCH_DBG("[Vulkan]: Do not need to re-create swapchain.\n");
 #endif
      vulkan_create_wait_fences(vk);
 
@@ -3025,6 +3027,12 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
 
   vulkan_emulated_mailbox_deinit(&vk->mailbox);
 
+   /* Unless we have other reasons to clamp, we should prefer 3 images.
+    * We hard sync against the swapchain, so if we have 2 images,
+    * we would be unable to overlap CPU and GPU, which can get very slow
+    * for GPU-rendered cores. */
+   desired_swapchain_images = settings->uints.video_max_swapchain_images;
+   present_mode_count       = 0;
   vkGetPhysicalDeviceSurfacePresentModesKHR(
         vk->context.gpu, vk->vk_surface,
         &present_mode_count, NULL);
@@ -3038,40 +3046,80 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
         vk->context.gpu, vk->vk_surface,
         &present_mode_count, present_modes);
 
-#ifdef VULKAN_DEBUG
-   for (i = 0; i < present_mode_count; i++)
+   if (vk->swapchain == VK_NULL_HANDLE)
   {
-      RARCH_LOG("[Vulkan]: Swapchain supports present mode: %u.\n",
-            present_modes[i]);
+      for (i = 0; i < present_mode_count; i++)
+      {
+         switch (present_modes[i])
+         {
+            case VK_PRESENT_MODE_IMMEDIATE_KHR:
+               RARCH_DBG("[Vulkan]: Swapchain supports present mode: IMMEDIATE_KHR.\n");
+               break;
+            case VK_PRESENT_MODE_MAILBOX_KHR:
+               RARCH_DBG("[Vulkan]: Swapchain supports present mode: MAILBOX_KHR.\n");
+               break;
+            case VK_PRESENT_MODE_FIFO_KHR:
+               RARCH_DBG("[Vulkan]: Swapchain supports present mode: FIFO_KHR.\n");
+               break;
+            case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
+               RARCH_DBG("[Vulkan]: Swapchain supports present mode: FIFO_RELAXED_KHR.\n");
+               break;
+            default:
+               break;
+         }
+
+         vk->context.present_modes[i] = present_modes[i];
+      }
   }
-#endif
 
   vk->context.swap_interval = swap_interval;
   for (i = 0; i < present_mode_count; i++)
   {
-      if (!swap_interval && present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR)
+      /* Special fallthrough default for MAILBOX: rather get MAILBOX
+       * than FIFO without vsync when IMMEDIATE is not available. */
+      if (present_modes[i] == VK_PRESENT_MODE_MAILBOX_KHR
+            && (   (swap_interval > 0 && desired_swapchain_images > 2)
+                || (swap_interval < 1 && swapchain_present_mode == VK_PRESENT_MODE_FIFO_KHR))
+         )
      {
         swapchain_present_mode = VK_PRESENT_MODE_MAILBOX_KHR;
-         break;
+         continue;
      }
-      else if (!swap_interval && present_modes[i]
-            == VK_PRESENT_MODE_IMMEDIATE_KHR)
+      else if (swap_interval == 0 && present_modes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)
      {
        swapchain_present_mode = VK_PRESENT_MODE_IMMEDIATE_KHR;
        break;
      }
-      else if (swap_interval && present_modes[i] == VK_PRESENT_MODE_FIFO_KHR)
+      else if (swap_interval > 0 && present_modes[i] == VK_PRESENT_MODE_FIFO_KHR)
      {
        /* Kind of tautological since FIFO must always be present. */
        swapchain_present_mode = VK_PRESENT_MODE_FIFO_KHR;
        break;
      }
+      else if (swap_interval < 0 && present_modes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+      {
+         swapchain_present_mode = VK_PRESENT_MODE_FIFO_RELAXED_KHR;
+         break;
+      }
   }
 
-#ifdef VULKAN_DEBUG
-   RARCH_LOG("[Vulkan]: Creating swapchain with present mode: %u\n",
-         (unsigned)swapchain_present_mode);
-#endif
+   switch (swapchain_present_mode)
+   {
+      case VK_PRESENT_MODE_IMMEDIATE_KHR:
+         RARCH_DBG("[Vulkan]: Creating swapchain with present mode: IMMEDIATE_KHR.\n");
+         break;
+      case VK_PRESENT_MODE_MAILBOX_KHR:
+         RARCH_DBG("[Vulkan]: Creating swapchain with present mode: MAILBOX_KHR.\n");
+         break;
+      case VK_PRESENT_MODE_FIFO_KHR:
+         RARCH_DBG("[Vulkan]: Creating swapchain with present mode: FIFO_KHR.\n");
+         break;
+      case VK_PRESENT_MODE_FIFO_RELAXED_KHR:
+         RARCH_DBG("[Vulkan]: Creating swapchain with present mode: FIFO_RELAXED_KHR.\n");
+         break;
+      default:
+         break;
+   }
 
   vkGetPhysicalDeviceSurfaceFormatsKHR(vk->context.gpu,
        vk->vk_surface, &format_count, NULL);
@@ -3172,20 +3220,13 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
      vk->context.num_swapchain_images = 1;
 
      memset(vk->context.swapchain_images, 0, sizeof(vk->context.swapchain_images));
-      RARCH_LOG("[Vulkan]: Cannot create a swapchain yet. Will try again later ...\n");
+      RARCH_DBG("[Vulkan]: Cannot create a swapchain yet. Will try again later ...\n");
      return true;
   }
 
-#ifdef VULKAN_DEBUG
-   RARCH_LOG("[Vulkan]: Using swapchain size %ux%u.\n",
-         swapchain_size.width, swapchain_size.height);
-#endif
-
-   /* Unless we have other reasons to clamp, we should prefer 3 images.
-    * We hard sync against the swapchain, so if we have 2 images,
-    * we would be unable to overlap CPU and GPU, which can get very slow
-    * for GPU-rendered cores. */
-   desired_swapchain_images = settings->uints.video_max_swapchain_images;
+   if (vk->swapchain == VK_NULL_HANDLE)
+      RARCH_DBG("[Vulkan]: Using swapchain size %ux%u.\n",
+            swapchain_size.width, swapchain_size.height);
 
   /* Clamp images requested to what is supported by the implementation. */
   if (desired_swapchain_images < surface_properties.minImageCount)
diff --git a/gfx/common/vulkan_common.h b/gfx/common/vulkan_common.h
index 02ca9bf4ab..8374ca2cbe 100644
--- a/gfx/common/vulkan_common.h
+++ b/gfx/common/vulkan_common.h
@@ -134,6 +134,7 @@ typedef struct vulkan_context
   VkPhysicalDeviceProperties gpu_properties;
   VkPhysicalDeviceMemoryProperties memory_properties;
 
+   VkPresentModeKHR present_modes[16];
   VkImage swapchain_images[VULKAN_MAX_SWAPCHAIN_IMAGES];
   VkFence swapchain_fences[VULKAN_MAX_SWAPCHAIN_IMAGES];
   VkFormat swapchain_format;
@@ -156,9 +157,9 @@ typedef struct vulkan_context
 
   unsigned swapchain_width;
   unsigned swapchain_height;
-   unsigned swap_interval;
   unsigned num_recycled_acquire_semaphores;
 
+   int8_t swap_interval;
   uint8_t flags;
 
   bool swapchain_fences_signalled[VULKAN_MAX_SWAPCHAIN_IMAGES];
@@ -724,7 +725,7 @@ bool vulkan_surface_create(gfx_ctx_vulkan_data_t *vk,
      enum vulkan_wsi_type type,
      void *display, void *surface,
      unsigned width, unsigned height,
-      unsigned swap_interval);
+      int8_t swap_interval);
 
 void vulkan_present(gfx_ctx_vulkan_data_t *vk, unsigned index);
 
@@ -732,7 +733,7 @@ void vulkan_acquire_next_image(gfx_ctx_vulkan_data_t *vk);
 
 bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
      unsigned width, unsigned height,
-      unsigned swap_interval);
+      int8_t swap_interval);
 
 void vulkan_set_uniform_buffer(
      VkDevice device,
diff --git a/gfx/drivers_context/w_vk_ctx.c b/gfx/drivers_context/w_vk_ctx.c
index f2666749a4..1f8971d4b5 100644
--- a/gfx/drivers_context/w_vk_ctx.c
+++ b/gfx/drivers_context/w_vk_ctx.c
@@ -330,10 +330,24 @@ static void *gfx_ctx_w_vk_get_context_data(void *data) { return &win32_vk.contex
 
 static uint32_t gfx_ctx_w_vk_get_flags(void *data)
 {
-   uint32_t flags = 0;
+   uint32_t flags             = 0;
+   uint8_t present_mode_count = 16;
+   uint8_t i                  = 0;
+
+   /* Check for FIFO_RELAXED_KHR capability */
+   for (i = 0; i < present_mode_count; i++)
+   {
+      if (win32_vk.context.present_modes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+      {
+         BIT32_SET(flags, GFX_CTX_FLAGS_ADAPTIVE_VSYNC);
+         break;
+      }
+   }
+
 #if defined(HAVE_SLANG) && defined(HAVE_SPIRV_CROSS)
   BIT32_SET(flags, GFX_CTX_FLAGS_SHADERS_SLANG);
 #endif
+
   return flags;
 }
 
diff --git a/gfx/drivers_context/x_vk_ctx.c b/gfx/drivers_context/x_vk_ctx.c
index 9d234dff10..da7a9af1f4 100644
--- a/gfx/drivers_context/x_vk_ctx.c
+++ b/gfx/drivers_context/x_vk_ctx.c
@@ -541,8 +541,20 @@ static void *gfx_ctx_x_vk_get_context_data(void *data)
 
 static uint32_t gfx_ctx_x_vk_get_flags(void *data)
 {
-   uint32_t flags = 0;
-   gfx_ctx_x_vk_data_t *x = (gfx_ctx_x_vk_data_t*)data;
+   gfx_ctx_x_vk_data_t *x     = (gfx_ctx_x_vk_data_t*)data;
+   uint32_t flags             = 0;
+   uint8_t present_mode_count = 16;
+   uint8_t i                  = 0;
+
+   /* Check for FIFO_RELAXED_KHR capability */
+   for (i = 0; i < present_mode_count; i++)
+   {
+      if (x->vk.context.present_modes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
+      {
+         BIT32_SET(flags, GFX_CTX_FLAGS_ADAPTIVE_VSYNC);
+         break;
+      }
+   }
 
 #if defined(HAVE_SLANG) && defined(HAVE_SPIRV_CROSS)
   BIT32_SET(flags, GFX_CTX_FLAGS_SHADERS_SLANG);
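
Reviewer note (not part of the patch): the present-mode selection in vulkan_create_swapchain() can be read as a small mapping from the new signed swap_interval to a VkPresentModeKHR. The sketch below is an illustration only; the helper name pick_present_mode is invented for this note and does not exist in the tree, and it assumes the supported modes have already been queried from the surface.

/* Illustrative summary of the selection logic above:
 *   swap_interval  > 0 : vsync          -> FIFO (or MAILBOX when more than 2 images are requested)
 *   swap_interval == 0 : no vsync       -> IMMEDIATE (or MAILBOX as a fallback when IMMEDIATE is missing)
 *   swap_interval  < 0 : adaptive vsync -> FIFO_RELAXED */
#include <stdint.h>
#include <vulkan/vulkan.h>

static VkPresentModeKHR pick_present_mode(const VkPresentModeKHR *modes,
      uint32_t count, int8_t swap_interval, unsigned desired_images)
{
   uint32_t i;
   /* FIFO is the only mode the spec guarantees, so it is the default. */
   VkPresentModeKHR mode = VK_PRESENT_MODE_FIFO_KHR;

   for (i = 0; i < count; i++)
   {
      if (modes[i] == VK_PRESENT_MODE_MAILBOX_KHR
            && (   (swap_interval > 0 && desired_images > 2)
                || (swap_interval < 1 && mode == VK_PRESENT_MODE_FIFO_KHR)))
         mode = VK_PRESENT_MODE_MAILBOX_KHR; /* keep scanning for a closer match */
      else if (swap_interval == 0 && modes[i] == VK_PRESENT_MODE_IMMEDIATE_KHR)
         return VK_PRESENT_MODE_IMMEDIATE_KHR;
      else if (swap_interval > 0 && modes[i] == VK_PRESENT_MODE_FIFO_KHR)
         return VK_PRESENT_MODE_FIFO_KHR;
      else if (swap_interval < 0 && modes[i] == VK_PRESENT_MODE_FIFO_RELAXED_KHR)
         return VK_PRESENT_MODE_FIFO_RELAXED_KHR;
   }
   return mode;
}

In the actual patch the same scan also records each supported mode into vk->context.present_modes[], which is what lets the win32 and X11 contexts report GFX_CTX_FLAGS_ADAPTIVE_VSYNC when FIFO_RELAXED_KHR is available.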