(Vulkan) Optimizations/cleanups -

* Honor VK_SUBOPTIMAL_KHR on non-Android, since the swapchain should be
recreated in that case. On Android it can be promoted to VK_SUCCESS:
VK_SUBOPTIMAL_KHR can occur there when pre-rotation is not handled.
* Small cleanups in general - fewer conditionals
This commit is contained in:
libretroadmin 2022-05-18 16:39:54 +02:00
parent 650101083d
commit 3cad090b4c

View File

@ -189,8 +189,7 @@ static VkResult vulkan_emulated_mailbox_acquire_next_image_blocking(
while (!mailbox->acquired) while (!mailbox->acquired)
scond_wait(mailbox->cond, mailbox->lock); scond_wait(mailbox->cond, mailbox->lock);
res = mailbox->result; if ((res = mailbox->result) == VK_SUCCESS)
if (res == VK_SUCCESS)
*index = mailbox->index; *index = mailbox->index;
mailbox->has_pending_request = false; mailbox->has_pending_request = false;
mailbox->acquired = false; mailbox->acquired = false;
@ -230,10 +229,11 @@ static void vulkan_emulated_mailbox_loop(void *userdata)
mailbox->request_acquire = false; mailbox->request_acquire = false;
slock_unlock(mailbox->lock); slock_unlock(mailbox->lock);
#ifdef ANDROID
mailbox->result = vkAcquireNextImageKHR( mailbox->result = vkAcquireNextImageKHR(
mailbox->device, mailbox->swapchain, UINT64_MAX, mailbox->device, mailbox->swapchain, UINT64_MAX,
VK_NULL_HANDLE, fence, &mailbox->index); VK_NULL_HANDLE, fence, &mailbox->index);
/* VK_SUBOPTIMAL_KHR can be returned on Android 10 /* VK_SUBOPTIMAL_KHR can be returned on Android 10
* when prerotate is not dealt with. * when prerotate is not dealt with.
* This is not an error we need to care about, * This is not an error we need to care about,
@ -242,17 +242,22 @@ static void vulkan_emulated_mailbox_loop(void *userdata)
mailbox->result = VK_SUCCESS; mailbox->result = VK_SUCCESS;
if (mailbox->result == VK_SUCCESS) if (mailbox->result == VK_SUCCESS)
vkWaitForFences(mailbox->device, 1, #else
&fence, true, UINT64_MAX); if ((mailbox->result = vkAcquireNextImageKHR(
vkResetFences(mailbox->device, 1, &fence); mailbox->device, mailbox->swapchain, UINT64_MAX,
VK_NULL_HANDLE, fence, &mailbox->index)) == VK_SUCCESS)
if (mailbox->result == VK_SUCCESS) #endif
{ {
vkWaitForFences(mailbox->device, 1, &fence, true, UINT64_MAX);
vkResetFences(mailbox->device, 1, &fence);
slock_lock(mailbox->lock); slock_lock(mailbox->lock);
mailbox->acquired = true; mailbox->acquired = true;
scond_signal(mailbox->cond); scond_signal(mailbox->cond);
slock_unlock(mailbox->lock); slock_unlock(mailbox->lock);
} }
else
vkResetFences(mailbox->device, 1, &fence);
} }
vkDestroyFence(mailbox->device, fence, NULL); vkDestroyFence(mailbox->device, fence, NULL);
@ -263,18 +268,24 @@ static bool vulkan_emulated_mailbox_init(
VkDevice device, VkDevice device,
VkSwapchainKHR swapchain) VkSwapchainKHR swapchain)
{ {
memset(mailbox, 0, sizeof(*mailbox)); mailbox->thread = NULL;
mailbox->device = device; mailbox->lock = NULL;
mailbox->swapchain = swapchain; mailbox->cond = NULL;
mailbox->device = device;
mailbox->swapchain = swapchain;
mailbox->index = 0;
mailbox->result = VK_SUCCESS;
mailbox->acquired = false;
mailbox->request_acquire = false;
mailbox->dead = false;
mailbox->has_pending_request = false;
mailbox->cond = scond_new(); if (!(mailbox->cond = scond_new()))
if (!mailbox->cond)
return false; return false;
mailbox->lock = slock_new(); if (!(mailbox->lock = slock_new()))
if (!mailbox->lock)
return false; return false;
mailbox->thread = sthread_create(vulkan_emulated_mailbox_loop, mailbox); if (!(mailbox->thread = sthread_create(vulkan_emulated_mailbox_loop,
if (!mailbox->thread) mailbox)))
return false; return false;
return true; return true;
} }
@ -968,9 +979,9 @@ void vulkan_draw_triangles(vk_t *vk, const struct vk_draw_triangles *call)
/* Changing pipeline invalidates dynamic state. */ /* Changing pipeline invalidates dynamic state. */
vk->tracker.dirty |= VULKAN_DIRTY_DYNAMIC_BIT; vk->tracker.dirty |= VULKAN_DIRTY_DYNAMIC_BIT;
vulkan_check_dynamic_state(vk);
} }
else if (vk->tracker.dirty & VULKAN_DIRTY_DYNAMIC_BIT)
if (vk->tracker.dirty & VULKAN_DIRTY_DYNAMIC_BIT)
vulkan_check_dynamic_state(vk); vulkan_check_dynamic_state(vk);
/* Upload descriptors */ /* Upload descriptors */
@ -1033,9 +1044,9 @@ void vulkan_draw_quad(vk_t *vk, const struct vk_draw_quad *quad)
vk->tracker.pipeline = quad->pipeline; vk->tracker.pipeline = quad->pipeline;
/* Changing pipeline invalidates dynamic state. */ /* Changing pipeline invalidates dynamic state. */
vk->tracker.dirty |= VULKAN_DIRTY_DYNAMIC_BIT; vk->tracker.dirty |= VULKAN_DIRTY_DYNAMIC_BIT;
vulkan_check_dynamic_state(vk);
} }
else if (vk->tracker.dirty & VULKAN_DIRTY_DYNAMIC_BIT)
if (vk->tracker.dirty & VULKAN_DIRTY_DYNAMIC_BIT)
vulkan_check_dynamic_state(vk); vulkan_check_dynamic_state(vk);
/* Upload descriptors */ /* Upload descriptors */
@ -1134,8 +1145,8 @@ struct vk_buffer vulkan_create_buffer(
alloc.memoryTypeIndex = vulkan_find_memory_type( alloc.memoryTypeIndex = vulkan_find_memory_type(
&context->memory_properties, &context->memory_properties,
mem_reqs.memoryTypeBits, mem_reqs.memoryTypeBits,
VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT | VK_MEMORY_PROPERTY_HOST_VISIBLE_BIT
VK_MEMORY_PROPERTY_HOST_COHERENT_BIT); | VK_MEMORY_PROPERTY_HOST_COHERENT_BIT);
vkAllocateMemory(context->device, &alloc, NULL, &buffer.memory); vkAllocateMemory(context->device, &alloc, NULL, &buffer.memory);
vkBindBufferMemory(context->device, buffer.buffer, buffer.memory, 0); vkBindBufferMemory(context->device, buffer.buffer, buffer.memory, 0);
@ -1170,17 +1181,17 @@ static struct vk_descriptor_pool *vulkan_alloc_descriptor_pool(
if (!pool) if (!pool)
return NULL; return NULL;
pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO; pool_info.sType = VK_STRUCTURE_TYPE_DESCRIPTOR_POOL_CREATE_INFO;
pool_info.pNext = NULL; pool_info.pNext = NULL;
pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT; pool_info.flags = VK_DESCRIPTOR_POOL_CREATE_FREE_DESCRIPTOR_SET_BIT;
pool_info.maxSets = VULKAN_DESCRIPTOR_MANAGER_BLOCK_SETS; pool_info.maxSets = VULKAN_DESCRIPTOR_MANAGER_BLOCK_SETS;
pool_info.poolSizeCount = manager->num_sizes; pool_info.poolSizeCount = manager->num_sizes;
pool_info.pPoolSizes = manager->sizes; pool_info.pPoolSizes = manager->sizes;
pool->pool = VK_NULL_HANDLE; pool->pool = VK_NULL_HANDLE;
for (i = 0; i < VULKAN_DESCRIPTOR_MANAGER_BLOCK_SETS; i++) for (i = 0; i < VULKAN_DESCRIPTOR_MANAGER_BLOCK_SETS; i++)
pool->sets[i] = VK_NULL_HANDLE; pool->sets[i] = VK_NULL_HANDLE;
pool->next = NULL; pool->next = NULL;
vkCreateDescriptorPool(device, &pool_info, NULL, &pool->pool); vkCreateDescriptorPool(device, &pool_info, NULL, &pool->pool);
@ -1329,9 +1340,8 @@ bool vulkan_buffer_chain_alloc(const struct vulkan_context *context,
{ {
if (!chain->head) if (!chain->head)
{ {
chain->head = vulkan_buffer_chain_alloc_node(context, if (!(chain->head = vulkan_buffer_chain_alloc_node(context,
chain->block_size, chain->usage); chain->block_size, chain->usage)))
if (!chain->head)
return false; return false;
chain->current = chain->head; chain->current = chain->head;
@ -1356,9 +1366,8 @@ bool vulkan_buffer_chain_alloc(const struct vulkan_context *context,
if (size < chain->block_size) if (size < chain->block_size)
size = chain->block_size; size = chain->block_size;
chain->current->next = vulkan_buffer_chain_alloc_node( if (!(chain->current->next = vulkan_buffer_chain_alloc_node(
context, size, chain->usage); context, size, chain->usage)))
if (!chain->current->next)
return false; return false;
vulkan_buffer_chain_step(chain); vulkan_buffer_chain_step(chain);
@ -1441,8 +1450,8 @@ static bool vulkan_find_instance_extensions(const char **exts, unsigned num_exts
if (vkEnumerateInstanceExtensionProperties(NULL, &property_count, NULL) != VK_SUCCESS) if (vkEnumerateInstanceExtensionProperties(NULL, &property_count, NULL) != VK_SUCCESS)
return false; return false;
properties = (VkExtensionProperties*)malloc(property_count * sizeof(*properties)); if (!(properties = (VkExtensionProperties*)malloc(property_count *
if (!properties) sizeof(*properties))))
{ {
ret = false; ret = false;
goto end; goto end;
@ -1479,8 +1488,8 @@ static bool vulkan_find_device_extensions(VkPhysicalDevice gpu,
if (vkEnumerateDeviceExtensionProperties(gpu, NULL, &property_count, NULL) != VK_SUCCESS) if (vkEnumerateDeviceExtensionProperties(gpu, NULL, &property_count, NULL) != VK_SUCCESS)
return false; return false;
properties = (VkExtensionProperties*)malloc(property_count * sizeof(*properties)); if (!(properties = (VkExtensionProperties*)malloc(property_count *
if (!properties) sizeof(*properties))))
{ {
ret = false; ret = false;
goto end; goto end;
@ -1527,8 +1536,7 @@ static bool vulkan_context_init_gpu(gfx_ctx_vulkan_data_t *vk)
return false; return false;
} }
gpus = (VkPhysicalDevice*)calloc(gpu_count, sizeof(*gpus)); if (!(gpus = (VkPhysicalDevice*)calloc(gpu_count, sizeof(*gpus))))
if (!gpus)
{ {
RARCH_ERR("[Vulkan]: Failed to enumerate physical devices.\n"); RARCH_ERR("[Vulkan]: Failed to enumerate physical devices.\n");
return false; return false;
@ -1744,8 +1752,8 @@ static bool vulkan_context_init_device(gfx_ctx_vulkan_data_t *vk)
return false; return false;
} }
queue_properties = (VkQueueFamilyProperties*)malloc(queue_count * sizeof(*queue_properties)); if (!(queue_properties = (VkQueueFamilyProperties*)malloc(queue_count *
if (!queue_properties) sizeof(*queue_properties))))
return false; return false;
vkGetPhysicalDeviceQueueFamilyProperties(vk->context.gpu, vkGetPhysicalDeviceQueueFamilyProperties(vk->context.gpu,
@ -1753,13 +1761,11 @@ static bool vulkan_context_init_device(gfx_ctx_vulkan_data_t *vk)
for (i = 0; i < queue_count; i++) for (i = 0; i < queue_count; i++)
{ {
VkQueueFlags required; VkQueueFlags required = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
VkBool32 supported = VK_FALSE; VkBool32 supported = VK_FALSE;
vkGetPhysicalDeviceSurfaceSupportKHR( vkGetPhysicalDeviceSurfaceSupportKHR(
vk->context.gpu, i, vk->context.gpu, i,
vk->vk_surface, &supported); vk->vk_surface, &supported);
required = VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT;
if (supported && ((queue_properties[i].queueFlags & required) == required)) if (supported && ((queue_properties[i].queueFlags & required) == required))
{ {
vk->context.graphics_queue_index = i; vk->context.graphics_queue_index = i;
@ -1778,12 +1784,11 @@ static bool vulkan_context_init_device(gfx_ctx_vulkan_data_t *vk)
return false; return false;
} }
use_device_ext = vulkan_find_device_extensions(vk->context.gpu, if (!(use_device_ext = vulkan_find_device_extensions(vk->context.gpu,
enabled_device_extensions, &enabled_device_extension_count, enabled_device_extensions, &enabled_device_extension_count,
device_extensions, ARRAY_SIZE(device_extensions), device_extensions, ARRAY_SIZE(device_extensions),
optional_device_extensions, ARRAY_SIZE(optional_device_extensions)); optional_device_extensions,
ARRAY_SIZE(optional_device_extensions))))
if (!use_device_ext)
{ {
RARCH_ERR("[Vulkan]: Could not find required device extensions.\n"); RARCH_ERR("[Vulkan]: Could not find required device extensions.\n");
return false; return false;
@ -1842,20 +1847,17 @@ bool vulkan_context_init(gfx_ctx_vulkan_data_t *vk,
unsigned i; unsigned i;
VkResult res; VkResult res;
PFN_vkGetInstanceProcAddr GetInstanceProcAddr; PFN_vkGetInstanceProcAddr GetInstanceProcAddr;
VkInstanceCreateInfo info = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO };
VkApplicationInfo app = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
const char *instance_extensions[4]; const char *instance_extensions[4];
unsigned ext_count = 0; bool use_instance_ext = false;
VkInstanceCreateInfo info = { VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO };
#ifdef VULKAN_DEBUG VkApplicationInfo app = { VK_STRUCTURE_TYPE_APPLICATION_INFO };
instance_extensions[ext_count++] = "VK_EXT_debug_report"; unsigned ext_count = 0;
static const char *instance_layers[] = { "VK_LAYER_KHRONOS_validation" };
#endif
bool use_instance_ext;
struct retro_hw_render_context_negotiation_interface_vulkan *iface = struct retro_hw_render_context_negotiation_interface_vulkan *iface =
(struct retro_hw_render_context_negotiation_interface_vulkan*)video_driver_get_context_negotiation_interface(); (struct retro_hw_render_context_negotiation_interface_vulkan*)video_driver_get_context_negotiation_interface();
#ifdef VULKAN_DEBUG
static const char *instance_layers[] = { "VK_LAYER_KHRONOS_validation" };
instance_extensions[ext_count++] = "VK_EXT_debug_report";
#endif
if (iface && iface->interface_type != RETRO_HW_RENDER_CONTEXT_NEGOTIATION_INTERFACE_VULKAN) if (iface && iface->interface_type != RETRO_HW_RENDER_CONTEXT_NEGOTIATION_INTERFACE_VULKAN)
{ {
@ -1962,6 +1964,7 @@ bool vulkan_context_init(gfx_ctx_vulkan_data_t *vk,
if (iface && iface->get_application_info) if (iface && iface->get_application_info)
{ {
info.pApplicationInfo = iface->get_application_info(); info.pApplicationInfo = iface->get_application_info();
#ifdef VULKAN_DEBUG
if (info.pApplicationInfo->pApplicationName) if (info.pApplicationInfo->pApplicationName)
{ {
RARCH_LOG("[Vulkan]: App: %s (version %u)\n", RARCH_LOG("[Vulkan]: App: %s (version %u)\n",
@ -1975,6 +1978,7 @@ bool vulkan_context_init(gfx_ctx_vulkan_data_t *vk,
info.pApplicationInfo->pEngineName, info.pApplicationInfo->pEngineName,
info.pApplicationInfo->engineVersion); info.pApplicationInfo->engineVersion);
} }
#endif
} }
if (cached_instance_vk) if (cached_instance_vk)
@ -2097,10 +2101,6 @@ static bool vulkan_create_display_surface(gfx_ctx_vulkan_data_t *vk,
unsigned saved_width = *width; unsigned saved_width = *width;
unsigned saved_height = *height; unsigned saved_height = *height;
/* We need to decide on GPU here to be able to query support. */
if (!vulkan_context_init_gpu(vk))
return false;
VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_EXTENSION_SYMBOL(vk->context.instance, VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_EXTENSION_SYMBOL(vk->context.instance,
vkGetPhysicalDeviceDisplayPropertiesKHR); vkGetPhysicalDeviceDisplayPropertiesKHR);
VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_EXTENSION_SYMBOL(vk->context.instance, VULKAN_SYMBOL_WRAPPER_LOAD_INSTANCE_EXTENSION_SYMBOL(vk->context.instance,
@ -2183,14 +2183,14 @@ retry:
for (i = 0; i < plane_count; i++) for (i = 0; i < plane_count; i++)
{ {
uint32_t supported_count = 0; uint32_t supported_count = 0;
VkDisplayKHR *supported = NULL; VkDisplayKHR *supported = NULL;
VkDisplayPlaneCapabilitiesKHR plane_caps; VkDisplayPlaneCapabilitiesKHR plane_caps;
vkGetDisplayPlaneSupportedDisplaysKHR(vk->context.gpu, i, &supported_count, NULL); vkGetDisplayPlaneSupportedDisplaysKHR(vk->context.gpu, i, &supported_count, NULL);
if (!supported_count) if (!supported_count)
continue; continue;
supported = (VkDisplayKHR*)calloc(supported_count, sizeof(*supported)); if (!(supported = (VkDisplayKHR*)calloc(supported_count,
if (!supported) sizeof(*supported))))
GOTO_FAIL(); GOTO_FAIL();
vkGetDisplayPlaneSupportedDisplaysKHR(vk->context.gpu, i, &supported_count, vkGetDisplayPlaneSupportedDisplaysKHR(vk->context.gpu, i, &supported_count,
@ -2212,8 +2212,8 @@ retry:
if (j == supported_count) if (j == supported_count)
continue; continue;
if (planes[i].currentDisplay == VK_NULL_HANDLE || if ( planes[i].currentDisplay == VK_NULL_HANDLE
planes[i].currentDisplay == display) || planes[i].currentDisplay == display)
best_plane = j; best_plane = j;
else else
continue; continue;
@ -2248,13 +2248,13 @@ out:
if (best_plane == UINT32_MAX) if (best_plane == UINT32_MAX)
GOTO_FAIL(); GOTO_FAIL();
create_info.displayMode = best_mode; create_info.displayMode = best_mode;
create_info.planeIndex = best_plane; create_info.planeIndex = best_plane;
create_info.planeStackIndex = planes[best_plane].currentStackIndex; create_info.planeStackIndex = planes[best_plane].currentStackIndex;
create_info.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR; create_info.transform = VK_SURFACE_TRANSFORM_IDENTITY_BIT_KHR;
create_info.globalAlpha = 1.0f; create_info.globalAlpha = 1.0f;
create_info.alphaMode = alpha_mode; create_info.alphaMode = alpha_mode;
create_info.imageExtent.width = *width; create_info.imageExtent.width = *width;
create_info.imageExtent.height = *height; create_info.imageExtent.height = *height;
if (vkCreateDisplayPlaneSurfaceKHR(vk->context.instance, if (vkCreateDisplayPlaneSurfaceKHR(vk->context.instance,
@ -2413,12 +2413,13 @@ bool vulkan_surface_create(gfx_ctx_vulkan_data_t *vk,
#endif #endif
break; break;
case VULKAN_WSI_DISPLAY: case VULKAN_WSI_DISPLAY:
{ /* We need to decide on GPU here to be able to query support. */
if (!vulkan_create_display_surface(vk, if (!vulkan_context_init_gpu(vk))
&width, &height, return false;
(const struct vulkan_display_surface_info*)display)) if (!vulkan_create_display_surface(vk,
return false; &width, &height,
} (const struct vulkan_display_surface_info*)display))
return false;
break; break;
case VULKAN_WSI_MVK_MACOS: case VULKAN_WSI_MVK_MACOS:
#ifdef HAVE_COCOA #ifdef HAVE_COCOA
@ -2546,6 +2547,7 @@ void vulkan_present(gfx_ctx_vulkan_data_t *vk, unsigned index)
#endif #endif
err = vkQueuePresentKHR(vk->context.queue, &present); err = vkQueuePresentKHR(vk->context.queue, &present);
#ifdef ANDROID
/* VK_SUBOPTIMAL_KHR can be returned on /* VK_SUBOPTIMAL_KHR can be returned on
* Android 10 when prerotate is not dealt with. * Android 10 when prerotate is not dealt with.
* This is not an error we need to care about, * This is not an error we need to care about,
@ -2554,6 +2556,7 @@ void vulkan_present(gfx_ctx_vulkan_data_t *vk, unsigned index)
result = VK_SUCCESS; result = VK_SUCCESS;
if (err == VK_SUBOPTIMAL_KHR) if (err == VK_SUBOPTIMAL_KHR)
err = VK_SUCCESS; err = VK_SUCCESS;
#endif
#ifdef WSI_HARDENING_TEST #ifdef WSI_HARDENING_TEST
trigger_spurious_error_vkresult(&err); trigger_spurious_error_vkresult(&err);
@ -2682,29 +2685,28 @@ static VkSemaphore vulkan_get_wsi_acquire_semaphore(struct vulkan_context *ctx)
static void vulkan_acquire_wait_fences(gfx_ctx_vulkan_data_t *vk) static void vulkan_acquire_wait_fences(gfx_ctx_vulkan_data_t *vk)
{ {
unsigned index; unsigned index;
VkFenceCreateInfo fence_info;
VkFence *next_fence = NULL; VkFence *next_fence = NULL;
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.pNext = NULL;
fence_info.flags = 0;
/* Decouples the frame fence index from swapchain index. */ /* Decouples the frame fence index from swapchain index. */
vk->context.current_frame_index = vk->context.current_frame_index =
(vk->context.current_frame_index + 1) % (vk->context.current_frame_index + 1) %
vk->context.num_swapchain_images; vk->context.num_swapchain_images;
index = vk->context.current_frame_index; index = vk->context.current_frame_index;
next_fence = &vk->context.swapchain_fences[index]; if (*(next_fence = &vk->context.swapchain_fences[index]) != VK_NULL_HANDLE)
if (*next_fence != VK_NULL_HANDLE)
{ {
if (vk->context.swapchain_fences_signalled[index]) if (vk->context.swapchain_fences_signalled[index])
vkWaitForFences(vk->context.device, 1, next_fence, true, UINT64_MAX); vkWaitForFences(vk->context.device, 1, next_fence, true, UINT64_MAX);
vkResetFences(vk->context.device, 1, next_fence); vkResetFences(vk->context.device, 1, next_fence);
} }
else else
{
VkFenceCreateInfo fence_info;
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.pNext = NULL;
fence_info.flags = 0;
vkCreateFence(vk->context.device, &fence_info, NULL, next_fence); vkCreateFence(vk->context.device, &fence_info, NULL, next_fence);
}
vk->context.swapchain_fences_signalled[index] = false; vk->context.swapchain_fences_signalled[index] = false;
if (vk->context.swapchain_wait_semaphores[index] != VK_NULL_HANDLE) if (vk->context.swapchain_wait_semaphores[index] != VK_NULL_HANDLE)
@ -2714,10 +2716,13 @@ static void vulkan_acquire_wait_fences(gfx_ctx_vulkan_data_t *vk)
static void vulkan_create_wait_fences(gfx_ctx_vulkan_data_t *vk) static void vulkan_create_wait_fences(gfx_ctx_vulkan_data_t *vk)
{ {
VkFenceCreateInfo fence_info =
{ VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
unsigned i; unsigned i;
VkFenceCreateInfo fence_info;
fence_info.sType = VK_STRUCTURE_TYPE_FENCE_CREATE_INFO;
fence_info.pNext = NULL;
fence_info.flags = 0;
for (i = 0; i < vk->context.num_swapchain_images; i++) for (i = 0; i < vk->context.num_swapchain_images; i++)
{ {
if (!vk->context.swapchain_fences[i]) if (!vk->context.swapchain_fences[i])
@ -2795,7 +2800,6 @@ retry:
err = vkAcquireNextImageKHR(vk->context.device, err = vkAcquireNextImageKHR(vk->context.device,
vk->swapchain, UINT64_MAX, vk->swapchain, UINT64_MAX,
semaphore, fence, &vk->context.current_swapchain_index); semaphore, fence, &vk->context.current_swapchain_index);
#ifdef ANDROID #ifdef ANDROID
/* VK_SUBOPTIMAL_KHR can be returned on Android 10 /* VK_SUBOPTIMAL_KHR can be returned on Android 10
* when prerotate is not dealt with. * when prerotate is not dealt with.