Merge pull request #7191 from Themaister/master

Vulkan: Maintenance fixes
This commit is contained in:
Twinaphex 2018-09-08 21:50:28 +02:00 committed by GitHub
commit 6ea292f2f5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 341 additions and 94 deletions

View File

@ -38,6 +38,14 @@
#include "../../libretro-common/include/retro_math.h"
#include "../../libretro-common/include/string/stdstring.h"
#define VENDOR_ID_AMD 0x1002
#define VENDOR_ID_NV 0x10DE
#define VENDOR_ID_INTEL 0x8086
#ifdef _WIN32
#define VULKAN_EMULATE_MAILBOX
#endif
static dylib_t vulkan_library;
static VkInstance cached_instance_vk;
static VkDevice cached_device_vk;
@ -88,6 +96,7 @@ static VKAPI_ATTR VkBool32 VKAPI_CALL vulkan_debug_cb(
RARCH_ERR("[Vulkan]: Error: %s: %s\n",
pLayerPrefix, pMessage);
}
#if 0
else if (flags & VK_DEBUG_REPORT_WARNING_BIT_EXT)
{
RARCH_WARN("[Vulkan]: Warning: %s: %s\n",
@ -103,11 +112,126 @@ static VKAPI_ATTR VkBool32 VKAPI_CALL vulkan_debug_cb(
RARCH_LOG("[Vulkan]: Information: %s: %s\n",
pLayerPrefix, pMessage);
}
#endif
return VK_FALSE;
}
#endif
/* Shut down the emulated-mailbox worker thread (if one is running) and
 * release all synchronization primitives. The struct is zeroed afterwards,
 * so calling deinit twice, or re-running init later, is safe. */
void vulkan_emulated_mailbox_deinit(struct vulkan_emulated_mailbox *mailbox)
{
   if (mailbox->thread)
   {
      /* Mark the worker as dead under the lock, wake it from its
       * condition wait, then block until it has fully exited. */
      slock_lock(mailbox->lock);
      mailbox->dead = true;
      scond_signal(mailbox->cond);
      slock_unlock(mailbox->lock);

      sthread_join(mailbox->thread);
   }

   if (mailbox->lock)
      slock_free(mailbox->lock);
   if (mailbox->cond)
      scond_free(mailbox->cond);

   /* Reset to a pristine state for any subsequent init. */
   memset(mailbox, 0, sizeof(*mailbox));
}
/* Consumer-side, non-blocking acquire for the emulated mailbox.
 * Kicks the worker thread if no request is outstanding, then returns
 * VK_TIMEOUT when the worker has not yet completed an acquire, or the
 * worker's VkResult (with *index filled in) once it has. Returns
 * VK_ERROR_OUT_OF_DATE_KHR if no swapchain is attached. */
VkResult vulkan_emulated_mailbox_acquire_next_image(struct vulkan_emulated_mailbox *mailbox,
      unsigned *index)
{
   VkResult res = VK_TIMEOUT;

   if (mailbox->swapchain == VK_NULL_HANDLE)
      return VK_ERROR_OUT_OF_DATE_KHR;

   slock_lock(mailbox->lock);

   /* Only signal the worker once per outstanding request;
    * repeated polls while a request is pending are no-ops. */
   if (!mailbox->has_pending_request)
   {
      mailbox->request_acquire = true;
      scond_signal(mailbox->cond);
   }
   mailbox->has_pending_request = true;

   /* If the worker already published a result, consume it and clear
    * the pending state so the next call starts a fresh request. */
   if (mailbox->acquired)
   {
      res                          = mailbox->result;
      *index                       = mailbox->index;
      mailbox->has_pending_request = false;
      mailbox->acquired            = false;
   }

   slock_unlock(mailbox->lock);
   return res;
}
/* Worker-thread body for the emulated mailbox.
 * Sleeps on the condition variable until an acquire is requested (or the
 * mailbox is being torn down), then performs a blocking
 * vkAcquireNextImageKHR via a fence and publishes the result for
 * vulkan_emulated_mailbox_acquire_next_image to consume.
 *
 * Fix: removed the unused local `VkResult res` — the loop stores its
 * result directly into mailbox->result and the local was never read. */
static void vulkan_emulated_mailbox_loop(void *userdata)
{
   VkFence fence;
   VkFenceCreateInfo info = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
   struct vulkan_emulated_mailbox *mailbox =
      (struct vulkan_emulated_mailbox *)userdata;

   vkCreateFence(mailbox->device, &info, NULL, &fence);

   for (;;)
   {
      /* Wait for either a new acquire request or a shutdown signal. */
      slock_lock(mailbox->lock);
      while (!mailbox->dead && !mailbox->request_acquire)
         scond_wait(mailbox->cond, mailbox->lock);

      if (mailbox->dead)
      {
         slock_unlock(mailbox->lock);
         break;
      }

      mailbox->request_acquire = false;
      slock_unlock(mailbox->lock);

      /* result/index are written without the lock held; the consumer only
       * reads them after observing acquired == true, which is set below
       * under the lock, so the success path is correctly ordered.
       * NOTE(review): on failure (result != VK_SUCCESS) acquired is never
       * signalled and the consumer keeps seeing VK_TIMEOUT — presumably
       * recovery happens via swapchain recreation; confirm with callers. */
      mailbox->result = vkAcquireNextImageKHR(mailbox->device, mailbox->swapchain, UINT64_MAX,
            VK_NULL_HANDLE, fence, &mailbox->index);

      /* Use a fence (not a semaphore) so availability can be awaited on
       * the CPU; reset it unconditionally for the next iteration. */
      if (mailbox->result == VK_SUCCESS)
         vkWaitForFences(mailbox->device, 1, &fence, true, UINT64_MAX);
      vkResetFences(mailbox->device, 1, &fence);

      if (mailbox->result == VK_SUCCESS)
      {
         /* Publish the acquired image to the consumer side. */
         slock_lock(mailbox->lock);
         mailbox->acquired = true;
         scond_signal(mailbox->cond);
         slock_unlock(mailbox->lock);
      }
   }

   vkDestroyFence(mailbox->device, fence, NULL);
}
/* Start the emulated-mailbox worker for the given device/swapchain pair.
 * Returns true on success. On failure, any partially created resources
 * are released and the struct is left zeroed, so the caller does not
 * need to (but safely may) call deinit afterwards.
 *
 * Fix: the original leaked the condition variable when slock_new failed,
 * and leaked both cond and lock when sthread_create failed. */
bool vulkan_emulated_mailbox_init(struct vulkan_emulated_mailbox *mailbox,
      VkDevice device,
      VkSwapchainKHR swapchain)
{
   memset(mailbox, 0, sizeof(*mailbox));
   mailbox->device    = device;
   mailbox->swapchain = swapchain;

   mailbox->cond      = scond_new();
   if (!mailbox->cond)
      goto error;

   mailbox->lock      = slock_new();
   if (!mailbox->lock)
      goto error;

   mailbox->thread    = sthread_create(vulkan_emulated_mailbox_loop, mailbox);
   if (!mailbox->thread)
      goto error;

   return true;

error:
   /* thread is NULL on every failure path, so deinit only frees the
    * lock/cond that were actually created and re-zeroes the struct. */
   vulkan_emulated_mailbox_deinit(mailbox);
   return false;
}
uint32_t vulkan_find_memory_type(
const VkPhysicalDeviceMemoryProperties *mem_props,
uint32_t device_reqs, uint32_t host_reqs)
@ -184,13 +308,12 @@ void vulkan_copy_staging_to_dynamic(vk_t *vk, VkCommandBuffer cmd,
struct vk_texture *dynamic,
struct vk_texture *staging)
{
VkImageCopy region;
VkBufferImageCopy region;
retro_assert(dynamic->type == VULKAN_TEXTURE_DYNAMIC);
retro_assert(staging->type == VULKAN_TEXTURE_STAGING);
vulkan_sync_texture_to_gpu(vk, staging);
vulkan_transition_texture(vk, cmd, staging);
/* We don't have to sync against previous TRANSFER,
* since we observed the completion by fences.
@ -208,15 +331,14 @@ void vulkan_copy_staging_to_dynamic(vk_t *vk, VkCommandBuffer cmd,
VK_PIPELINE_STAGE_TRANSFER_BIT);
memset(&region, 0, sizeof(region));
region.extent.width = dynamic->width;
region.extent.height = dynamic->height;
region.extent.depth = 1;
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.layerCount = 1;
region.dstSubresource = region.srcSubresource;
region.imageExtent.width = dynamic->width;
region.imageExtent.height = dynamic->height;
region.imageExtent.depth = 1;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
vkCmdCopyImage(cmd,
staging->image, VK_IMAGE_LAYOUT_GENERAL,
vkCmdCopyBufferToImage(cmd,
staging->buffer,
dynamic->image, VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &region);
@ -323,6 +445,7 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
VkSubresourceLayout layout;
VkDevice device = vk->context->device;
VkImageCreateInfo info = { VK_STRUCTURE_TYPE_IMAGE_CREATE_INFO };
VkBufferCreateInfo buffer_info = { VK_STRUCTURE_TYPE_BUFFER_CREATE_INFO };
VkImageViewCreateInfo view = { VK_STRUCTURE_TYPE_IMAGE_VIEW_CREATE_INFO };
VkMemoryAllocateInfo alloc = { VK_STRUCTURE_TYPE_MEMORY_ALLOCATE_INFO };
VkImageSubresource subresource = { VK_IMAGE_ASPECT_COLOR_BIT };
@ -338,6 +461,10 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
info.extent.height = height;
info.extent.depth = 1;
info.arrayLayers = 1;
info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
buffer_info.size = width * height * vulkan_format_to_bpp(format);
buffer_info.sharingMode = VK_SHARING_MODE_EXCLUSIVE;
/* For simplicity, always build mipmaps for
* static textures, samplers can be used to enable it dynamically.
@ -355,7 +482,7 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
if (type == VULKAN_TEXTURE_STREAMED)
{
VkFormatProperties format_properties;
VkFormatFeatureFlags required = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
const VkFormatFeatureFlags required = VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT |
VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT;
vkGetPhysicalDeviceFormatProperties(
@ -396,23 +523,33 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
break;
case VULKAN_TEXTURE_STAGING:
info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
info.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
info.tiling = VK_IMAGE_TILING_LINEAR;
info.initialLayout = VK_IMAGE_LAYOUT_PREINITIALIZED;
break;
case VULKAN_TEXTURE_READBACK:
info.usage = VK_IMAGE_USAGE_TRANSFER_DST_BIT;
buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_DST_BIT;
info.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
info.tiling = VK_IMAGE_TILING_LINEAR;
info.initialLayout = VK_IMAGE_LAYOUT_UNDEFINED;
break;
}
vkCreateImage(device, &info, NULL, &tex.image);
if (type != VULKAN_TEXTURE_STAGING && type != VULKAN_TEXTURE_READBACK)
{
vkCreateImage(device, &info, NULL, &tex.image);
#if 0
vulkan_track_alloc(tex.image);
vulkan_track_alloc(tex.image);
#endif
vkGetImageMemoryRequirements(device, tex.image, &mem_reqs);
vkGetImageMemoryRequirements(device, tex.image, &mem_reqs);
}
else
{
/* Linear staging textures are not guaranteed to be supported,
* use buffers instead. */
vkCreateBuffer(device, &buffer_info, NULL, &tex.buffer);
vkGetBufferMemoryRequirements(device, tex.buffer, &mem_reqs);
}
alloc.allocationSize = mem_reqs.size;
switch (type)
@ -449,13 +586,14 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
{
/* Recreate texture but for STAGING this time ... */
RARCH_LOG("[Vulkan]: GPU supports linear images as textures, but not DEVICE_LOCAL. Falling back to copy path.\n");
type = VULKAN_TEXTURE_STAGING;
type = VULKAN_TEXTURE_STAGING;
vkDestroyImage(device, tex.image, NULL);
tex.image = NULL;
info.initialLayout = VK_IMAGE_LAYOUT_GENERAL;
info.usage = VK_IMAGE_USAGE_TRANSFER_SRC_BIT;
vkCreateImage(device, &info, NULL, &tex.image);
vkGetImageMemoryRequirements(device, tex.image, &mem_reqs);
buffer_info.usage = VK_BUFFER_USAGE_TRANSFER_SRC_BIT;
vkCreateBuffer(device, &buffer_info, NULL, &tex.buffer);
vkGetBufferMemoryRequirements(device, tex.buffer, &mem_reqs);
alloc.allocationSize = mem_reqs.size;
alloc.memoryTypeIndex = vulkan_find_memory_type_fallback(
@ -478,6 +616,8 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
vulkan_track_dealloc(old->image);
#endif
}
if (old && old->buffer != VK_NULL_HANDLE)
vkDestroyBuffer(vk->context->device, old->buffer, NULL);
/* We can pilfer the old memory and move it over to the new texture. */
if (old &&
@ -507,7 +647,10 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
memset(old, 0, sizeof(*old));
}
vkBindImageMemory(device, tex.image, tex.memory, 0);
if (tex.image)
vkBindImageMemory(device, tex.image, tex.memory, 0);
if (tex.buffer)
vkBindBufferMemory(device, tex.buffer, tex.memory, 0);
if (type != VULKAN_TEXTURE_STAGING && type != VULKAN_TEXTURE_READBACK)
{
@ -532,8 +675,14 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
else
tex.view = VK_NULL_HANDLE;
if (info.tiling == VK_IMAGE_TILING_LINEAR)
if (tex.image && info.tiling == VK_IMAGE_TILING_LINEAR)
vkGetImageSubresourceLayout(device, tex.image, &subresource, &layout);
else if (tex.buffer)
{
layout.offset = 0;
layout.size = buffer_info.size;
layout.rowPitch = width * vulkan_format_to_bpp(format);
}
else
memset(&layout, 0, sizeof(layout));
@ -568,7 +717,7 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
}
else if (initial && type == VULKAN_TEXTURE_STATIC)
{
VkImageCopy region;
VkBufferImageCopy region;
VkCommandBuffer staging;
struct vk_texture tmp = vulkan_create_texture(vk, NULL,
width, height, format, initial, NULL, VULKAN_TEXTURE_STAGING);
@ -583,12 +732,6 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
vkBeginCommandBuffer(staging, &begin_info);
vulkan_image_layout_transition(vk, staging, tmp.image,
VK_IMAGE_LAYOUT_PREINITIALIZED, VK_IMAGE_LAYOUT_GENERAL,
VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
/* If doing mipmapping on upload, keep in general so we can easily do transfers to
* and transfers from the images without having to
* mess around with lots of extra transitions at per-level granularity.
@ -603,16 +746,14 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
VK_PIPELINE_STAGE_TRANSFER_BIT);
memset(&region, 0, sizeof(region));
region.extent.width = width;
region.extent.height = height;
region.extent.depth = 1;
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.layerCount = 1;
region.dstSubresource = region.srcSubresource;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageExtent.width = width;
region.imageExtent.height = height;
region.imageExtent.depth = 1;
vkCmdCopyImage(staging,
tmp.image,
VK_IMAGE_LAYOUT_GENERAL,
vkCmdCopyBufferToImage(staging,
tmp.buffer,
tex.image,
tex.mipmap ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
1, &region);
@ -710,12 +851,18 @@ void vulkan_destroy_texture(
{
if (tex->mapped)
vkUnmapMemory(device, tex->memory);
vkFreeMemory(device, tex->memory, NULL);
if (tex->view)
vkDestroyImageView(device, tex->view, NULL);
vkDestroyImage(device, tex->image, NULL);
if (tex->image)
vkDestroyImage(device, tex->image, NULL);
if (tex->buffer)
vkDestroyBuffer(device, tex->buffer, NULL);
if (tex->memory)
vkFreeMemory(device, tex->memory, NULL);
#ifdef VULKAN_DEBUG_TEXTURE_ALLOC
vulkan_track_dealloc(tex->image);
if (tex->image)
vulkan_track_dealloc(tex->image);
#endif
memset(tex, 0, sizeof(*tex));
}
@ -762,6 +909,9 @@ static void vulkan_write_quad_descriptors(
void vulkan_transition_texture(vk_t *vk, VkCommandBuffer cmd, struct vk_texture *texture)
{
if (!texture->image)
return;
/* Transition to GENERAL layout for linear streamed textures.
* We're using linear textures here, so only
* GENERAL layout is supported.
@ -782,14 +932,6 @@ void vulkan_transition_texture(vk_t *vk, VkCommandBuffer cmd, struct vk_texture
VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
break;
case VULKAN_TEXTURE_STAGING:
vulkan_image_layout_transition(vk, cmd, texture->image,
texture->layout, VK_IMAGE_LAYOUT_GENERAL,
VK_ACCESS_HOST_WRITE_BIT, VK_ACCESS_TRANSFER_READ_BIT,
VK_PIPELINE_STAGE_HOST_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
break;
default:
retro_assert(0 && "Attempting to transition invalid texture type.\n");
break;
@ -1550,6 +1692,12 @@ static bool vulkan_context_init_device(gfx_ctx_vulkan_data_t *vk)
vkGetPhysicalDeviceMemoryProperties(vk->context.gpu,
&vk->context.memory_properties);
#ifdef VULKAN_EMULATE_MAILBOX
/* Win32 windowed mode seems to deal just fine with toggling VSync.
* Fullscreen however ... */
vk->emulate_mailbox = vk->fullscreen;
#endif
RARCH_LOG("[Vulkan]: Using GPU: %s\n", vk->context.gpu_properties.deviceName);
if (vk->context.device == VK_NULL_HANDLE)
@ -2309,12 +2457,14 @@ static void vulkan_destroy_swapchain(gfx_ctx_vulkan_data_t *vk)
{
unsigned i;
vulkan_emulated_mailbox_deinit(&vk->mailbox);
if (vk->swapchain != VK_NULL_HANDLE)
{
vkDeviceWaitIdle(vk->context.device);
vkDestroySwapchainKHR(vk->context.device, vk->swapchain, NULL);
memset(vk->context.swapchain_images, 0, sizeof(vk->context.swapchain_images));
vk->swapchain = VK_NULL_HANDLE;
vk->context.has_acquired_swapchain = false;
}
for (i = 0; i < VULKAN_MAX_SWAPCHAIN_IMAGES; i++)
@ -2333,9 +2483,13 @@ static void vulkan_destroy_swapchain(gfx_ctx_vulkan_data_t *vk)
void vulkan_present(gfx_ctx_vulkan_data_t *vk, unsigned index)
{
VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
VkResult result = VK_SUCCESS;
VkResult err = VK_SUCCESS;
VkPresentInfoKHR present = { VK_STRUCTURE_TYPE_PRESENT_INFO_KHR };
VkResult result = VK_SUCCESS;
VkResult err = VK_SUCCESS;
if (!vk->context.has_acquired_swapchain)
return;
vk->context.has_acquired_swapchain = false;
/* We're still waiting for a proper swapchain, so just fake it. */
if (vk->swapchain == VK_NULL_HANDLE)
@ -2431,8 +2585,8 @@ static void vulkan_acquire_clear_fences(gfx_ctx_vulkan_data_t *vk)
vkDestroyFence(vk->context.device,
vk->context.swapchain_fences[i], NULL);
vk->context.swapchain_fences[i] = VK_NULL_HANDLE;
vk->context.swapchain_fences_signalled[i] = false;
}
vk->context.swapchain_fences_signalled[i] = false;
}
}
@ -2449,10 +2603,26 @@ static void vulkan_acquire_wait_fences(gfx_ctx_vulkan_data_t *vk)
if (vk->context.swapchain_fences_signalled[index])
vkWaitForFences(vk->context.device, 1, next_fence, true, UINT64_MAX);
vkResetFences(vk->context.device, 1, next_fence);
vk->context.swapchain_fences_signalled[index] = false;
}
else
vkCreateFence(vk->context.device, &fence_info, NULL, next_fence);
vk->context.swapchain_fences_signalled[index] = false;
}
/* Ensure every swapchain image slot has an associated wait fence,
 * creating fences only for slots that are still VK_NULL_HANDLE.
 * Fences that already exist are left untouched. */
static void vulkan_create_wait_fences(gfx_ctx_vulkan_data_t *vk)
{
   unsigned i;
   VkFenceCreateInfo fence_info =
      { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };

   for (i = 0; i < vk->context.num_swapchain_images; i++)
   {
      VkFence *fence = &vk->context.swapchain_fences[i];

      if (*fence == VK_NULL_HANDLE)
         vkCreateFence(vk->context.device, &fence_info, NULL, fence);
   }
}
void vulkan_acquire_next_image(gfx_ctx_vulkan_data_t *vk)
@ -2489,22 +2659,45 @@ retry:
}
}
vkCreateFence(vk->context.device, &fence_info, NULL, &fence);
err = vkAcquireNextImageKHR(vk->context.device,
vk->swapchain, UINT64_MAX,
VK_NULL_HANDLE, fence, &vk->context.current_swapchain_index);
if (vk->emulating_mailbox)
{
/* Non-blocking acquire. If we don't get a swapchain frame right away,
* just skip rendering to the swapchain this frame, similar to what
* MAILBOX would do. */
err = vulkan_emulated_mailbox_acquire_next_image(&vk->mailbox, &vk->context.current_swapchain_index);
fence = VK_NULL_HANDLE;
}
else
{
vkCreateFence(vk->context.device, &fence_info, NULL, &fence);
err = vkAcquireNextImageKHR(vk->context.device,
vk->swapchain, UINT64_MAX,
VK_NULL_HANDLE, fence, &vk->context.current_swapchain_index);
}
if (err == VK_SUCCESS)
vkWaitForFences(vk->context.device, 1, &fence, true, UINT64_MAX);
{
if (fence != VK_NULL_HANDLE)
vkWaitForFences(vk->context.device, 1, &fence, true, UINT64_MAX);
vk->context.has_acquired_swapchain = true;
}
else
vk->context.has_acquired_swapchain = false;
#ifdef WSI_HARDENING_TEST
trigger_spurious_error_vkresult(&err);
#endif
vkDestroyFence(vk->context.device, fence, NULL);
if (fence != VK_NULL_HANDLE)
vkDestroyFence(vk->context.device, fence, NULL);
if (err == VK_ERROR_OUT_OF_DATE_KHR)
if (err == VK_NOT_READY || err == VK_TIMEOUT)
{
/* Just pretend we have a swapchain index, round-robin style. */
vk->context.current_swapchain_index =
(vk->context.current_swapchain_index + 1) % vk->context.num_swapchain_images;
}
else if (err == VK_ERROR_OUT_OF_DATE_KHR)
{
/* Throw away the old swapchain and try again. */
vulkan_destroy_swapchain(vk);
@ -2567,6 +2760,14 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
vkDeviceWaitIdle(vk->context.device);
vulkan_acquire_clear_fences(vk);
if (swap_interval == 0 && vk->emulate_mailbox)
{
swap_interval = 1;
vk->emulating_mailbox = true;
}
else
vk->emulating_mailbox = false;
vk->created_new_swapchain = true;
if (vk->swapchain != VK_NULL_HANDLE &&
!vk->context.invalid_swapchain &&
@ -2577,9 +2778,17 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
/* Do not bother creating a swapchain redundantly. */
RARCH_LOG("[Vulkan]: Do not need to re-create swapchain.\n");
vk->created_new_swapchain = false;
vulkan_create_wait_fences(vk);
if (vk->emulating_mailbox && vk->mailbox.swapchain == VK_NULL_HANDLE)
vulkan_emulated_mailbox_init(&vk->mailbox, vk->context.device, vk->swapchain);
else if (!vk->emulating_mailbox && vk->mailbox.swapchain != VK_NULL_HANDLE)
vulkan_emulated_mailbox_deinit(&vk->mailbox);
return true;
}
vulkan_emulated_mailbox_deinit(&vk->mailbox);
present_mode_count = 0;
vkGetPhysicalDeviceSurfacePresentModesKHR(
vk->context.gpu, vk->vk_surface,
@ -2814,5 +3023,10 @@ bool vulkan_create_swapchain(gfx_ctx_vulkan_data_t *vk,
/* Force driver to reset swapchain image handles. */
vk->context.invalid_swapchain = true;
vulkan_create_wait_fences(vk);
if (vk->emulating_mailbox)
vulkan_emulated_mailbox_init(&vk->mailbox, vk->context.device, vk->swapchain);
return true;
}

View File

@ -96,6 +96,7 @@ typedef struct vulkan_context
/* Used by screenshot to get blits with correct colorspace. */
bool swapchain_is_srgb;
bool swap_interval_emulation_lock;
bool has_acquired_swapchain;
unsigned swapchain_width;
unsigned swapchain_height;
@ -127,13 +128,41 @@ typedef struct vulkan_context
#endif
} vulkan_context_t;
/* Shared state between the video thread and the worker thread that
 * emulates MAILBOX present mode by acquiring swapchain images
 * asynchronously. The flags and result fields are accessed under lock
 * on the paths that publish/consume a completed acquire. */
struct vulkan_emulated_mailbox
{
/* Worker thread running vulkan_emulated_mailbox_loop. */
sthread_t *thread;
VkDevice device;
VkSwapchainKHR swapchain;
/* Protects the request/result handshake below. */
slock_t *lock;
scond_t *cond;
/* Swapchain image index from the last successful acquire. */
unsigned index;
/* True once the worker has an acquire result ready to consume. */
bool acquired;
/* Set by the consumer to ask the worker to start an acquire. */
bool request_acquire;
/* Set by deinit to make the worker exit its loop. */
bool dead;
/* True while an acquire request is outstanding (not yet consumed). */
bool has_pending_request;
/* VkResult of the worker's vkAcquireNextImageKHR call. */
VkResult result;
};
bool vulkan_emulated_mailbox_init(struct vulkan_emulated_mailbox *mailbox,
VkDevice device, VkSwapchainKHR swapchain);
void vulkan_emulated_mailbox_deinit(struct vulkan_emulated_mailbox *mailbox);
VkResult vulkan_emulated_mailbox_acquire_next_image(struct vulkan_emulated_mailbox *mailbox, unsigned *index);
/* Per-context Vulkan WSI state owned by the platform context driver. */
typedef struct gfx_ctx_vulkan_data
{
bool need_new_swapchain;
bool created_new_swapchain;
/* True when mailbox emulation should be used for VSync-off
 * (set at device init on platforms that define VULKAN_EMULATE_MAILBOX). */
bool emulate_mailbox;
/* True while the current swapchain is actually running with the
 * emulated-mailbox worker active. */
bool emulating_mailbox;
vulkan_context_t context;
VkSurfaceKHR vk_surface;
VkSwapchainKHR swapchain;
/* Worker state for emulated MAILBOX acquires. */
struct vulkan_emulated_mailbox mailbox;
/* Used to check if we need to use mailbox emulation or not.
* Only relevant on Windows for now. */
bool fullscreen;
} gfx_ctx_vulkan_data_t;
struct vulkan_display_surface_info
@ -179,6 +208,7 @@ struct vk_texture
VkImage image;
VkImageView view;
VkDeviceMemory memory;
VkBuffer buffer;
VkFormat format;

View File

@ -854,6 +854,8 @@ static void vulkan_init_static_resources(vk_t *vk)
uint32_t blank[4 * 4];
VkCommandPoolCreateInfo pool_info = {
VK_STRUCTURE_TYPE_COMMAND_POOL_CREATE_INFO };
pool_info.flags = VK_COMMAND_POOL_CREATE_RESET_COMMAND_BUFFER_BIT;
/* Create the pipeline cache. */
VkPipelineCacheCreateInfo cache = {
VK_STRUCTURE_TYPE_PIPELINE_CACHE_CREATE_INFO };
@ -1509,23 +1511,22 @@ static void vulkan_set_viewport(void *data, unsigned viewport_width,
static void vulkan_readback(vk_t *vk)
{
VkImageCopy region;
VkBufferImageCopy region;
struct vk_texture *staging;
struct video_viewport vp;
VkMemoryBarrier barrier = { VK_STRUCTURE_TYPE_MEMORY_BARRIER };
vulkan_viewport_info(vk, &vp);
memset(&region, 0, sizeof(region));
region.srcSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.srcSubresource.layerCount = 1;
region.dstSubresource = region.srcSubresource;
region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
region.imageSubresource.layerCount = 1;
region.imageOffset.x = vp.x;
region.imageOffset.y = vp.y;
region.imageExtent.width = vp.width;
region.imageExtent.height = vp.height;
region.imageExtent.depth = 1;
region.srcOffset.x = vp.x;
region.srcOffset.y = vp.y;
region.extent.width = vp.width;
region.extent.height = vp.height;
region.extent.depth = 1;
/* FIXME: We won't actually get format conversion with vkCmdCopyImage, so have to check
/* FIXME: We won't actually get format conversion with vkCmdCopyImageToBuffer, so have to check
* properly for this. BGRA seems to be the default for all swapchains. */
if (vk->context->swapchain_format != VK_FORMAT_B8G8R8A8_UNORM)
RARCH_WARN("[Vulkan]: Backbuffer is not BGRA8888, readbacks might not work properly.\n");
@ -1537,25 +1538,18 @@ static void vulkan_readback(vk_t *vk)
VK_FORMAT_B8G8R8A8_UNORM,
NULL, NULL, VULKAN_TEXTURE_READBACK);
/* Go through the long-winded dance of remapping image layouts. */
vulkan_image_layout_transition(vk, vk->cmd, staging->image,
VK_IMAGE_LAYOUT_UNDEFINED, VK_IMAGE_LAYOUT_GENERAL,
0, VK_ACCESS_TRANSFER_WRITE_BIT,
VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
VK_PIPELINE_STAGE_TRANSFER_BIT);
vkCmdCopyImage(vk->cmd, vk->chain->backbuffer.image,
vkCmdCopyImageToBuffer(vk->cmd, vk->chain->backbuffer.image,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL,
staging->image,
VK_IMAGE_LAYOUT_GENERAL,
staging->buffer,
1, &region);
/* Make the data visible to host. */
vulkan_image_layout_transition(vk, vk->cmd, staging->image,
VK_IMAGE_LAYOUT_GENERAL, VK_IMAGE_LAYOUT_GENERAL,
VK_ACCESS_TRANSFER_WRITE_BIT, VK_ACCESS_HOST_READ_BIT,
barrier.srcAccessMask = VK_ACCESS_TRANSFER_WRITE_BIT;
barrier.dstAccessMask = VK_ACCESS_HOST_READ_BIT;
vkCmdPipelineBarrier(vk->cmd,
VK_PIPELINE_STAGE_TRANSFER_BIT,
VK_PIPELINE_STAGE_HOST_BIT);
VK_PIPELINE_STAGE_HOST_BIT, 0,
1, &barrier, 0, NULL, 0, NULL);
}
static void vulkan_inject_black_frame(vk_t *vk, video_frame_info_t *video_info)
@ -1784,7 +1778,7 @@ static bool vulkan_frame(void *data, const void *frame,
(vulkan_filter_chain_t*)vk->filter_chain,
vk->cmd, &vk->vk_vp);
/* Render to backbuffer. */
if (chain->backbuffer.image != VK_NULL_HANDLE)
if (chain->backbuffer.image != VK_NULL_HANDLE && vk->context->has_acquired_swapchain)
{
rp_info.renderPass = vk->render_pass;
rp_info.framebuffer = chain->backbuffer.framebuffer;
@ -1880,6 +1874,7 @@ static bool vulkan_frame(void *data, const void *frame,
vulkan_filter_chain_end_frame((vulkan_filter_chain_t*)vk->filter_chain, vk->cmd);
if (chain->backbuffer.image != VK_NULL_HANDLE &&
vk->context->has_acquired_swapchain &&
(vk->readback.pending || vk->readback.streamed))
{
/* We cannot safely read back from an image which
@ -1911,7 +1906,8 @@ static bool vulkan_frame(void *data, const void *frame,
vk->readback.pending = false;
}
else if (chain->backbuffer.image != VK_NULL_HANDLE)
else if (chain->backbuffer.image != VK_NULL_HANDLE &&
vk->context->has_acquired_swapchain)
{
/* Prepare backbuffer for presentation. */
vulkan_image_layout_transition(vk, vk->cmd,
@ -1971,8 +1967,11 @@ static bool vulkan_frame(void *data, const void *frame,
submit_info.signalSemaphoreCount = 0;
if (vk->context->swapchain_semaphores[frame_index] != VK_NULL_HANDLE)
if (vk->context->swapchain_semaphores[frame_index] != VK_NULL_HANDLE &&
vk->context->has_acquired_swapchain)
{
signal_semaphores[submit_info.signalSemaphoreCount++] = vk->context->swapchain_semaphores[frame_index];
}
if (vk->hw.signal_semaphore != VK_NULL_HANDLE)
{
@ -2014,7 +2013,9 @@ static bool vulkan_frame(void *data, const void *frame,
/* Disable BFI during fast forward, slow-motion,
* and pause to prevent flicker. */
if (
video_info->black_frame_insertion
chain->backbuffer.image != VK_NULL_HANDLE
&& vk->context->has_acquired_swapchain
&& video_info->black_frame_insertion
&& !video_info->input_driver_nonblock_state
&& !video_info->runloop_is_slowmotion
&& !video_info->runloop_is_paused)

View File

@ -584,6 +584,8 @@ static bool gfx_ctx_wgl_set_video_mode(void *data,
unsigned width, unsigned height,
bool fullscreen)
{
win32_vk.fullscreen = fullscreen;
if (!win32_set_video_mode(NULL, width, height, fullscreen))
{
RARCH_ERR("[WGL]: win32_set_video_mode failed.\n");