Mirror of https://github.com/libretro/RetroArch, synced 2025-03-29 22:20:21 +00:00
Simplify vulkan_create_texture
Commit: 2d588ca4ef
Parent: d55ca599e0
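The change collapses the `if (initial && type == ...) / else if (...)` chain into a single `if (initial)` guard with a `switch` on the texture type, so each upload path reads as its own case. A rough sketch of the resulting control flow (bodies elided; the full hunk follows):

   if (initial)
   {
      switch (type)
      {
         case VULKAN_TEXTURE_STREAMED:
         case VULKAN_TEXTURE_STAGING:
            /* Map tex.memory and copy `initial` in row by row. */
            break;
         case VULKAN_TEXTURE_STATIC:
            /* Upload through a staging texture and a one-shot command buffer. */
            break;
         case VULKAN_TEXTURE_DYNAMIC:
         case VULKAN_TEXTURE_READBACK:
            /* TODO/FIXME - stubs */
            break;
      }
   }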
@@ -635,157 +635,186 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
    tex.format = format;
    tex.type   = type;

-   if (initial && (type == VULKAN_TEXTURE_STREAMED || type == VULKAN_TEXTURE_STAGING))
+   if (initial)
    {
-      unsigned y;
-      uint8_t *dst       = NULL;
-      const uint8_t *src = NULL;
-      void *ptr          = NULL;
-      unsigned bpp       = vulkan_format_to_bpp(tex.format);
-      unsigned stride    = tex.width * bpp;
-
-      vkMapMemory(device, tex.memory, tex.offset, tex.size, 0, &ptr);
-
-      dst = (uint8_t*)ptr;
-      src = (const uint8_t*)initial;
-      for (y = 0; y < tex.height; y++, dst += tex.stride, src += stride)
-         memcpy(dst, src, width * bpp);
-
-      if (  tex.need_manual_cache_management &&
-            tex.memory != VK_NULL_HANDLE)
-         VULKAN_SYNC_TEXTURE_TO_GPU(vk->context->device, tex.memory);
-      vkUnmapMemory(device, tex.memory);
-   }
-   else if (initial && type == VULKAN_TEXTURE_STATIC)
-   {
-      VkBufferImageCopy region;
-      VkCommandBuffer staging;
-      struct vk_texture tmp = vulkan_create_texture(vk, NULL,
-            width, height, format, initial, NULL, VULKAN_TEXTURE_STAGING);
-
-      cmd_info.commandPool        = vk->staging_pool;
-      cmd_info.level              = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
-      cmd_info.commandBufferCount = 1;
-
-      vkAllocateCommandBuffers(vk->context->device, &cmd_info, &staging);
-
-      begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
-
-      vkBeginCommandBuffer(staging, &begin_info);
-
-      /* If doing mipmapping on upload, keep in general so we can easily do transfers to
-       * and transfers from the images without having to
-       * mess around with lots of extra transitions at per-level granularity.
-       */
-      VULKAN_IMAGE_LAYOUT_TRANSITION(
-            staging,
-            tex.image,
-            VK_IMAGE_LAYOUT_UNDEFINED,
-            tex.mipmap ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-            0, VK_ACCESS_TRANSFER_WRITE_BIT,
-            VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
-            VK_PIPELINE_STAGE_TRANSFER_BIT);
-
-      memset(&region, 0, sizeof(region));
-      region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
-      region.imageSubresource.layerCount = 1;
-      region.imageExtent.width           = width;
-      region.imageExtent.height          = height;
-      region.imageExtent.depth           = 1;
-
-      vkCmdCopyBufferToImage(staging,
-            tmp.buffer,
-            tex.image,
-            tex.mipmap ? VK_IMAGE_LAYOUT_GENERAL : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-            1, &region);
-
-      if (tex.mipmap)
+      switch (type)
       {
-         for (i = 1; i < info.mipLevels; i++)
-         {
-            VkImageBlit blit_region;
-            unsigned src_width     = MAX(width >> (i - 1), 1);
-            unsigned src_height    = MAX(height >> (i - 1), 1);
-            unsigned target_width  = MAX(width >> i, 1);
-            unsigned target_height = MAX(height >> i, 1);
-            memset(&blit_region, 0, sizeof(blit_region));
+         case VULKAN_TEXTURE_STREAMED:
+         case VULKAN_TEXTURE_STAGING:
+         {
+            unsigned y;
+            uint8_t *dst       = NULL;
+            const uint8_t *src = NULL;
+            void *ptr          = NULL;
+            unsigned bpp       = vulkan_format_to_bpp(tex.format);
+            unsigned stride    = tex.width * bpp;

-            blit_region.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
-            blit_region.srcSubresource.mipLevel       = i - 1;
-            blit_region.srcSubresource.baseArrayLayer = 0;
-            blit_region.srcSubresource.layerCount     = 1;
-            blit_region.dstSubresource                = blit_region.srcSubresource;
-            blit_region.dstSubresource.mipLevel       = i;
-            blit_region.srcOffsets[1].x               = src_width;
-            blit_region.srcOffsets[1].y               = src_height;
-            blit_region.srcOffsets[1].z               = 1;
-            blit_region.dstOffsets[1].x               = target_width;
-            blit_region.dstOffsets[1].y               = target_height;
-            blit_region.dstOffsets[1].z               = 1;
+            vkMapMemory(device, tex.memory, tex.offset, tex.size, 0, &ptr);

-            /* Only injects execution and memory barriers,
-             * not actual transition. */
-            VULKAN_IMAGE_LAYOUT_TRANSITION(
-                  staging, tex.image,
-                  VK_IMAGE_LAYOUT_GENERAL,
-                  VK_IMAGE_LAYOUT_GENERAL,
-                  VK_ACCESS_TRANSFER_WRITE_BIT,
-                  VK_ACCESS_TRANSFER_READ_BIT,
-                  VK_PIPELINE_STAGE_TRANSFER_BIT,
-                  VK_PIPELINE_STAGE_TRANSFER_BIT);
+            dst = (uint8_t*)ptr;
+            src = (const uint8_t*)initial;
+            for (y = 0; y < tex.height; y++, dst += tex.stride, src += stride)
+               memcpy(dst, src, width * bpp);

-            vkCmdBlitImage(staging,
-                  tex.image, VK_IMAGE_LAYOUT_GENERAL,
-                  tex.image, VK_IMAGE_LAYOUT_GENERAL,
-                  1, &blit_region, VK_FILTER_LINEAR);
-         }
+            if (  tex.need_manual_cache_management &&
+                  tex.memory != VK_NULL_HANDLE)
+               VULKAN_SYNC_TEXTURE_TO_GPU(vk->context->device, tex.memory);
+            vkUnmapMemory(device, tex.memory);
+         }
+         break;
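The STREAMED/STAGING case above copies row by row because the device's row pitch (`tex.stride`) can be larger than the tightly packed source pitch (`tex.width * bpp`). As a standalone illustration, a pitch-aware copy looks like the sketch below; `copy_rows` and its parameter names are illustrative, not names from the RetroArch source.

   #include <stddef.h>
   #include <stdint.h>
   #include <string.h>

   /* Copy `height` rows of `width_bytes` each from a tightly packed source
    * into a mapped allocation whose rows are `dst_pitch` bytes apart;
    * dst_pitch >= width_bytes when the driver pads rows for alignment. */
   static void copy_rows(uint8_t *dst, size_t dst_pitch,
         const uint8_t *src, size_t src_pitch,
         size_t width_bytes, unsigned height)
   {
      unsigned y;
      for (y = 0; y < height; y++, dst += dst_pitch, src += src_pitch)
         memcpy(dst, src, width_bytes);
   }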
+         case VULKAN_TEXTURE_STATIC:
+         {
+            VkBufferImageCopy region;
+            VkCommandBuffer staging;
+            struct vk_texture tmp = vulkan_create_texture(vk, NULL,
+                  width, height, format, initial, NULL, VULKAN_TEXTURE_STAGING);

-         /* Complete our texture. */
-         VULKAN_IMAGE_LAYOUT_TRANSITION(
-               staging, tex.image,
-               VK_IMAGE_LAYOUT_GENERAL,
-               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
-               VK_ACCESS_TRANSFER_WRITE_BIT,
-               VK_ACCESS_SHADER_READ_BIT,
-               VK_PIPELINE_STAGE_TRANSFER_BIT,
-               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
-      }
-      else
-      {
-         VULKAN_IMAGE_LAYOUT_TRANSITION(
-               staging, tex.image,
-               VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
-               VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
-               VK_ACCESS_TRANSFER_WRITE_BIT,
-               VK_ACCESS_SHADER_READ_BIT,
-               VK_PIPELINE_STAGE_TRANSFER_BIT,
-               VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
-      }
+            cmd_info.commandPool        = vk->staging_pool;
+            cmd_info.level              = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
+            cmd_info.commandBufferCount = 1;

-      vkEndCommandBuffer(staging);
-      submit_info.commandBufferCount = 1;
-      submit_info.pCommandBuffers    = &staging;
+            vkAllocateCommandBuffers(vk->context->device,
+                  &cmd_info, &staging);

+            begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
+
+            vkBeginCommandBuffer(staging, &begin_info);
+
+            /* If doing mipmapping on upload, keep in general
+             * so we can easily do transfers to
+             * and transfers from the images without having to
+             * mess around with lots of extra transitions at
+             * per-level granularity.
+             */
+            VULKAN_IMAGE_LAYOUT_TRANSITION(
+                  staging,
+                  tex.image,
+                  VK_IMAGE_LAYOUT_UNDEFINED,
+                  tex.mipmap
+                  ? VK_IMAGE_LAYOUT_GENERAL
+                  : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                  0, VK_ACCESS_TRANSFER_WRITE_BIT,
+                  VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
+                  VK_PIPELINE_STAGE_TRANSFER_BIT);
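The definition of VULKAN_IMAGE_LAYOUT_TRANSITION is not part of this hunk. Assuming it wraps the standard single-image-barrier pattern, an equivalent helper would look like the sketch below; this is written under that assumption, not the macro's actual body.

   #include <stddef.h>
   #include <vulkan/vulkan.h>

   static void image_layout_transition(VkCommandBuffer cmd, VkImage image,
         VkImageLayout old_layout, VkImageLayout new_layout,
         VkAccessFlags src_access, VkAccessFlags dst_access,
         VkPipelineStageFlags src_stages, VkPipelineStageFlags dst_stages)
   {
      VkImageMemoryBarrier barrier        = { VK_STRUCTURE_TYPE_IMAGE_MEMORY_BARRIER };
      barrier.srcAccessMask               = src_access;
      barrier.dstAccessMask               = dst_access;
      barrier.oldLayout                   = old_layout;
      barrier.newLayout                   = new_layout;
      barrier.srcQueueFamilyIndex         = VK_QUEUE_FAMILY_IGNORED;
      barrier.dstQueueFamilyIndex         = VK_QUEUE_FAMILY_IGNORED;
      barrier.image                       = image;
      barrier.subresourceRange.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
      barrier.subresourceRange.levelCount = VK_REMAINING_MIP_LEVELS;
      barrier.subresourceRange.layerCount = VK_REMAINING_ARRAY_LAYERS;

      /* One barrier covering every mip level and layer of the image. */
      vkCmdPipelineBarrier(cmd, src_stages, dst_stages, 0,
            0, NULL, 0, NULL, 1, &barrier);
   }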
+
+            memset(&region, 0, sizeof(region));
+            region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
+            region.imageSubresource.layerCount = 1;
+            region.imageExtent.width           = width;
+            region.imageExtent.height          = height;
+            region.imageExtent.depth           = 1;
+
+            vkCmdCopyBufferToImage(staging,
+                  tmp.buffer,
+                  tex.image,
+                  tex.mipmap
+                  ? VK_IMAGE_LAYOUT_GENERAL
+                  : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                  1, &region);
+
+            if (tex.mipmap)
+            {
+               for (i = 1; i < info.mipLevels; i++)
+               {
+                  VkImageBlit blit_region;
+                  unsigned src_width     = MAX(width >> (i - 1), 1);
+                  unsigned src_height    = MAX(height >> (i - 1), 1);
+                  unsigned target_width  = MAX(width >> i, 1);
+                  unsigned target_height = MAX(height >> i, 1);
+                  memset(&blit_region, 0, sizeof(blit_region));
+
+                  blit_region.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
+                  blit_region.srcSubresource.mipLevel       = i - 1;
+                  blit_region.srcSubresource.baseArrayLayer = 0;
+                  blit_region.srcSubresource.layerCount     = 1;
+                  blit_region.dstSubresource                = blit_region.srcSubresource;
+                  blit_region.dstSubresource.mipLevel       = i;
+                  blit_region.srcOffsets[1].x               = src_width;
+                  blit_region.srcOffsets[1].y               = src_height;
+                  blit_region.srcOffsets[1].z               = 1;
+                  blit_region.dstOffsets[1].x               = target_width;
+                  blit_region.dstOffsets[1].y               = target_height;
+                  blit_region.dstOffsets[1].z               = 1;
+
+                  /* Only injects execution and memory barriers,
+                   * not actual transition. */
+                  VULKAN_IMAGE_LAYOUT_TRANSITION(
+                        staging,
+                        tex.image,
+                        VK_IMAGE_LAYOUT_GENERAL,
+                        VK_IMAGE_LAYOUT_GENERAL,
+                        VK_ACCESS_TRANSFER_WRITE_BIT,
+                        VK_ACCESS_TRANSFER_READ_BIT,
+                        VK_PIPELINE_STAGE_TRANSFER_BIT,
+                        VK_PIPELINE_STAGE_TRANSFER_BIT);
+
+                  vkCmdBlitImage(
+                        staging,
+                        tex.image,
+                        VK_IMAGE_LAYOUT_GENERAL,
+                        tex.image,
+                        VK_IMAGE_LAYOUT_GENERAL,
+                        1,
+                        &blit_region,
+                        VK_FILTER_LINEAR);
+               }
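Each blit halves the previous level, and `MAX(width >> i, 1)` clamps the halving once a dimension bottoms out at one texel. `info.mipLevels` is filled in outside this hunk; for a full chain it would typically be derived as below (an illustrative helper, not code from the file).

   /* Levels in a full mip chain: keep halving the larger dimension
    * until it reaches 1, counting the base image as the first level. */
   static unsigned full_mip_levels(unsigned width, unsigned height)
   {
      unsigned max_dim = width > height ? width : height;
      unsigned levels  = 1;
      while (max_dim >>= 1)
         levels++;
      return levels;
   }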
+
+               /* Complete our texture. */
+               VULKAN_IMAGE_LAYOUT_TRANSITION(
+                     staging,
+                     tex.image,
+                     VK_IMAGE_LAYOUT_GENERAL,
+                     VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+                     VK_ACCESS_TRANSFER_WRITE_BIT,
+                     VK_ACCESS_SHADER_READ_BIT,
+                     VK_PIPELINE_STAGE_TRANSFER_BIT,
+                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
+            }
+            else
+            {
+               VULKAN_IMAGE_LAYOUT_TRANSITION(
+                     staging,
+                     tex.image,
+                     VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
+                     VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
+                     VK_ACCESS_TRANSFER_WRITE_BIT,
+                     VK_ACCESS_SHADER_READ_BIT,
+                     VK_PIPELINE_STAGE_TRANSFER_BIT,
+                     VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
+            }
+
+            vkEndCommandBuffer(staging);
+            submit_info.commandBufferCount = 1;
+            submit_info.pCommandBuffers    = &staging;

 #ifdef HAVE_THREADS
-      slock_lock(vk->context->queue_lock);
+            slock_lock(vk->context->queue_lock);
 #endif
-      vkQueueSubmit(vk->context->queue,
-            1, &submit_info, VK_NULL_HANDLE);
+            vkQueueSubmit(vk->context->queue,
+                  1, &submit_info, VK_NULL_HANDLE);

-      /* TODO: Very crude, but texture uploads only happen
-       * during init, so waiting for GPU to complete transfer
-       * and blocking isn't a big deal. */
-      vkQueueWaitIdle(vk->context->queue);
+            /* TODO: Very crude, but texture uploads only happen
+             * during init, so waiting for GPU to complete transfer
+             * and blocking isn't a big deal. */
+            vkQueueWaitIdle(vk->context->queue);
 #ifdef HAVE_THREADS
-      slock_unlock(vk->context->queue_lock);
+            slock_unlock(vk->context->queue_lock);
 #endif
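The TODO above acknowledges that vkQueueWaitIdle stalls the entire queue; since these uploads happen only at init, the cost is accepted. A fence would scope the wait to this one submission instead. A hypothetical variant of the submit/wait pair, reusing the `vk->context` fields from the hunk (`submit_and_wait` is an invented name, and the Vulkan calls are the standard fence API):

   #include <stdint.h>
   #include <vulkan/vulkan.h>

   static void submit_and_wait(vk_t *vk, const VkSubmitInfo *submit_info)
   {
      VkFence fence;
      VkFenceCreateInfo fence_info = { VK_STRUCTURE_TYPE_FENCE_CREATE_INFO };
      vkCreateFence(vk->context->device, &fence_info, NULL, &fence);

      /* Block on this submission only instead of draining the queue. */
      vkQueueSubmit(vk->context->queue, 1, submit_info, fence);
      vkWaitForFences(vk->context->device, 1, &fence, VK_TRUE, UINT64_MAX);
      vkDestroyFence(vk->context->device, fence, NULL);
   }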

-      vkFreeCommandBuffers(vk->context->device,
-            vk->staging_pool, 1, &staging);
-      vulkan_destroy_texture(
-            vk->context->device, &tmp);
-      tex.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+            vkFreeCommandBuffers(vk->context->device,
+                  vk->staging_pool, 1, &staging);
+            vulkan_destroy_texture(
+                  vk->context->device, &tmp);
+            tex.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
+         }
+         break;
+         case VULKAN_TEXTURE_DYNAMIC:
+         case VULKAN_TEXTURE_READBACK:
+            /* TODO/FIXME - stubs */
+            break;
+      }
    }

    return tex;
 }
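For reference, a call mirroring the upload paths in this hunk. Judging from the recursive `vulkan_create_texture(vk, NULL, width, height, format, initial, NULL, VULKAN_TEXTURE_STAGING)` call, a STATIC texture with initial contents would be created the same way; the two NULL arguments are simply passed through as in the hunk, and the dimensions and format below are only example values.

   /* Create an immutable 64x64 RGBA8 texture from CPU-side pixels. */
   static const uint32_t pixels[64 * 64]; /* placeholder contents */

   struct vk_texture tex = vulkan_create_texture(vk, NULL,
         64, 64, VK_FORMAT_R8G8B8A8_UNORM, pixels, NULL,
         VULKAN_TEXTURE_STATIC);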