Simplify vulkan_create_texture

twinaphex 2020-12-15 08:00:22 +01:00
parent d55ca599e0
commit 2d588ca4ef
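The change collapses the chained if/else-if on initial and type into a single if (initial) guarding a switch (type), and adds explicit stub cases for the texture types that take no initial data. Condensed, the new control flow looks like this (case bodies elided; see the full hunk below):

    if (initial)
    {
       switch (type)
       {
          case VULKAN_TEXTURE_STREAMED:
          case VULKAN_TEXTURE_STAGING:
             /* Map tex.memory, copy rows CPU-side, flush, unmap. */
             break;
          case VULKAN_TEXTURE_STATIC:
             /* Staging texture + command-buffer copy/blit, then block. */
             break;
          case VULKAN_TEXTURE_DYNAMIC:
          case VULKAN_TEXTURE_READBACK:
             /* TODO/FIXME - stubs */
             break;
       }
    }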


@@ -635,157 +635,186 @@ struct vk_texture vulkan_create_texture(vk_t *vk,
    tex.format = format;
    tex.type   = type;
 
-   if (initial && (type == VULKAN_TEXTURE_STREAMED || type == VULKAN_TEXTURE_STAGING))
+   if (initial)
    {
+      switch (type)
+      {
+         case VULKAN_TEXTURE_STREAMED:
+         case VULKAN_TEXTURE_STAGING:
+            {
                unsigned y;
                uint8_t *dst       = NULL;
                const uint8_t *src = NULL;
                void *ptr          = NULL;
                unsigned bpp       = vulkan_format_to_bpp(tex.format);
                unsigned stride    = tex.width * bpp;
 
                vkMapMemory(device, tex.memory, tex.offset, tex.size, 0, &ptr);
 
                dst = (uint8_t*)ptr;
                src = (const uint8_t*)initial;
                for (y = 0; y < tex.height; y++, dst += tex.stride, src += stride)
                   memcpy(dst, src, width * bpp);
 
                if (  tex.need_manual_cache_management &&
                      tex.memory != VK_NULL_HANDLE)
                   VULKAN_SYNC_TEXTURE_TO_GPU(vk->context->device, tex.memory);
                vkUnmapMemory(device, tex.memory);
-   }
-   else if (initial && type == VULKAN_TEXTURE_STATIC)
-   {
+            }
+            break;
+         case VULKAN_TEXTURE_STATIC:
+            {
                VkBufferImageCopy region;
                VkCommandBuffer staging;
                struct vk_texture tmp = vulkan_create_texture(vk, NULL,
                      width, height, format, initial, NULL, VULKAN_TEXTURE_STAGING);
 
                cmd_info.commandPool        = vk->staging_pool;
                cmd_info.level              = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
                cmd_info.commandBufferCount = 1;
 
                vkAllocateCommandBuffers(vk->context->device,
                      &cmd_info, &staging);
 
                begin_info.flags = VK_COMMAND_BUFFER_USAGE_ONE_TIME_SUBMIT_BIT;
 
                vkBeginCommandBuffer(staging, &begin_info);
 
                /* If doing mipmapping on upload, keep in general
                 * so we can easily do transfers to
                 * and transfers from the images without having to
                 * mess around with lots of extra transitions at
                 * per-level granularity.
                 */
                VULKAN_IMAGE_LAYOUT_TRANSITION(
                      staging,
                      tex.image,
                      VK_IMAGE_LAYOUT_UNDEFINED,
                      tex.mipmap
                            ? VK_IMAGE_LAYOUT_GENERAL
                            : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                      0, VK_ACCESS_TRANSFER_WRITE_BIT,
                      VK_PIPELINE_STAGE_TOP_OF_PIPE_BIT,
                      VK_PIPELINE_STAGE_TRANSFER_BIT);
 
                memset(&region, 0, sizeof(region));
                region.imageSubresource.aspectMask = VK_IMAGE_ASPECT_COLOR_BIT;
                region.imageSubresource.layerCount = 1;
                region.imageExtent.width           = width;
                region.imageExtent.height          = height;
                region.imageExtent.depth           = 1;
 
                vkCmdCopyBufferToImage(staging,
                      tmp.buffer,
                      tex.image,
                      tex.mipmap
                            ? VK_IMAGE_LAYOUT_GENERAL
                            : VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                      1, &region);
 
                if (tex.mipmap)
                {
                   for (i = 1; i < info.mipLevels; i++)
                   {
                      VkImageBlit blit_region;
                      unsigned src_width     = MAX(width >> (i - 1), 1);
                      unsigned src_height    = MAX(height >> (i - 1), 1);
                      unsigned target_width  = MAX(width >> i, 1);
                      unsigned target_height = MAX(height >> i, 1);
 
                      memset(&blit_region, 0, sizeof(blit_region));
 
                      blit_region.srcSubresource.aspectMask     = VK_IMAGE_ASPECT_COLOR_BIT;
                      blit_region.srcSubresource.mipLevel       = i - 1;
                      blit_region.srcSubresource.baseArrayLayer = 0;
                      blit_region.srcSubresource.layerCount     = 1;
                      blit_region.dstSubresource                = blit_region.srcSubresource;
                      blit_region.dstSubresource.mipLevel       = i;
                      blit_region.srcOffsets[1].x               = src_width;
                      blit_region.srcOffsets[1].y               = src_height;
                      blit_region.srcOffsets[1].z               = 1;
                      blit_region.dstOffsets[1].x               = target_width;
                      blit_region.dstOffsets[1].y               = target_height;
                      blit_region.dstOffsets[1].z               = 1;
 
                      /* Only injects execution and memory barriers,
                       * not actual transition. */
                      VULKAN_IMAGE_LAYOUT_TRANSITION(
                            staging,
                            tex.image,
                            VK_IMAGE_LAYOUT_GENERAL,
                            VK_IMAGE_LAYOUT_GENERAL,
                            VK_ACCESS_TRANSFER_WRITE_BIT,
                            VK_ACCESS_TRANSFER_READ_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT,
                            VK_PIPELINE_STAGE_TRANSFER_BIT);
 
                      vkCmdBlitImage(
                            staging,
                            tex.image,
                            VK_IMAGE_LAYOUT_GENERAL,
                            tex.image,
                            VK_IMAGE_LAYOUT_GENERAL,
                            1,
                            &blit_region,
                            VK_FILTER_LINEAR);
                   }
 
                   /* Complete our texture. */
                   VULKAN_IMAGE_LAYOUT_TRANSITION(
                         staging,
                         tex.image,
                         VK_IMAGE_LAYOUT_GENERAL,
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         VK_ACCESS_TRANSFER_WRITE_BIT,
                         VK_ACCESS_SHADER_READ_BIT,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
                }
                else
                {
                   VULKAN_IMAGE_LAYOUT_TRANSITION(
                         staging,
                         tex.image,
                         VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL,
                         VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL,
                         VK_ACCESS_TRANSFER_WRITE_BIT,
                         VK_ACCESS_SHADER_READ_BIT,
                         VK_PIPELINE_STAGE_TRANSFER_BIT,
                         VK_PIPELINE_STAGE_FRAGMENT_SHADER_BIT);
                }
 
                vkEndCommandBuffer(staging);
                submit_info.commandBufferCount = 1;
                submit_info.pCommandBuffers    = &staging;
 #ifdef HAVE_THREADS
                slock_lock(vk->context->queue_lock);
 #endif
                vkQueueSubmit(vk->context->queue,
                      1, &submit_info, VK_NULL_HANDLE);
 
                /* TODO: Very crude, but texture uploads only happen
                 * during init, so waiting for GPU to complete transfer
                 * and blocking isn't a big deal. */
                vkQueueWaitIdle(vk->context->queue);
 #ifdef HAVE_THREADS
                slock_unlock(vk->context->queue_lock);
 #endif
 
                vkFreeCommandBuffers(vk->context->device,
                      vk->staging_pool, 1, &staging);
                vulkan_destroy_texture(
                      vk->context->device, &tmp);
                tex.layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
-   }
+            }
+            break;
+         case VULKAN_TEXTURE_DYNAMIC:
+         case VULKAN_TEXTURE_READBACK:
+            /* TODO/FIXME - stubs */
+            break;
+      }
+   }
 
    return tex;
 }
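For context, a minimal caller sketch. The parameter order is inferred from the recursive call inside the hunk (vk, old, width, height, format, initial, swizzle, type); the helper name and pixel format here are illustrative, not part of the commit:

    /* Hypothetical helper: upload a CPU-side RGBA8 image as a static,
     * shader-readable Vulkan texture. The vulkan_create_texture signature
     * is assumed from the recursive VULKAN_TEXTURE_STAGING call above. */
    static struct vk_texture upload_static_rgba8(vk_t *vk,
          const uint32_t *pixels, unsigned width, unsigned height)
    {
       /* NULL old texture and NULL swizzle; the STATIC path copies the
        * pixels through an internal STAGING texture, blits mipmaps if
        * requested, and blocks on vkQueueWaitIdle before returning. */
       return vulkan_create_texture(vk, NULL, width, height,
             VK_FORMAT_R8G8B8A8_UNORM, pixels, NULL,
             VULKAN_TEXTURE_STATIC);
    }

For STREAMED and STAGING textures, the initial data is instead copied row by row on the CPU, honoring the (possibly padded) tex.stride of the mapped allocation, and VULKAN_SYNC_TEXTURE_TO_GPU is invoked when the texture flags it as needing manual cache management (presumably non-coherent host-visible memory).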