rsx: Always initialize memory unless it is guaranteed to be wiped

commit 214bb3ec87 (parent 88290d9fab)
Author: kd-11, 2019-05-14 19:50:45 +03:00 (committed by kd-11)
10 changed files with 216 additions and 163 deletions

View File

@ -25,7 +25,7 @@ namespace rsx
{
unknown = 0,
attachment = 1,
storage = 2
storage = 2,
};
//Sampled image descriptor

View File

@ -25,6 +25,12 @@ namespace rsx
size_t get_packed_pitch(surface_color_format format, u32 width);
}
enum surface_state_flags : u32
{
ready = 0,
erase_bkgnd = 1
};
template <typename surface_type>
struct surface_overlap_info_t
{
@ -127,14 +133,14 @@ namespace rsx
u64 last_use_tag = 0; // tag indicating when this block was last confirmed to have been written to
std::array<std::pair<u32, u64>, 5> memory_tag_samples;
bool dirty = false;
deferred_clipped_region<image_storage_type> old_contents{};
rsx::surface_antialiasing read_aa_mode = rsx::surface_antialiasing::center_1_sample;
GcmTileInfo *tile = nullptr;
rsx::surface_antialiasing write_aa_mode = rsx::surface_antialiasing::center_1_sample;
flags32_t usage = surface_usage_flags::unknown;
flags32_t memory_usage_flags = surface_usage_flags::unknown;
flags32_t state_flags = surface_state_flags::ready;
union
{
@ -198,9 +204,14 @@ namespace rsx
return format_info.gcm_depth_format;
}
bool dirty() const
{
return (state_flags != rsx::surface_state_flags::ready) || old_contents;
}
bool test() const
{
if (dirty)
if (dirty())
{
// TODO
// Should RCB or mem-sync (inherit previous mem) to init memory
@ -361,7 +372,9 @@ namespace rsx
sync_tag();
read_aa_mode = write_aa_mode;
dirty = false;
// HACK!! This should be cleared through memory barriers only
state_flags = rsx::surface_state_flags::ready;
if (old_contents.source)
{
@ -604,7 +617,7 @@ namespace rsx
if (e.second->last_use_tag <= timestamp_check ||
new_surface == surface ||
address == e.first ||
e.second->dirty)
e.second->dirty())
{
// Do not bother synchronizing with uninitialized data
continue;
@ -731,7 +744,6 @@ namespace rsx
region.target = new_surface;
new_surface->set_old_contents_region(region, true);
new_surface->dirty = true;
break;
}
#endif
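
The hunks above replace the single dirty boolean with a small state machine: state_flags records whether the background still needs to be erased, and old_contents records whether data must be inherited from an older surface. A minimal, self-contained sketch of that accessor pattern (simplified, hypothetical types; not the actual surface class):

#include <cstdint>

using flags32_t = std::uint32_t;

enum surface_state_flags : std::uint32_t
{
    ready       = 0,
    erase_bkgnd = 1
};

struct surface_state_sketch
{
    flags32_t state_flags = surface_state_flags::erase_bkgnd; // new surfaces start uninitialized
    const void* old_contents = nullptr;                       // stands in for deferred_clipped_region

    // Dirty if the background was never initialized or contents must be inherited
    bool dirty() const
    {
        return (state_flags != surface_state_flags::ready) || old_contents != nullptr;
    }

    // In this sketch, a full write (draw or clear covering the surface) settles both conditions
    void on_write()
    {
        state_flags = surface_state_flags::ready;
        old_contents = nullptr;
    }
};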

View File

@ -2461,7 +2461,7 @@ namespace rsx
for (auto It = list.rbegin(); It != list.rend(); ++It)
{
if (!(It->surface->usage & rsx::surface_usage_flags::attachment))
if (!(It->surface->memory_usage_flags & rsx::surface_usage_flags::attachment))
{
// HACK
// TODO: Properly analyse the input here to determine if it can properly fit what we need

View File

@ -414,55 +414,57 @@ void GLGSRender::end()
}
}
}
rsx::simple_array<int> buffers_to_clear;
bool clear_all_color = true;
bool clear_depth = false;
for (int index = 0; index < 4; index++)
else
{
if (std::get<0>(m_rtts.m_bound_render_targets[index]) != 0)
rsx::simple_array<int> buffers_to_clear;
bool clear_all_color = true;
bool clear_depth = false;
for (int index = 0; index < 4; index++)
{
if (std::get<1>(m_rtts.m_bound_render_targets[index])->cleared())
clear_all_color = false;
else
buffers_to_clear.push_back(index);
}
}
if (ds && !ds->cleared())
{
clear_depth = true;
}
if (clear_depth || buffers_to_clear.size() > 0)
{
gl_state.enable(GL_FALSE, GL_SCISSOR_TEST);
GLenum mask = 0;
if (clear_depth)
{
gl_state.depth_mask(GL_TRUE);
gl_state.clear_depth(1.f);
gl_state.clear_stencil(255);
mask |= GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
if (m_rtts.m_bound_render_targets[index].first)
{
if (!m_rtts.m_bound_render_targets[index].second->dirty())
clear_all_color = false;
else
buffers_to_clear.push_back(index);
}
}
if (clear_all_color)
mask |= GL_COLOR_BUFFER_BIT;
glClear(mask);
if (buffers_to_clear.size() > 0 && !clear_all_color)
if (ds && ds->dirty())
{
GLfloat colors[] = { 0.f, 0.f, 0.f, 0.f };
//It is impossible for the render target to be type A or B here (clear all would have been flagged)
for (auto &i : buffers_to_clear)
glClearBufferfv(GL_COLOR, i, colors);
clear_depth = true;
}
if (clear_depth)
gl_state.depth_mask(rsx::method_registers.depth_write_enabled());
if (clear_depth || buffers_to_clear.size() > 0)
{
gl_state.enable(GL_FALSE, GL_SCISSOR_TEST);
GLenum mask = 0;
if (clear_depth)
{
gl_state.depth_mask(GL_TRUE);
gl_state.clear_depth(1.f);
gl_state.clear_stencil(255);
mask |= GL_DEPTH_BUFFER_BIT | GL_STENCIL_BUFFER_BIT;
}
if (clear_all_color)
mask |= GL_COLOR_BUFFER_BIT;
glClear(mask);
if (buffers_to_clear.size() > 0 && !clear_all_color)
{
GLfloat colors[] = { 0.f, 0.f, 0.f, 0.f };
//It is impossible for the render target to be type A or B here (clear all would have been flagged)
for (auto &i : buffers_to_clear)
glClearBufferfv(GL_COLOR, i, colors);
}
if (clear_depth)
gl_state.depth_mask(rsx::method_registers.depth_write_enabled());
}
}
// Unconditionally enable stencil test if it was disabled before
@ -1136,6 +1138,13 @@ void GLGSRender::clear_surface(u32 arg)
GLbitfield mask = 0;
gl::command_context cmd{ gl_state };
const bool require_mem_load =
rsx::method_registers.scissor_origin_x() > 0 ||
rsx::method_registers.scissor_origin_y() > 0 ||
rsx::method_registers.scissor_width() < rsx::method_registers.surface_clip_width() ||
rsx::method_registers.scissor_height() < rsx::method_registers.surface_clip_height();
rsx::surface_depth_format surface_depth_format = rsx::method_registers.surface_depth_fmt();
if (auto ds = std::get<1>(m_rtts.m_bound_depth_stencil); arg & 0x3)
@ -1161,7 +1170,7 @@ void GLGSRender::clear_surface(u32 arg)
mask |= GLenum(gl::buffers::stencil);
}
if ((arg & 0x3) != 0x3 && ds->dirty)
if ((arg & 0x3) != 0x3 && !require_mem_load && ds->dirty())
{
verify(HERE), mask;
@ -1185,6 +1194,8 @@ void GLGSRender::clear_surface(u32 arg)
if (mask)
{
if (require_mem_load) ds->write_barrier(cmd);
// Memory has been initialized
m_rtts.on_write(std::get<0>(m_rtts.m_bound_depth_stencil));
}
@ -1220,8 +1231,9 @@ void GLGSRender::clear_surface(u32 arg)
for (auto &rtt : m_rtts.m_bound_render_targets)
{
if (const auto address = std::get<0>(rtt))
if (const auto address = rtt.first)
{
if (require_mem_load) rtt.second->write_barrier(cmd);
m_rtts.on_write(address);
}
}
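
The require_mem_load check above captures the core rule of this change for clears: a clear only touches the scissor rectangle, so when that rectangle does not cover the whole render area, the pixels outside it must be made valid through a write barrier before the surface can be treated as initialized. A standalone sketch of the predicate, with hypothetical parameter names:

#include <cstdint>

// Returns true when a clear confined to the scissor rectangle leaves part of
// the surface untouched, i.e. the untouched region must be loaded/initialized
// via a write barrier instead of being assumed wiped.
static bool clear_requires_memory_load(std::uint16_t scissor_x, std::uint16_t scissor_y,
                                       std::uint16_t scissor_w, std::uint16_t scissor_h,
                                       std::uint16_t clip_w, std::uint16_t clip_h)
{
    return scissor_x > 0 || scissor_y > 0 ||
           scissor_w < clip_w || scissor_h < clip_h;
}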

View File

@ -615,36 +615,28 @@ void GLGSRender::read_buffers()
void gl::render_target::memory_barrier(gl::command_context& cmd, bool force_init)
{
auto is_depth = [](gl::texture::internal_format format)
auto clear_surface_impl = [&]()
{
// TODO: Change this to image aspect semantics
switch (format)
if (aspect() & gl::image_aspect::depth)
{
case gl::texture::internal_format::depth16:
case gl::texture::internal_format::depth24_stencil8:
case gl::texture::internal_format::depth32f_stencil8:
return true;
default:
return false;
gl::g_hw_blitter->fast_clear_image(cmd, this, 1.f, 255);
}
else
{
gl::g_hw_blitter->fast_clear_image(cmd, this, {});
}
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
};
if (!old_contents)
{
// No memory to inherit
if (dirty && force_init)
if (dirty() && (force_init || state_flags & rsx::surface_state_flags::erase_bkgnd))
{
// Initialize memory contents if we did not find anything usable
// TODO: Properly sync with Cell
if (is_depth(get_internal_format()))
{
gl::g_hw_blitter->fast_clear_image(cmd, this, 1.f, 255);
}
else
{
gl::g_hw_blitter->fast_clear_image(cmd, this, {});
}
clear_surface_impl();
on_write();
}
@ -686,6 +678,19 @@ void gl::render_target::memory_barrier(gl::command_context& cmd, bool force_init
const bool dst_is_depth = !!(aspect() & gl::image_aspect::depth);
old_contents.init_transfer(this);
if (state_flags & rsx::surface_state_flags::erase_bkgnd)
{
const auto area = old_contents.dst_rect();
if (area.x1 > 0 || area.y1 > 0 || area.x2 < width() || area.y2 < height())
{
clear_surface_impl();
}
else
{
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
}
}
gl::g_hw_blitter->scale_image(cmd, old_contents.source, this,
old_contents.src_rect(),
old_contents.dst_rect(),
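
Before inheriting old_contents, the barrier above decides whether a background clear is still required: if the destination rectangle of the incoming transfer covers the whole surface, the blit alone wipes the stale data and the erase_bkgnd flag is simply dropped; otherwise the surface is cleared first. A simplified sketch of that decision (hypothetical types, not the GL render_target class):

#include <cstdint>

struct areai_sketch { int x1, y1, x2, y2; };   // stands in for the dst_rect() result

constexpr std::uint32_t erase_bkgnd_flag = 1;

template <typename ClearFn>
void resolve_background_before_transfer(std::uint32_t& state_flags, const areai_sketch& dst,
                                        int surface_width, int surface_height, ClearFn clear_background)
{
    if (state_flags & erase_bkgnd_flag)
    {
        const bool partial_coverage =
            dst.x1 > 0 || dst.y1 > 0 || dst.x2 < surface_width || dst.y2 < surface_height;

        if (partial_coverage)
        {
            clear_background();   // initialize the region the blit will not touch
        }

        // Either way the flag is resolved: the clear or the full-coverage blit handles it
        state_flags &= ~erase_bkgnd_flag;
    }
}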

View File

@ -63,16 +63,6 @@ namespace gl
: viewable_image(GL_TEXTURE_2D, width, height, 1, 1, sized_format)
{}
void set_cleared(bool clear=true)
{
dirty = !clear;
}
bool cleared() const
{
return !dirty;
}
// Internal pitch is the actual row length in bytes of the openGL texture
void set_native_pitch(u16 pitch)
{
@ -188,8 +178,8 @@ struct gl_render_target_traits
std::array<GLenum, 4> native_layout = { (GLenum)format.swizzle.a, (GLenum)format.swizzle.r, (GLenum)format.swizzle.g, (GLenum)format.swizzle.b };
result->set_native_component_layout(native_layout);
result->usage = rsx::surface_usage_flags::attachment;
result->set_cleared(false);
result->memory_usage_flags = rsx::surface_usage_flags::attachment;
result->state_flags = rsx::surface_state_flags::erase_bkgnd;
result->queue_tag(address);
result->add_ref();
return result;
@ -216,8 +206,8 @@ struct gl_render_target_traits
result->set_native_component_layout(native_layout);
result->set_format(surface_depth_format);
result->usage = rsx::surface_usage_flags::attachment;
result->set_cleared(false);
result->memory_usage_flags = rsx::surface_usage_flags::attachment;
result->state_flags = rsx::surface_state_flags::erase_bkgnd;
result->queue_tag(address);
result->add_ref();
return result;
@ -236,9 +226,12 @@ struct gl_render_target_traits
const auto new_h = rsx::apply_resolution_scale(prev.height, true, ref->get_surface_height());
sink.reset(new gl::render_target(new_w, new_h, internal_format));
sink->usage = rsx::surface_usage_flags::storage;
sink->add_ref();
sink->memory_usage_flags = rsx::surface_usage_flags::storage;
sink->state_flags = rsx::surface_state_flags::erase_bkgnd;
sink->format_info = ref->format_info;
sink->set_native_pitch(prev.width * ref->get_bpp());
sink->set_surface_dimensions(prev.width, prev.height, ref->get_rsx_pitch());
sink->set_native_component_layout(ref->get_native_component_layout());
@ -253,7 +246,6 @@ struct gl_render_target_traits
sink->sync_tag();
sink->set_old_contents_region(prev, false);
sink->set_cleared(false);
sink->last_use_tag = ref->last_use_tag;
}
@ -277,12 +269,12 @@ struct gl_render_target_traits
static void prepare_rtt_for_drawing(gl::command_context&, gl::render_target* rtt)
{
rtt->usage |= rsx::surface_usage_flags::attachment;
rtt->memory_usage_flags |= rsx::surface_usage_flags::attachment;
}
static void prepare_ds_for_drawing(gl::command_context&, gl::render_target* ds)
{
ds->usage |= rsx::surface_usage_flags::attachment;
ds->memory_usage_flags |= rsx::surface_usage_flags::attachment;
}
static void prepare_rtt_for_sampling(gl::command_context&, gl::render_target*) {}
@ -300,9 +292,8 @@ struct gl_render_target_traits
surface->set_rsx_pitch((u16)pitch);
surface->reset_aa_mode();
surface->queue_tag(address);
surface->set_cleared(false);
surface->last_use_tag = 0;
surface->usage = rsx::surface_usage_flags::unknown;
surface->memory_usage_flags = rsx::surface_usage_flags::unknown;
}
static
@ -326,6 +317,7 @@ struct gl_render_target_traits
static
void notify_surface_reused(const std::unique_ptr<gl::render_target>& surface)
{
surface->state_flags |= rsx::surface_state_flags::erase_bkgnd;
surface->add_ref();
}
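
The trait changes above (and their Vulkan counterparts below) establish a simple lifecycle: every surface whose contents are not guaranteed to be overwritten immediately, whether freshly created, converted from another surface, or reused from the invalidated pool, is tagged erase_bkgnd, and only an actual write clears it. A compact sketch of that tagging rule (hypothetical factory and stub type, not the real traits):

#include <cstdint>
#include <memory>

struct surface_stub
{
    std::uint32_t memory_usage_flags = 0;  // surface_usage_flags: unknown/attachment/storage
    std::uint32_t state_flags = 0;         // surface_state_flags: ready/erase_bkgnd
};

std::unique_ptr<surface_stub> create_render_target_stub()
{
    auto surface = std::make_unique<surface_stub>();
    surface->memory_usage_flags = 1;       // attachment
    surface->state_flags = 1;              // erase_bkgnd: contents are undefined until first use
    return surface;
}

void notify_surface_reused_stub(surface_stub& surface)
{
    // A recycled surface may still hold another region's stale data
    surface.state_flags |= 1;              // erase_bkgnd
}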

View File

@ -233,7 +233,7 @@ namespace gl
if (context == rsx::texture_upload_context::framebuffer_storage)
{
auto as_rtt = static_cast<gl::render_target*>(vram_texture);
if (as_rtt->dirty) as_rtt->read_barrier(cmd);
if (as_rtt->dirty()) as_rtt->read_barrier(cmd);
}
gl::texture* target_texture = vram_texture;

View File

@ -1826,6 +1826,16 @@ void VKGSRender::end()
m_current_command_buffer->flags |= vk::command_buffer::cb_has_occlusion_task;
}
// Final heap check...
check_heap_status(VK_HEAP_CHECK_VERTEX_STORAGE | VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE);
// While vertex upload is an interruptible process, if we made it this far, there's no need to sync anything that occurs past this point
// Only textures are synchronized tightly with the GPU and they have been read back above
vk::enter_uninterruptible();
vkCmdBindPipeline(*m_current_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline);
update_draw_state();
// Apply write memory barriers
if (1)//g_cfg.video.strict_rendering_mode)
{
@ -1838,46 +1848,41 @@ void VKGSRender::end()
surface->write_barrier(*m_current_command_buffer);
}
}
begin_render_pass();
}
// Final heap check...
check_heap_status(VK_HEAP_CHECK_VERTEX_STORAGE | VK_HEAP_CHECK_VERTEX_LAYOUT_STORAGE);
// While vertex upload is an interruptible process, if we made it this far, there's no need to sync anything that occurs past this point
// Only textures are synchronized tightly with the GPU and they have been read back above
vk::enter_uninterruptible();
vkCmdBindPipeline(*m_current_command_buffer, VK_PIPELINE_BIND_POINT_GRAPHICS, m_program->pipeline);
update_draw_state();
begin_render_pass();
// Clear any 'dirty' surfaces - possible if a recycled cache surface is used
rsx::simple_array<VkClearAttachment> buffers_to_clear;
if (ds && ds->dirty)
else
{
// Clear this surface before drawing on it
VkClearValue clear_value = {};
clear_value.depthStencil = { 1.f, 255 };
buffers_to_clear.push_back({ vk::get_aspect_flags(ds->info.format), 0, clear_value });
}
begin_render_pass();
for (u32 index = 0; index < m_draw_buffers.size(); ++index)
{
if (auto rtt = std::get<1>(m_rtts.m_bound_render_targets[index]))
// Clear any 'dirty' surfaces - possible if a recycled cache surface is used
rsx::simple_array<VkClearAttachment> buffers_to_clear;
if (ds && ds->dirty())
{
if (rtt->dirty)
// Clear this surface before drawing on it
VkClearValue clear_value = {};
clear_value.depthStencil = { 1.f, 255 };
buffers_to_clear.push_back({ vk::get_aspect_flags(ds->info.format), 0, clear_value });
}
for (u32 index = 0; index < m_draw_buffers.size(); ++index)
{
if (auto rtt = std::get<1>(m_rtts.m_bound_render_targets[index]))
{
buffers_to_clear.push_back({ VK_IMAGE_ASPECT_COLOR_BIT, index, {} });
if (rtt->dirty())
{
buffers_to_clear.push_back({ VK_IMAGE_ASPECT_COLOR_BIT, index, {} });
}
}
}
}
if (UNLIKELY(!buffers_to_clear.empty()))
{
VkClearRect rect = { {{0, 0}, {m_draw_fbo->width(), m_draw_fbo->height()}}, 0, 1 };
vkCmdClearAttachments(*m_current_command_buffer, buffers_to_clear.size(),
buffers_to_clear.data(), 1, &rect);
if (UNLIKELY(!buffers_to_clear.empty()))
{
VkClearRect rect = { {{0, 0}, {m_draw_fbo->width(), m_draw_fbo->height()}}, 0, 1 };
vkCmdClearAttachments(*m_current_command_buffer, buffers_to_clear.size(),
buffers_to_clear.data(), 1, &rect);
}
}
u32 sub_index = 0;
@ -2078,11 +2083,10 @@ void VKGSRender::clear_surface(u32 mask)
std::vector<VkClearAttachment> clear_descriptors;
VkClearValue depth_stencil_clear_values = {}, color_clear_values = {};
const auto scale = rsx::get_resolution_scale();
u16 scissor_x = rsx::apply_resolution_scale(rsx::method_registers.scissor_origin_x(), false);
u16 scissor_w = rsx::apply_resolution_scale(rsx::method_registers.scissor_width(), true);
u16 scissor_y = rsx::apply_resolution_scale(rsx::method_registers.scissor_origin_y(), false);
u16 scissor_h = rsx::apply_resolution_scale(rsx::method_registers.scissor_height(), true);
u16 scissor_x = (u16)m_scissor.offset.x;
u16 scissor_w = (u16)m_scissor.extent.width;
u16 scissor_y = (u16)m_scissor.offset.y;
u16 scissor_h = (u16)m_scissor.extent.height;
const u16 fb_width = m_draw_fbo->width();
const u16 fb_height = m_draw_fbo->height();
@ -2091,6 +2095,7 @@ void VKGSRender::clear_surface(u32 mask)
std::tie(scissor_x, scissor_y, scissor_w, scissor_h) = rsx::clip_region<u16>(fb_width, fb_height, scissor_x, scissor_y, scissor_w, scissor_h, true);
VkClearRect region = { { { scissor_x, scissor_y },{ scissor_w, scissor_h } }, 0, 1 };
const bool require_mem_load = (scissor_w * scissor_h) < (fb_width * fb_height);
auto surface_depth_format = rsx::method_registers.surface_depth_fmt();
if (auto ds = std::get<1>(m_rtts.m_bound_depth_stencil); mask & 0x3)
@ -2118,7 +2123,7 @@ void VKGSRender::clear_surface(u32 mask)
depth_stencil_mask |= VK_IMAGE_ASPECT_STENCIL_BIT;
}
if ((mask & 0x3) != 0x3 && ds->dirty)
if ((mask & 0x3) != 0x3 && !require_mem_load && ds->state_flags & rsx::surface_state_flags::erase_bkgnd)
{
verify(HERE), depth_stencil_mask;
@ -2202,8 +2207,10 @@ void VKGSRender::clear_surface(u32 mask)
for (const auto &index : m_draw_buffers)
{
if (auto rtt = std::get<1>(m_rtts.m_bound_render_targets[index]))
if (auto rtt = m_rtts.m_bound_render_targets[index].second)
{
if (require_mem_load) rtt->write_barrier(*m_current_command_buffer);
vk::insert_texture_barrier(*m_current_command_buffer, rtt);
m_attachment_clear_pass->run(*m_current_command_buffer, rtt,
region.rect, renderpass, m_framebuffers_to_clean);
@ -2219,8 +2226,9 @@ void VKGSRender::clear_surface(u32 mask)
for (auto &rtt : m_rtts.m_bound_render_targets)
{
if (const auto address = std::get<0>(rtt))
if (const auto address = rtt.first)
{
if (require_mem_load) rtt.second->write_barrier(*m_current_command_buffer);
m_rtts.on_write(address);
}
}
@ -2230,8 +2238,9 @@ void VKGSRender::clear_surface(u32 mask)
if (depth_stencil_mask)
{
if (const auto address = std::get<0>(m_rtts.m_bound_depth_stencil))
if (const auto address = m_rtts.m_bound_depth_stencil.first)
{
if (require_mem_load) m_rtts.m_bound_depth_stencil.second->write_barrier(*m_current_command_buffer);
m_rtts.on_write(address);
clear_descriptors.push_back({ (VkImageAspectFlags)depth_stencil_mask, 0, depth_stencil_clear_values });
}
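
Both clear paths now follow the same ordering when the clear is partial: resolve whatever the surface still owes (background erase or inherited contents) with a write barrier, perform the backend clear, then mark the memory as initialized. A small interface sketch of that ordering (hypothetical names; the real calls are write_barrier(), glClear/vkCmdClearAttachments and m_rtts.on_write()):

#include <functional>

struct clearable_surface_sketch
{
    std::function<void()> write_barrier;   // resolves erase_bkgnd / old_contents first
    std::function<void()> mark_written;    // corresponds to on_write(): memory is now valid
};

inline void clear_with_policy(clearable_surface_sketch& surface, bool require_mem_load,
                              const std::function<void()>& backend_clear)
{
    if (require_mem_load)
        surface.write_barrier();           // keep pixels outside the scissor rectangle valid

    backend_clear();                       // the clear itself only covers the scissor region
    surface.mark_written();
}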

View File

@ -67,32 +67,41 @@ namespace vk
void memory_barrier(vk::command_buffer& cmd, bool force_init = false)
{
// Helper to optionally clear/initialize memory contents depending on barrier type
auto clear_surface_impl = [&]()
{
VkImageSubresourceRange range{ aspect(), 0, 1, 0, 1 };
const auto old_layout = current_layout;
change_image_layout(cmd, this, VK_IMAGE_LAYOUT_GENERAL, range);
if (aspect() & VK_IMAGE_ASPECT_COLOR_BIT)
{
VkClearColorValue color{};
vkCmdClearColorImage(cmd, value, VK_IMAGE_LAYOUT_GENERAL, &color, 1, &range);
}
else
{
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, value, VK_IMAGE_LAYOUT_GENERAL, &clear, 1, &range);
}
change_image_layout(cmd, this, old_layout, range);
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
};
auto null_transfer_impl = [&]()
{
if (dirty && force_init)
if (dirty() && (force_init || state_flags & rsx::surface_state_flags::erase_bkgnd))
{
// Initialize memory contents if we did not find anything usable
// TODO: Properly sync with Cell
VkImageSubresourceRange range{ aspect(), 0, 1, 0, 1 };
const auto old_layout = current_layout;
change_image_layout(cmd, this, VK_IMAGE_LAYOUT_GENERAL, range);
if (aspect() & VK_IMAGE_ASPECT_COLOR_BIT)
{
VkClearColorValue color{};
vkCmdClearColorImage(cmd, value, VK_IMAGE_LAYOUT_GENERAL, &color, 1, &range);
}
else
{
VkClearDepthStencilValue clear{ 1.f, 255 };
vkCmdClearDepthStencilImage(cmd, value, VK_IMAGE_LAYOUT_GENERAL, &clear, 1, &range);
}
change_image_layout(cmd, this, old_layout, range);
clear_surface_impl();
on_write();
}
else
{
verify(HERE), state_flags == rsx::surface_state_flags::ready;
}
};
if (!old_contents)
@ -134,6 +143,19 @@ namespace vk
vk::blitter hw_blitter;
old_contents.init_transfer(this);
if (state_flags & rsx::surface_state_flags::erase_bkgnd)
{
const auto area = old_contents.dst_rect();
if (area.x1 > 0 || area.y1 > 0 || area.x2 < width() || area.y2 < height())
{
clear_surface_impl();
}
else
{
state_flags &= ~rsx::surface_state_flags::erase_bkgnd;
}
}
hw_blitter.scale_image(cmd, old_contents.source, this,
old_contents.src_rect(),
old_contents.dst_rect(),
@ -197,14 +219,14 @@ namespace rsx
change_image_layout(cmd, rtt.get(), VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL, vk::get_image_subresource_range(0, 0, 1, 1, VK_IMAGE_ASPECT_COLOR_BIT));
rtt->set_format(format);
rtt->usage = rsx::surface_usage_flags::attachment;
rtt->memory_usage_flags = rsx::surface_usage_flags::attachment;
rtt->state_flags = rsx::surface_state_flags::erase_bkgnd;
rtt->native_component_map = fmt.second;
rtt->rsx_pitch = (u16)pitch;
rtt->native_pitch = (u16)width * get_format_block_size_in_bytes(format);
rtt->surface_width = (u16)width;
rtt->surface_height = (u16)height;
rtt->queue_tag(address);
rtt->dirty = true;
rtt->add_ref();
return rtt;
@ -238,8 +260,10 @@ namespace rsx
ds->set_format(format);
ds->usage = rsx::surface_usage_flags::attachment;
ds->memory_usage_flags = rsx::surface_usage_flags::attachment;
ds->state_flags = rsx::surface_state_flags::erase_bkgnd;
ds->native_component_map = { VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R, VK_COMPONENT_SWIZZLE_R };
change_image_layout(cmd, ds.get(), VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL, range);
ds->native_pitch = (u16)width * 2;
@ -250,7 +274,6 @@ namespace rsx
ds->surface_width = (u16)width;
ds->surface_height = (u16)height;
ds->queue_tag(address);
ds->dirty = true;
ds->add_ref();
return ds;
@ -280,7 +303,8 @@ namespace rsx
sink->add_ref();
sink->format_info = ref->format_info;
sink->usage = rsx::surface_usage_flags::storage;
sink->memory_usage_flags = rsx::surface_usage_flags::storage;
sink->state_flags = rsx::surface_state_flags::erase_bkgnd;
sink->native_component_map = ref->native_component_map;
sink->native_pitch = u16(prev.width * ref->get_bpp());
sink->surface_width = prev.width;
@ -293,7 +317,6 @@ namespace rsx
sink->rsx_pitch = ref->get_rsx_pitch();
sink->sync_tag();
sink->set_old_contents_region(prev, false);
sink->dirty = true;
sink->last_use_tag = ref->last_use_tag;
change_image_layout(cmd, sink.get(), VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL);
@ -319,7 +342,7 @@ namespace rsx
{
surface->change_layout(cmd, VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL);
surface->frame_tag = 0;
surface->usage |= rsx::surface_usage_flags::attachment;
surface->memory_usage_flags |= rsx::surface_usage_flags::attachment;
}
static void prepare_rtt_for_sampling(vk::command_buffer& cmd, vk::render_target *surface)
@ -331,7 +354,7 @@ namespace rsx
{
surface->change_layout(cmd, VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL);
surface->frame_tag = 0;
surface->usage |= rsx::surface_usage_flags::attachment;
surface->memory_usage_flags |= rsx::surface_usage_flags::attachment;
}
static void prepare_ds_for_sampling(vk::command_buffer& cmd, vk::render_target *surface)
@ -349,9 +372,8 @@ namespace rsx
surface->rsx_pitch = (u16)pitch;
surface->reset_aa_mode();
surface->queue_tag(address);
surface->dirty = true;
surface->last_use_tag = 0;
surface->usage = rsx::surface_usage_flags::unknown;
surface->memory_usage_flags = rsx::surface_usage_flags::unknown;
}
static void notify_surface_invalidated(const std::unique_ptr<vk::render_target> &surface)
@ -375,6 +397,7 @@ namespace rsx
static void notify_surface_reused(const std::unique_ptr<vk::render_target> &surface)
{
surface->state_flags |= rsx::surface_state_flags::erase_bkgnd;
surface->add_ref();
}
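
Taken together, the barrier now implements one policy in both backends, matching the commit title: memory is always initialized unless an incoming transfer is guaranteed to overwrite every pixel of the surface. A condensed, self-contained sketch of that control flow (simplified fields and hypothetical callbacks, not the actual vk::render_target):

#include <cstdint>
#include <functional>

struct barrier_policy_sketch
{
    static constexpr std::uint32_t erase_bkgnd = 1;

    std::uint32_t state_flags = erase_bkgnd;     // new surfaces start uninitialized
    bool has_old_contents = false;               // pending region to inherit
    bool transfer_covers_surface = false;        // does dst_rect() span the whole surface?

    std::function<void()> clear_background;      // fast_clear_image / vkCmdClear* in the real code
    std::function<void()> blit_old_contents;     // scale_image(...) in the real code

    bool dirty() const { return state_flags != 0 || has_old_contents; }

    void memory_barrier(bool force_init)
    {
        if (!has_old_contents)
        {
            // Nothing to inherit: initialize on demand
            if (dirty() && (force_init || (state_flags & erase_bkgnd)))
            {
                clear_background();
                state_flags = 0;                 // on_write(): memory is defined now
            }
            return;
        }

        // Something to inherit: clear only when the transfer covers the surface partially
        if (state_flags & erase_bkgnd)
        {
            if (!transfer_covers_surface)
                clear_background();

            state_flags &= ~erase_bkgnd;
        }

        blit_old_contents();
        has_old_contents = false;
    }
};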

View File

@ -192,7 +192,7 @@ namespace vk
if (context == rsx::texture_upload_context::framebuffer_storage)
{
auto as_rtt = static_cast<vk::render_target*>(vram_texture);
if (as_rtt->dirty) as_rtt->read_barrier(cmd);
if (as_rtt->dirty()) as_rtt->read_barrier(cmd);
}
vk::image *target = vram_texture;
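
Finally, both texture caches apply the same rule when a framebuffer-backed surface is about to be sampled: if it is still dirty, a read barrier runs first so the shader never sees undefined memory. A minimal sketch (hypothetical type, not the cache code):

struct sampled_rtt_sketch
{
    bool needs_init = true;

    bool dirty() const { return needs_init; }

    void read_barrier()
    {
        // In the real code this initializes or inherits memory via memory_barrier()
        needs_init = false;
    }
};

inline void prepare_for_sampling(sampled_rtt_sketch& rtt)
{
    if (rtt.dirty())
        rtt.read_barrier();   // guarantee defined contents before the texture is read
}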