rsx: Fix for rsx thread lockup due to nested access violations when WCB is enabled

This commit is contained in:
kd-11 2017-10-29 14:29:35 +03:00
parent f8f5f9f418
commit c2ac05f734

View File

@@ -248,7 +248,7 @@ namespace rsx
 		}
 
 		//Get intersecting set - Returns all objects intersecting a given range and their owning blocks
-		std::vector<std::pair<section_storage_type*, ranged_storage*>> get_intersecting_set(u32 address, u32 range, bool check_whole_size)
+		std::vector<std::pair<section_storage_type*, ranged_storage*>> get_intersecting_set(u32 address, u32 range)
 		{
 			std::vector<std::pair<section_storage_type*, ranged_storage*>> result;
 			u64 cache_tag = get_system_time();
@@ -278,7 +278,7 @@ namespace rsx
 					if (tex.cache_tag == cache_tag) continue; //already processed
 					if (!tex.is_locked()) continue;  //flushable sections can be 'clean' but unlocked. TODO: Handle this better
-					auto overlapped = tex.overlaps_page(trampled_range, address, check_whole_size);
+					auto overlapped = tex.overlaps_page(trampled_range, address, tex.is_flushable());
 					if (std::get<0>(overlapped))
 					{
 						auto &new_range = std::get<1>(overlapped);
@@ -313,12 +313,16 @@ namespace rsx
 			if (!region_intersects_cache(address, range, is_writing))
 				return {};
 
-			auto trampled_set = get_intersecting_set(address, range, allow_flush);
+			auto trampled_set = get_intersecting_set(address, range);
 
 			if (trampled_set.size() > 0)
+			{
+				auto to_reprotect = trampled_set.end();
+
+				if (!discard_only)
 				{
 					// Rebuild the cache by only destroying ranges that need to be destroyed to unlock this page
-					const auto to_reprotect = std::remove_if(trampled_set.begin(), trampled_set.end(),
+					to_reprotect = std::remove_if(trampled_set.begin(), trampled_set.end(),
 						[&](const std::pair<section_storage_type*, ranged_storage*>& obj)
 						{
 							if (!is_writing && obj.first->get_protection() != utils::protection::no)
@@ -335,14 +339,15 @@ namespace rsx
 						});
 
 					if (to_reprotect == trampled_set.begin())
-						return {};
+						return{};
+				}
 
 				std::vector<section_storage_type*> sections_to_flush;
-				for (auto It = trampled_set.begin(); It != to_reprotect; ++It)
+				for (auto It = trampled_set.begin(); It != trampled_set.end(); ++It)
 				{
-					auto obj = *It;
+					auto &obj = *It;
-					if (obj.first->is_flushable())
+					if (obj.first->is_flushable() && It < to_reprotect)
 					{
 						sections_to_flush.push_back(obj.first);
 					}
@@ -387,7 +392,7 @@ namespace rsx
 				for (auto It = to_reprotect; It != trampled_set.end(); It++)
 				{
-					auto obj = *It;
+					auto &obj = *It;
 					auto old_prot = obj.first->get_protection();
 					obj.first->discard();