rsx: Code quality and readability improvements

This commit is contained in:
kd-11 2023-11-25 02:56:22 +03:00 committed by kd-11
parent b674e332fc
commit 685b3ee41b
2 changed files with 89 additions and 37 deletions

View File

@ -6,6 +6,43 @@
namespace rsx namespace rsx
{ {
namespace iomap_helper
{
	// Thin adapter that gives a shared_mutex a uniform Lockable-style
	// interface, selecting shared vs. exclusive acquisition at compile time.
	// Shared = true routes lock()/try_lock() to the *_shared variants;
	// Shared = false routes them to the exclusive variants.
	//
	// NOTE: deliberately has no unlock() — callers release the underlying
	// mutex through other means (e.g. rsx_iomap_table::unlock), so this is
	// not a full BasicLockable and must not be handed to std::lock_guard.
	template <bool Shared>
	struct io_lock
	{
		// Reference to the mutex being wrapped; the wrapper owns nothing.
		shared_mutex& ref;

		// explicit: wrapping a mutex must never happen via implicit conversion.
		explicit io_lock(shared_mutex& obj)
			: ref(obj)
		{}

		// Attempt a non-blocking acquire (shared or exclusive per the
		// template parameter). Returns true on success. [[nodiscard]]
		// because ignoring the result would leak an unknown lock state.
		[[nodiscard]] bool try_lock()
		{
			if constexpr (Shared)
			{
				return ref.try_lock_shared();
			}
			else
			{
				return ref.try_lock();
			}
		}

		// Blocking acquire (shared or exclusive per the template parameter).
		void lock()
		{
			if constexpr (Shared)
			{
				ref.lock_shared();
			}
			else
			{
				ref.lock();
			}
		}
	};
}
struct rsx_iomap_table struct rsx_iomap_table
{ {
static constexpr u32 c_lock_stride = 8192; static constexpr u32 c_lock_stride = 8192;
@ -33,9 +70,9 @@ namespace rsx
for (u32 block = addr / c_lock_stride; block <= (end / c_lock_stride); block += Stride) for (u32 block = addr / c_lock_stride; block <= (end / c_lock_stride); block += Stride)
{ {
auto& mutex_ = rs[block]; auto mutex_ = iomap_helper::io_lock<!IsFullLock>(rs[block]);
if (IsFullLock ? !mutex_.try_lock() : !mutex_.try_lock_shared()) [[ unlikely ]] if (!mutex_.try_lock()) [[ unlikely ]]
{ {
if (self) if (self)
{ {
@ -44,11 +81,11 @@ namespace rsx
if (!self || self->id_type() != 0x55u) if (!self || self->id_type() != 0x55u)
{ {
IsFullLock ? mutex_.lock() : mutex_.lock_shared(); mutex_.lock();
} }
else else
{ {
while (IsFullLock ? !mutex_.try_lock() : !mutex_.try_lock_shared()) while (!mutex_.try_lock())
{ {
self->cpu_wait({}); self->cpu_wait({});
} }

View File

@ -44,26 +44,28 @@ namespace rsx
// Acquires a reservation over one or both of the given address ranges.
// If the ranges do not overlap, only the first range residing in main
// memory is acquired; otherwise the combined min-max envelope is locked.
// No-op unless accurate reservation access is enabled in config.
reservation_lock(u32 dst_addr, u32 dst_length, u32 src_addr, u32 src_length)
{
	if (!g_cfg.core.rsx_accurate_res_access)
	{
		return;
	}

	const auto range1 = utils::address_range::start_length(dst_addr, dst_length);
	const auto range2 = utils::address_range::start_length(src_addr, src_length);
	utils::address_range target_range;

	if (!range1.overlaps(range2)) [[likely]]
	{
		// Disjoint ranges: prefer whichever one lives in main memory
		// (addresses below local_mem_base are XDR/main memory).
		target_range = (dst_addr < constants::local_mem_base) ? range1 : range2;
	}
	else
	{
		// Very unlikely
		target_range = range1.get_min_max(range2);
	}

	// Only main-memory ranges participate in reservation locking;
	// local (VRAM) addresses need no reservation.
	if (target_range.start < constants::local_mem_base)
	{
		lock_range(target_range.start, target_range.length());
	}
}
@ -71,30 +73,43 @@ namespace rsx
template <typename T = void> template <typename T = void>
void update_if_enabled(u32 addr, u32 _length, const std::add_pointer_t<T>& lock_release = std::add_pointer_t<void>{}) void update_if_enabled(u32 addr, u32 _length, const std::add_pointer_t<T>& lock_release = std::add_pointer_t<void>{})
{ {
// This check is not perfect but it covers the important cases fast (this check is only an optimization - forcing true disables it) if (!length || _length <= 1)
if (length && (this->addr / rsx_iomap_table::c_lock_stride != addr / rsx_iomap_table::c_lock_stride || (addr % rsx_iomap_table::c_lock_stride + _length) > rsx_iomap_table::c_lock_stride) && _length > 1)
{ {
if constexpr (!std::is_void_v<T>) return;
{
// See SPUThread.cpp
lock_release->release(0);
}
unlock();
lock_range(addr, _length);
} }
// This check is not perfect but it covers the important cases fast (this check is only an optimization - forcing true disables it)
const bool should_update =
(this->addr / rsx_iomap_table::c_lock_stride) != (addr / rsx_iomap_table::c_lock_stride) || // Lock-addr and test-addr have different locks, update
(addr % rsx_iomap_table::c_lock_stride + _length) > rsx_iomap_table::c_lock_stride; // Test range spills beyond our base section
if (!should_update)
{
return;
}
if constexpr (!std::is_void_v<T>)
{
// See SPUThread.cpp
lock_release->release(0);
}
unlock();
lock_range(addr, _length);
} }
void unlock(bool destructor = false) void unlock(bool destructor = false)
{ {
if (length) if (!length)
{ {
get_current_renderer()->iomap_table.unlock<IsFullLock, Stride>(addr, length); return;
}
if (!destructor) get_current_renderer()->iomap_table.unlock<IsFullLock, Stride>(addr, length);
{
length = 0; if (!destructor)
} {
length = 0;
} }
} }