vm: add extern clear_range_locks function

Allows waiting for range locks to clear over a specified range.
vm::range_lock now also monitors the specified reservation lock.
Author: Nekotekina
Date:   2020-10-30 07:58:16 +03:00
Commit: 3419d15878
Parent: 0da24f21d6

4 changed files with 32 additions and 15 deletions
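In effect, vm::clear_range_locks(addr, size) busy-waits until no SPU range lock overlapping [addr, addr + size) is still held; ppu_store_reservation and spu_thread::do_putllc call it before touching the 128-byte cache line. Below is a minimal standalone model of that wait, assuming lock words that pack begin | (size << 32) as in the diff further down. The demo namespace, the g_range_locks array and its size are illustrative only, not RPCS3's actual implementation.

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <thread>

namespace demo
{
	// Hypothetical set of per-thread range locks; 0 means "not held",
	// otherwise the word packs begin | (u64{size} << 32)
	constexpr std::size_t max_range_locks = 64;
	std::atomic<std::uint64_t> g_range_locks[max_range_locks]{};

	// Spin until no held range lock overlaps [addr, addr + size)
	void clear_range_locks(std::uint32_t addr, std::uint32_t size)
	{
		const std::uint64_t begin = addr;
		const std::uint64_t end = std::uint64_t{addr} + size;

		for (auto& lock : g_range_locks)
		{
			while (true)
			{
				const std::uint64_t val = lock.load();

				if (!val)
				{
					break; // Not held
				}

				const std::uint64_t lock_addr = static_cast<std::uint32_t>(val);
				const std::uint64_t lock_end = lock_addr + (val >> 32);

				if (end <= lock_addr || begin >= lock_end)
				{
					break; // No overlap, this lock cannot interfere
				}

				// Overlapping store in progress: wait for it to release
				std::this_thread::yield();
			}
		}
	}
}

The real function also respects the global g_addr_lock blocker and uses a pause instruction rather than yield; the sketch only models the overlap wait.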

@@ -1807,6 +1807,9 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
 	// Align address: we do not need the lower 7 bits anymore
 	addr &= -128;
 
+	// Wait for range locks to clear
+	vm::clear_range_locks(addr, 128);
+
 	// Cache line data
 	auto& cline_data = vm::_ref<spu_rdata_t>(addr);

@@ -1985,7 +1985,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
 		}
 
 		// Obtain range lock as normal store
-		vm::range_lock(range_lock, eal, size0);
+		vm::range_lock(res, range_lock, eal, size0);
 
 		switch (size0)
 		{
@@ -2057,32 +2057,35 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
 	perf_meter<"DMA_PUT"_u64> perf2;
 
+	// TODO: split range-locked stores in cache lines for consistency
+	auto& res = vm::reservation_acquire(eal, args.size);
+
 	switch (u32 size = args.size)
 	{
 	case 1:
 	{
-		vm::range_lock(range_lock, eal, 1);
+		vm::range_lock(res, range_lock, eal, 1);
 		*reinterpret_cast<u8*>(dst) = *reinterpret_cast<const u8*>(src);
 		range_lock->release(0);
 		break;
 	}
 	case 2:
 	{
-		vm::range_lock(range_lock, eal, 2);
+		vm::range_lock(res, range_lock, eal, 2);
 		*reinterpret_cast<u16*>(dst) = *reinterpret_cast<const u16*>(src);
 		range_lock->release(0);
 		break;
 	}
 	case 4:
 	{
-		vm::range_lock(range_lock, eal, 4);
+		vm::range_lock(res, range_lock, eal, 4);
 		*reinterpret_cast<u32*>(dst) = *reinterpret_cast<const u32*>(src);
 		range_lock->release(0);
 		break;
 	}
 	case 8:
 	{
-		vm::range_lock(range_lock, eal, 8);
+		vm::range_lock(res, range_lock, eal, 8);
 		*reinterpret_cast<u64*>(dst) = *reinterpret_cast<const u64*>(src);
 		range_lock->release(0);
 		break;
@@ -2091,7 +2094,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
 	{
 		if (((eal & 127) + size) <= 128)
 		{
-			vm::range_lock(range_lock, eal, size);
+			vm::range_lock(res, range_lock, eal, size);
 
 			while (size)
 			{
@@ -2117,7 +2120,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
 			size -= size0;
 
 			// Split locking + transfer in two parts (before 64K border, and after it)
-			vm::range_lock(range_lock, range_addr, size0);
+			vm::range_lock(res, range_lock, range_addr, size0);
 
 			// Avoid unaligned stores in mov_rdata_avx
 			if (reinterpret_cast<u64>(dst) & 0x10)
@@ -2151,7 +2154,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
 				range_addr = nexta;
 			}
 
-			vm::range_lock(range_lock, range_addr, range_end - range_addr);
+			vm::range_lock(res, range_lock, range_addr, range_end - range_addr);
 
 			// Avoid unaligned stores in mov_rdata_avx
 			if (reinterpret_cast<u64>(dst) & 0x10)
@@ -2511,6 +2514,9 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
 			return false;
 		}
 
+		// Wait for range locks to clear
+		vm::clear_range_locks(addr, 128);
+
 		vm::_ref<atomic_t<u32>>(addr) += 0;
 
 		auto& super_data = *vm::get_super_ptr<spu_rdata_t>(addr);

@@ -226,11 +226,8 @@ namespace vm
 		return result;
 	}
 
-	static void _lock_shareable_cache(u8 value, u32 addr, u32 size)
+	void clear_range_locks(u32 addr, u32 size)
 	{
-		// Block new range locks
-		g_addr_lock = addr | u64{size} << 32;
-
 		ASSUME(size);
 
 		const auto range = utils::address_range::start_length(addr, size);
@@ -259,6 +256,14 @@ namespace vm
 		}
 	}
 
+	static void _lock_shareable_cache(u8 value, u32 addr, u32 size)
+	{
+		// Block new range locks
+		g_addr_lock = addr | u64{size} << 32;
+
+		clear_range_locks(addr, size);
+	}
+
 	void passive_lock(cpu_thread& cpu)
 	{
 		bool ok = true;

@@ -24,7 +24,7 @@ namespace vm
 	void range_lock_internal(atomic_t<u64, 64>* range_lock, u32 begin, u32 size);
 
 	// Lock memory range
-	FORCE_INLINE void range_lock(atomic_t<u64, 64>* range_lock, u32 begin, u32 size)
+	FORCE_INLINE void range_lock(atomic_t<u64>& res, atomic_t<u64, 64>* range_lock, u32 begin, u32 size)
 	{
 		const u64 lock_val = g_addr_lock.load();
 		const u64 lock_addr = static_cast<u32>(lock_val); // -> u64
@@ -37,14 +37,14 @@ namespace vm
 			addr = addr & 0xffff;
 		}
 
-		if (addr + size <= lock_addr || addr >= lock_addr + lock_size) [[likely]]
+		if ((addr + size <= lock_addr || addr >= lock_addr + lock_size) && !(res.load() & 127)) [[likely]]
 		{
 			// Optimistic locking
 			range_lock->release(begin | (u64{size} << 32));
 
 			const u64 new_lock_val = g_addr_lock.load();
 
-			if (!new_lock_val || new_lock_val == lock_val) [[likely]]
+			if ((!new_lock_val || new_lock_val == lock_val) && !(res.load() & 127)) [[likely]]
 			{
 				return;
 			}
@@ -56,6 +56,9 @@ namespace vm
 		range_lock_internal(range_lock, begin, size);
 	}
 
+	// Wait for all range locks to release in specified range
+	void clear_range_locks(u32 addr, u32 size);
+
 	// Release it
 	void free_range_lock(atomic_t<u64, 64>*) noexcept;
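For context on the reservation check added above: RPCS3's reservation values effectively advance in steps of 128, so nonzero low bits (res & 127) flag a locked store in progress on that cache line, and the optimistic fast path must not succeed while they are set. The sketch below is a standalone model of that updated path with the slow path collapsed into a simple retry loop; the names and the lock-word packing follow the header above, while the demo namespace, the retry loop, and the omission of the shared-memory address remapping are simplifications, not the actual vm implementation.

#include <atomic>
#include <cstdint>
#include <thread>

namespace demo
{
	// Global "blocked range" word: addr | (u64{size} << 32), 0 = nothing blocked
	std::atomic<std::uint64_t> g_addr_lock{0};

	// Acquire a range lock for [begin, begin + size), also watching the reservation
	void range_lock(std::atomic<std::uint64_t>& res, std::atomic<std::uint64_t>* range_lock,
		std::uint32_t begin, std::uint32_t size)
	{
		while (true)
		{
			const std::uint64_t lock_val = g_addr_lock.load();
			const std::uint64_t lock_addr = static_cast<std::uint32_t>(lock_val);
			const std::uint64_t lock_end = lock_addr + (lock_val >> 32);
			const std::uint64_t end = std::uint64_t{begin} + size;

			// Fast path: no overlap with the blocked range and the reservation is idle
			if ((end <= lock_addr || begin >= lock_end) && !(res.load() & 127))
			{
				// Optimistically publish the lock, then re-check both conditions
				range_lock->store(begin | (std::uint64_t{size} << 32));

				const std::uint64_t new_lock_val = g_addr_lock.load();

				if ((!new_lock_val || new_lock_val == lock_val) && !(res.load() & 127))
				{
					return; // Range lock acquired
				}

				// A blocker or a reservation writer appeared: roll back and retry
				range_lock->store(0);
			}

			std::this_thread::yield();
		}
	}
}

Callers pass the reservation obtained for the same address, as the DMA path does above with auto& res = vm::reservation_acquire(eal, args.size).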