SPU: Make GET's full and aligned cache line accesses atomic with Accurate DMA

This commit is contained in:
Eladash 2020-09-03 17:49:13 +03:00 committed by Megamouse
parent 3c43d8fe05
commit c5c9ea1b21

View File

@ -1370,8 +1370,8 @@ void spu_thread::do_dma_transfer(const spu_mfc_cmd& args)
{
const u64 time0 = vm::reservation_acquire(eal, size0);
-			// Ignore DMA lock bits
-			if (time0 & (127 & ~vm::dma_lockb))
+			// Ignore DMA lock bit on incomplete cache line accesses
+			if (time0 & (127 - (size0 != 128 ? vm::dma_lockb : 0)))
{
continue;
}
@ -1422,7 +1422,7 @@ void spu_thread::do_dma_transfer(const spu_mfc_cmd& args)
}
}
-			if (time0 != vm::reservation_acquire(eal, size0))
+			if (time0 != vm::reservation_acquire(eal, size0) || (size0 == 128 && !cmp_rdata(*reinterpret_cast<decltype(spu_thread::rdata)*>(dst), *reinterpret_cast<const decltype(spu_thread::rdata)*>(src))))
{
continue;
}