From c5c9ea1b214171b7c19ad0de7000d1f9bcbafdaf Mon Sep 17 00:00:00 2001
From: Eladash
Date: Thu, 3 Sep 2020 17:49:13 +0300
Subject: [PATCH] SPU: Make GET's full and aligned cache line accesses atomic
 with Accurate DMA

---
 rpcs3/Emu/Cell/SPUThread.cpp | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp
index 6ea2b15f73..fa03987b18 100644
--- a/rpcs3/Emu/Cell/SPUThread.cpp
+++ b/rpcs3/Emu/Cell/SPUThread.cpp
@@ -1370,8 +1370,8 @@ void spu_thread::do_dma_transfer(const spu_mfc_cmd& args)
 			{
 				const u64 time0 = vm::reservation_acquire(eal, size0);
 
-				// Ignore DMA lock bits
-				if (time0 & (127 & ~vm::dma_lockb))
+				// Ignore DMA lock bit on incomplete cache line accesses
+				if (time0 & (127 - (size0 != 128 ? vm::dma_lockb : 0)))
 				{
 					continue;
 				}
@@ -1422,7 +1422,7 @@ void spu_thread::do_dma_transfer(const spu_mfc_cmd& args)
 					}
 				}
 
-				if (time0 != vm::reservation_acquire(eal, size0))
+				if (time0 != vm::reservation_acquire(eal, size0) || (size0 == 128 && !cmp_rdata(*reinterpret_cast(dst), *reinterpret_cast(src))))
 				{
 					continue;
 				}
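
The change above appears to tighten a seqlock-style optimistic read: sample the reservation timestamp, copy the data, then retry if the timestamp moved, and, for a full 128-byte line, also retry unless the copied bytes still match the source (the cmp_rdata check). Below is a minimal standalone sketch of that pattern, not RPCS3 code; the names (line_timestamp, line_data, DMA_LOCK_BIT, read_line) and bit values are hypothetical stand-ins for the vm/reservation API.

#include <array>
#include <atomic>
#include <cstdint>
#include <cstring>

// Low timestamp bits flag in-progress writers; one of them is a DMA lock bit
// that partial reads may ignore (illustrative values, not RPCS3's).
constexpr std::uint64_t TS_DIRTY_MASK = 127;
constexpr std::uint64_t DMA_LOCK_BIT  = 32;

// One 128-byte "cache line" guarded by a version word (seqlock pattern).
inline std::atomic<std::uint64_t> line_timestamp{0};
inline std::array<std::uint8_t, 128> line_data{};

// Optimistically read `size` bytes (size <= 128) into `dst`.
inline void read_line(std::uint8_t* dst, std::uint32_t size)
{
	for (;;)
	{
		const std::uint64_t time0 = line_timestamp.load(std::memory_order_acquire);

		// Ignore the DMA lock bit on incomplete cache line accesses,
		// mirroring the first hunk of the patch.
		if (time0 & (TS_DIRTY_MASK - (size != 128 ? DMA_LOCK_BIT : 0)))
		{
			continue; // a writer is active, retry
		}

		std::memcpy(dst, line_data.data(), size);

		// Retry if the version moved; for full-line reads also re-verify the
		// copied bytes against the source, as cmp_rdata does in the second hunk.
		if (time0 != line_timestamp.load(std::memory_order_acquire) ||
			(size == 128 && std::memcmp(dst, line_data.data(), 128) != 0))
		{
			continue;
		}

		return; // consistent snapshot obtained
	}
}

The extra byte-compare is applied only to full, aligned 128-byte GETs, which are the accesses the commit title promises to make atomic; smaller transfers keep the cheaper timestamp-only check and may still ignore a concurrent DMA lock.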