1
0
mirror of https://github.com/RPCS3/rpcs3.git synced 2025-03-01 16:13:23 +00:00

SPU: simplify unimplemented event check

Move the checks closer to their actual use sites
This commit is contained in:
Nekotekina 2018-07-02 01:02:34 +03:00
parent afd5af04f6
commit a0c0d8b993
2 changed files with 38 additions and 60 deletions

@ -1250,6 +1250,11 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
}
else if (op.e)
{
auto _throw = [](SPUThread* _spu)
{
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, +_spu->ch_event_mask);
};
Label no_intr = c->newLabel();
Label intr = c->newLabel();
Label fail = c->newLabel();
@ -1264,8 +1269,7 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret)
c->jmp(no_intr);
c->bind(fail);
c->mov(SPU_OFF_32(pc), *addr);
c->mov(addr->r64(), reinterpret_cast<u64>(vm::base(0xffdead00)));
c->mov(asmjit::x86::dword_ptr(addr->r64()), "INTR"_u32);
c->jmp(imm_ptr<void(*)(SPUThread*)>(_throw));
// Save addr in srr0 and disable interrupts
c->bind(intr);
@ -1518,10 +1522,26 @@ void spu_recompiler::get_events()
c->jmp(label2);
});
Label fail = c->newLabel();
after.emplace_back([=]
{
auto _throw = [](SPUThread* _spu)
{
fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, +_spu->ch_event_mask);
};
c->bind(fail);
c->jmp(imm_ptr<void(*)(SPUThread*)>(_throw));
});
// Load active events into addr
c->bind(label2);
c->mov(*addr, SPU_OFF_32(ch_event_stat));
c->and_(*addr, SPU_OFF_32(ch_event_mask));
c->mov(qw1->r32(), SPU_OFF_32(ch_event_mask));
c->test(qw1->r32(), ~SPU_EVENT_IMPLEMENTED);
c->jnz(fail);
c->and_(*addr, qw1->r32());
}
void spu_recompiler::UNK(spu_opcode_t op)
@ -2759,46 +2779,13 @@ void spu_recompiler::WRCH(spu_opcode_t op)
}
case SPU_WrEventMask:
{
Label fail = c->newLabel();
Label ret = c->newLabel();
c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
c->mov(*addr, ~SPU_EVENT_IMPLEMENTED);
c->mov(qw1->r32(), ~SPU_EVENT_INTR_IMPLEMENTED);
c->bt(SPU_OFF_8(interrupts_enabled), 0);
c->cmovc(*addr, qw1->r32());
c->test(qw0->r32(), *addr);
c->jnz(fail);
after.emplace_back([=, pos = m_pos]
{
c->bind(fail);
c->mov(SPU_OFF_32(pc), pos);
c->mov(ls->r32(), op.ra);
c->lea(*qw1, x86::qword_ptr(ret));
c->jmp(imm_ptr(spu_wrch));
});
c->mov(SPU_OFF_32(ch_event_mask), qw0->r32());
c->bind(ret);
return;
}
case SPU_WrEventAck:
{
Label fail = c->newLabel();
Label ret = c->newLabel();
c->mov(qw0->r32(), SPU_OFF_32(gpr, op.rt, &v128::_u32, 3));
c->test(qw0->r32(), ~SPU_EVENT_IMPLEMENTED);
c->jnz(fail);
after.emplace_back([=, pos = m_pos]
{
c->bind(fail);
c->mov(SPU_OFF_32(pc), pos);
c->mov(ls->r32(), op.ra);
c->lea(*qw1, x86::qword_ptr(ret));
c->jmp(imm_ptr(spu_wrch));
});
c->not_(qw0->r32());
c->lock().and_(SPU_OFF_32(ch_event_stat), qw0->r32());
return;

@ -1442,6 +1442,13 @@ bool SPUThread::process_mfc_cmd(spu_mfc_cmd args)
u32 SPUThread::get_events(bool waiting)
{
const u32 mask1 = ch_event_mask;
if (mask1 & ~SPU_EVENT_IMPLEMENTED)
{
fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, mask1);
}
// Check reservation status and set SPU_EVENT_LR if lost
if (raddr && (vm::reservation_acquire(raddr, sizeof(rdata)) != rtime || rdata != vm::_ref<decltype(rdata)>(raddr)))
{
@ -1459,9 +1466,9 @@ u32 SPUThread::get_events(bool waiting)
}
// Simple polling or polling with atomically set/removed SPU_EVENT_WAITING flag
return !waiting ? ch_event_stat & ch_event_mask : ch_event_stat.atomic_op([&](u32& stat) -> u32
return !waiting ? ch_event_stat & mask1 : ch_event_stat.atomic_op([&](u32& stat) -> u32
{
if (u32 res = stat & ch_event_mask)
if (u32 res = stat & mask1)
{
stat &= ~SPU_EVENT_WAITING;
return res;
@ -1474,9 +1481,9 @@ u32 SPUThread::get_events(bool waiting)
void SPUThread::set_events(u32 mask)
{
if (u32 unimpl = mask & ~SPU_EVENT_IMPLEMENTED)
if (mask & ~SPU_EVENT_IMPLEMENTED)
{
fmt::throw_exception("Unimplemented events (0x%x)" HERE, unimpl);
fmt::throw_exception("SPU Events not implemented (mask=0x%x)" HERE, mask);
}
// Set new events, get old event mask
@ -1493,11 +1500,12 @@ void SPUThread::set_interrupt_status(bool enable)
{
if (enable)
{
// detect enabling interrupts with events masked
if (u32 mask = ch_event_mask & ~SPU_EVENT_INTR_IMPLEMENTED)
// Detect enabling interrupts with events masked
if (ch_event_mask & ~SPU_EVENT_INTR_IMPLEMENTED)
{
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, mask);
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, +ch_event_mask);
}
interrupts_enabled = true;
}
else
@ -2007,29 +2015,12 @@ bool SPUThread::set_ch_value(u32 ch, u32 value)
case SPU_WrEventMask:
{
// detect masking events with enabled interrupt status
if (value & ~SPU_EVENT_INTR_IMPLEMENTED && interrupts_enabled)
{
fmt::throw_exception("SPU Interrupts not implemented (mask=0x%x)" HERE, value);
}
// detect masking unimplemented events
if (value & ~SPU_EVENT_IMPLEMENTED)
{
break;
}
ch_event_mask = value;
return true;
}
case SPU_WrEventAck:
{
if (value & ~SPU_EVENT_IMPLEMENTED)
{
break;
}
ch_event_stat &= ~value;
return true;
}