diff --git a/asmjit.vcxproj b/asmjit.vcxproj
new file mode 100644
index 0000000000..80ffab87f4
--- /dev/null
+++ b/asmjit.vcxproj
@@ -0,0 +1,169 @@
+
+
+
+
+ Debug
+ Win32
+
+
+ Debug
+ x64
+
+
+ Release
+ Win32
+
+
+ Release
+ x64
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ {AC40FF01-426E-4838-A317-66354CEFAE88}
+ asmjit
+
+
+
+ StaticLibrary
+ true
+ v120
+ Unicode
+
+
+ StaticLibrary
+ true
+ v120
+ Unicode
+
+
+ StaticLibrary
+ false
+ v120
+ true
+ Unicode
+
+
+ StaticLibrary
+ false
+ v120
+ true
+ Unicode
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ .\libs\$(Configuration)\
+
+
+
+
+ .\libs\$(Configuration)\
+
+
+
+
+ .\libs\$(Configuration)\
+
+
+
+
+ .\libs\$(Configuration)\
+
+
+
+
+
+ Level3
+ Disabled
+ true
+ ASMJIT_STATIC;_MBCS;%(PreprocessorDefinitions)
+
+
+ true
+
+
+
+
+ Level3
+ Disabled
+ false
+ ASMJIT_STATIC;_MBCS;%(PreprocessorDefinitions)
+
+
+ true
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ASMJIT_STATIC;_UNICODE;UNICODE;%(PreprocessorDefinitions)
+
+
+ true
+ true
+ true
+
+
+
+
+ Level3
+ MaxSpeed
+ true
+ true
+ true
+ ASMJIT_STATIC;_UNICODE;UNICODE;%(PreprocessorDefinitions)
+
+
+ true
+ true
+ true
+
+
+
+
+
+
\ No newline at end of file
diff --git a/asmjit.vcxproj.filters b/asmjit.vcxproj.filters
new file mode 100644
index 0000000000..bddd91cf68
--- /dev/null
+++ b/asmjit.vcxproj.filters
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/asmjit.vcxproj.user b/asmjit.vcxproj.user
new file mode 100644
index 0000000000..ef5ff2a1fa
--- /dev/null
+++ b/asmjit.vcxproj.user
@@ -0,0 +1,4 @@
+
+
+
+
\ No newline at end of file
diff --git a/rpcs3.sln b/rpcs3.sln
index e8513ead3e..ea7660aef6 100644
--- a/rpcs3.sln
+++ b/rpcs3.sln
@@ -1,6 +1,6 @@
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio 2013
-VisualStudioVersion = 12.0.21005.1
+VisualStudioVersion = 12.0.30110.0
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rpcs3", "rpcs3\rpcs3.vcxproj", "{70CD65B0-91D6-4FAE-9A7B-4AF55D0D1B12}"
ProjectSection(ProjectDependencies) = postProject
@@ -80,6 +80,8 @@ Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "stc", "wxWidgets\build\msw\
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "wxscintilla", "wxWidgets\build\msw\wx_vc10_wxscintilla.vcxproj", "{74827EBD-93DC-5110-BA95-3F2AB029B6B0}"
EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "asmjit", "asmjit.vcxproj", "{AC40FF01-426E-4838-A317-66354CEFAE88}"
+EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Win32 = Debug|Win32
@@ -272,6 +274,14 @@ Global
{74827EBD-93DC-5110-BA95-3F2AB029B6B0}.Release|Win32.Build.0 = Release|Win32
{74827EBD-93DC-5110-BA95-3F2AB029B6B0}.Release|x64.ActiveCfg = Release|x64
{74827EBD-93DC-5110-BA95-3F2AB029B6B0}.Release|x64.Build.0 = Release|x64
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Debug|Win32.ActiveCfg = Debug|Win32
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Debug|Win32.Build.0 = Debug|Win32
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Debug|x64.ActiveCfg = Debug|x64
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Debug|x64.Build.0 = Debug|x64
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Release|Win32.ActiveCfg = Release|Win32
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Release|Win32.Build.0 = Release|Win32
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Release|x64.ActiveCfg = Release|x64
+ {AC40FF01-426E-4838-A317-66354CEFAE88}.Release|x64.Build.0 = Release|x64
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
diff --git a/rpcs3/Emu/Cell/SPURecompiler.h b/rpcs3/Emu/Cell/SPURecompiler.h
new file mode 100644
index 0000000000..f78cdf8339
--- /dev/null
+++ b/rpcs3/Emu/Cell/SPURecompiler.h
@@ -0,0 +1,1752 @@
+#pragma once
+
+#include "Emu/Cell/SPUOpcodes.h"
+#include "Emu/Memory/Memory.h"
+#include "Emu/Cell/SPUThread.h"
+#include "Emu/SysCalls/SysCalls.h"
+
+#define ASMJIT_STATIC
+
+#include "asmjit.h"
+
+using namespace asmjit;
+using namespace asmjit::host;
+
+#define UNIMPLEMENTED() UNK(__FUNCTION__)
+
+struct SPUImmTable
+{
+ __m128i s19_to_s32[1 << 19];
+ __m128i fsmbi_mask[1 << 16];
+
+ SPUImmTable()
+ {
+ // signed numbers table
+ for (u32 i = 0; i < sizeof(s19_to_s32) / sizeof(__m128i); i++)
+ {
+ const u32 v = (i & 0x40000) ? (i | 0xfff80000) : i;
+ s19_to_s32[i].m128i_i32[0] = v;
+ s19_to_s32[i].m128i_i32[1] = v;
+ s19_to_s32[i].m128i_i32[2] = v;
+ s19_to_s32[i].m128i_i32[3] = v;
+ }
+ // FSMBI mask table
+ for (u32 i = 0; i < sizeof(fsmbi_mask) / sizeof(__m128i); i++)
+ {
+ for (u32 j = 0; j < 16; j++)
+ {
+ fsmbi_mask[i].m128i_i8[j] = ((i >> j) & 0x1) ? 0xff : 0;
+ }
+ }
+ }
+};
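+// A pointer to one shared SPUImmTable is passed to every compiled block (the imm_var
+// argument below), so immediate-form instructions can load a pre-expanded 128-bit
+// constant with a single movaps through imm_xmm() instead of building it at runtime.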
+
+class SPURecompiler;
+
+class SPURecompilerCore : public CPUDecoder
+{
+ SPURecompiler* m_enc;
+ SPUInterpreter* m_inter;
+ SPUThread& CPU;
+
+public:
+ JitRuntime runtime;
+ Compiler compiler;
+
+ struct SPURecEntry
+ {
+ u16 host; // absolute position of first instruction of current block
+ u16 count; // count of instructions compiled from current point (and to be checked)
+ u32 valid; // copy of valid opcode for validation
+ void* pointer; // pointer to executable memory object
+ };
+
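+ // One entry per 32-bit instruction slot in the 256 KB local store
+ // (0x40000 bytes / 4 = 0x10000 positions), indexed by PC >> 2.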
+ SPURecEntry entry[0x10000];
+
+ SPURecompilerCore(SPUThread& cpu);
+
+ ~SPURecompilerCore();
+
+ void Compile(u16 pos);
+
+ virtual void Decode(const u32 code);
+
+ virtual u8 DecodeMemory(const u64 address);
+};
+
+#define cpu_xmm(x) oword_ptr(*cpu_var, offsetof(SPUThread, x))
+#define cpu_qword(x) qword_ptr(*cpu_var, offsetof(SPUThread, x))
+#define cpu_dword(x,...) dword_ptr(*cpu_var, __VA_ARGS__, offsetof(SPUThread, x))
+#define cpu_word(x) word_ptr(*cpu_var, offsetof(SPUThread, x))
+#define cpu_byte(x) byte_ptr(*cpu_var, offsetof(SPUThread, x))
+
+#define imm_xmm(x) oword_ptr(*imm_var, offsetof(SPUImmTable, x))
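+// Illustrative expansion (assuming MSVC's offsetof accepts the subscripted member):
+// cpu_xmm(GPR[rt]) becomes oword_ptr(*cpu_var, offsetof(SPUThread, GPR[rt])), i.e. a
+// 16-byte operand at the SPUThread base register plus the byte offset of GPR[rt].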
+
+class SPURecompiler : public SPUOpcodes
+{
+private:
+ SPUThread& CPU;
+ SPURecompilerCore& rec;
+ Compiler& c;
+
+public:
+ bool do_finalize;
+ GpVar* cpu_var;
+ GpVar* ls_var;
+ GpVar* imm_var;
+
+ SPURecompiler(SPUThread& cpu, SPURecompilerCore& rec) : CPU(cpu), rec(rec), c(rec.compiler)
+ {
+ }
+
+private:
+ //0 - 10
+ void STOP(u32 code)
+ {
+ UNIMPLEMENTED();
+ }
+ void LNOP()
+ {
+ UNIMPLEMENTED();
+ }
+ void SYNC(u32 Cbit)
+ {
+ UNIMPLEMENTED();
+ }
+ void DSYNC()
+ {
+ UNIMPLEMENTED();
+ }
+ void MFSPR(u32 rt, u32 sa)
+ {
+ UNIMPLEMENTED();
+ //If register is a dummy register (register labeled 0x0)
+ if(sa == 0x0)
+ {
+ CPU.GPR[rt]._u128.hi = 0x0;
+ CPU.GPR[rt]._u128.lo = 0x0;
+ }
+ else
+ {
+ CPU.GPR[rt]._u128.hi = CPU.SPR[sa]._u128.hi;
+ CPU.GPR[rt]._u128.lo = CPU.SPR[sa]._u128.lo;
+ }
+ }
+ void RDCH(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.ReadChannel(CPU.GPR[rt], ra);
+ }
+ void RCHCNT(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt].Reset();
+ CPU.GPR[rt]._u32[3] = CPU.GetChannelCount(ra);
+ }
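+ // SF (Subtract From Word) is one of the few opcodes compiled directly here:
+ // rt = rb - ra per 32-bit element, hence rb is loaded first and psubd subtracts ra.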
+ void SF(u32 rt, u32 ra, u32 rb)
+ {
+ XmmVar v0(c);
+ if (ra == rb)
+ {
+ // zero
+ c.xorps(v0, v0);
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ else
+ {
+ // sub from
+ c.movdqa(v0, cpu_xmm(GPR[rb]));
+ c.psubd(v0, cpu_xmm(GPR[ra]));
+ c.movdqa(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ void OR(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = CPU.GPR[ra]._u32[0] | CPU.GPR[rb]._u32[0];
+ CPU.GPR[rt]._u32[1] = CPU.GPR[ra]._u32[1] | CPU.GPR[rb]._u32[1];
+ CPU.GPR[rt]._u32[2] = CPU.GPR[ra]._u32[2] | CPU.GPR[rb]._u32[2];
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._u32[3] | CPU.GPR[rb]._u32[3];
+ }
+ void BG(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = CPU.GPR[ra]._u32[0] > CPU.GPR[rb]._u32[0] ? 0 : 1;
+ CPU.GPR[rt]._u32[1] = CPU.GPR[ra]._u32[1] > CPU.GPR[rb]._u32[1] ? 0 : 1;
+ CPU.GPR[rt]._u32[2] = CPU.GPR[ra]._u32[2] > CPU.GPR[rb]._u32[2] ? 0 : 1;
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._u32[3] > CPU.GPR[rb]._u32[3] ? 0 : 1;
+ }
+ void SFH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = CPU.GPR[rb]._u16[h] - CPU.GPR[ra]._u16[h];
+ }
+ void NOR(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = ~(CPU.GPR[ra]._u32[0] | CPU.GPR[rb]._u32[0]);
+ CPU.GPR[rt]._u32[1] = ~(CPU.GPR[ra]._u32[1] | CPU.GPR[rb]._u32[1]);
+ CPU.GPR[rt]._u32[2] = ~(CPU.GPR[ra]._u32[2] | CPU.GPR[rb]._u32[2]);
+ CPU.GPR[rt]._u32[3] = ~(CPU.GPR[ra]._u32[3] | CPU.GPR[rb]._u32[3]);
+ }
+ void ABSDB(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = CPU.GPR[rb]._u8[b] > CPU.GPR[ra]._u8[b] ? CPU.GPR[rb]._u8[b] - CPU.GPR[ra]._u8[b] : CPU.GPR[ra]._u8[b] - CPU.GPR[rb]._u8[b];
+ }
+ void ROT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = (CPU.GPR[ra]._u32[0] << (CPU.GPR[rb]._u32[0] & 0x1f)) | (CPU.GPR[ra]._u32[0] >> (32 - (CPU.GPR[rb]._u32[0] & 0x1f)));
+ CPU.GPR[rt]._u32[1] = (CPU.GPR[ra]._u32[1] << (CPU.GPR[rb]._u32[1] & 0x1f)) | (CPU.GPR[ra]._u32[1] >> (32 - (CPU.GPR[rb]._u32[1] & 0x1f)));
+ CPU.GPR[rt]._u32[2] = (CPU.GPR[ra]._u32[2] << (CPU.GPR[rb]._u32[2] & 0x1f)) | (CPU.GPR[ra]._u32[2] >> (32 - (CPU.GPR[rb]._u32[2] & 0x1f)));
+ CPU.GPR[rt]._u32[3] = (CPU.GPR[ra]._u32[3] << (CPU.GPR[rb]._u32[3] & 0x1f)) | (CPU.GPR[ra]._u32[3] >> (32 - (CPU.GPR[rb]._u32[3] & 0x1f)));
+ }
+ void ROTM(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = ((0 - CPU.GPR[rb]._u32[0]) % 64) < 32 ? CPU.GPR[ra]._u32[0] >> ((0 - CPU.GPR[rb]._u32[0]) % 64) : 0;
+ CPU.GPR[rt]._u32[1] = ((0 - CPU.GPR[rb]._u32[1]) % 64) < 32 ? CPU.GPR[ra]._u32[1] >> ((0 - CPU.GPR[rb]._u32[1]) % 64) : 0;
+ CPU.GPR[rt]._u32[2] = ((0 - CPU.GPR[rb]._u32[2]) % 64) < 32 ? CPU.GPR[ra]._u32[2] >> ((0 - CPU.GPR[rb]._u32[2]) % 64) : 0;
+ CPU.GPR[rt]._u32[3] = ((0 - CPU.GPR[rb]._u32[3]) % 64) < 32 ? CPU.GPR[ra]._u32[3] >> ((0 - CPU.GPR[rb]._u32[3]) % 64) : 0;
+ }
+ void ROTMA(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._i32[0] = ((0 - CPU.GPR[rb]._i32[0]) % 64) < 32 ? CPU.GPR[ra]._i32[0] >> ((0 - CPU.GPR[rb]._i32[0]) % 64) : CPU.GPR[ra]._i32[0] >> 31;
+ CPU.GPR[rt]._i32[1] = ((0 - CPU.GPR[rb]._i32[1]) % 64) < 32 ? CPU.GPR[ra]._i32[1] >> ((0 - CPU.GPR[rb]._i32[1]) % 64) : CPU.GPR[ra]._i32[1] >> 31;
+ CPU.GPR[rt]._i32[2] = ((0 - CPU.GPR[rb]._i32[2]) % 64) < 32 ? CPU.GPR[ra]._i32[2] >> ((0 - CPU.GPR[rb]._i32[2]) % 64) : CPU.GPR[ra]._i32[2] >> 31;
+ CPU.GPR[rt]._i32[3] = ((0 - CPU.GPR[rb]._i32[3]) % 64) < 32 ? CPU.GPR[ra]._i32[3] >> ((0 - CPU.GPR[rb]._i32[3]) % 64) : CPU.GPR[ra]._i32[3] >> 31;
+ }
+ void SHL(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = (CPU.GPR[rb]._u32[0] & 0x3f) > 31 ? 0 : CPU.GPR[ra]._u32[0] << (CPU.GPR[rb]._u32[0] & 0x3f);
+ CPU.GPR[rt]._u32[1] = (CPU.GPR[rb]._u32[1] & 0x3f) > 31 ? 0 : CPU.GPR[ra]._u32[1] << (CPU.GPR[rb]._u32[1] & 0x3f);
+ CPU.GPR[rt]._u32[2] = (CPU.GPR[rb]._u32[2] & 0x3f) > 31 ? 0 : CPU.GPR[ra]._u32[2] << (CPU.GPR[rb]._u32[2] & 0x3f);
+ CPU.GPR[rt]._u32[3] = (CPU.GPR[rb]._u32[3] & 0x3f) > 31 ? 0 : CPU.GPR[ra]._u32[3] << (CPU.GPR[rb]._u32[3] & 0x3f);
+ }
+ void ROTH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = (CPU.GPR[ra]._u16[h] << (CPU.GPR[rb]._u16[h] & 0xf)) | (CPU.GPR[ra]._u16[h] >> (16 - (CPU.GPR[rb]._u16[h] & 0xf)));
+ }
+ void ROTHM(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = ((0 - CPU.GPR[rb]._u16[h]) % 32) < 16 ? CPU.GPR[ra]._u16[h] >> ((0 - CPU.GPR[rb]._u16[h]) % 32) : 0;
+ }
+ void ROTMAH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = ((0 - CPU.GPR[rb]._i16[h]) % 32) < 16 ? CPU.GPR[ra]._i16[h] >> ((0 - CPU.GPR[rb]._i16[h]) % 32) : CPU.GPR[ra]._i16[h] >> 15;
+ }
+ void SHLH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = (CPU.GPR[rb]._u16[h] & 0x1f) > 15 ? 0 : CPU.GPR[ra]._u16[h] << (CPU.GPR[rb]._u16[h] & 0x1f);
+ }
+ void ROTI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = i7 & 0x1f;
+ CPU.GPR[rt]._u32[0] = (CPU.GPR[ra]._u32[0] << nRot) | (CPU.GPR[ra]._u32[0] >> (32 - nRot));
+ CPU.GPR[rt]._u32[1] = (CPU.GPR[ra]._u32[1] << nRot) | (CPU.GPR[ra]._u32[1] >> (32 - nRot));
+ CPU.GPR[rt]._u32[2] = (CPU.GPR[ra]._u32[2] << nRot) | (CPU.GPR[ra]._u32[2] >> (32 - nRot));
+ CPU.GPR[rt]._u32[3] = (CPU.GPR[ra]._u32[3] << nRot) | (CPU.GPR[ra]._u32[3] >> (32 - nRot));
+ }
+ void ROTMI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = (0 - i7) % 64;
+ CPU.GPR[rt]._u32[0] = nRot < 32 ? CPU.GPR[ra]._u32[0] >> nRot : 0;
+ CPU.GPR[rt]._u32[1] = nRot < 32 ? CPU.GPR[ra]._u32[1] >> nRot : 0;
+ CPU.GPR[rt]._u32[2] = nRot < 32 ? CPU.GPR[ra]._u32[2] >> nRot : 0;
+ CPU.GPR[rt]._u32[3] = nRot < 32 ? CPU.GPR[ra]._u32[3] >> nRot : 0;
+ }
+ void ROTMAI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = (0 - i7) % 64;
+ CPU.GPR[rt]._i32[0] = nRot < 32 ? CPU.GPR[ra]._i32[0] >> nRot : CPU.GPR[ra]._i32[0] >> 31;
+ CPU.GPR[rt]._i32[1] = nRot < 32 ? CPU.GPR[ra]._i32[1] >> nRot : CPU.GPR[ra]._i32[1] >> 31;
+ CPU.GPR[rt]._i32[2] = nRot < 32 ? CPU.GPR[ra]._i32[2] >> nRot : CPU.GPR[ra]._i32[2] >> 31;
+ CPU.GPR[rt]._i32[3] = nRot < 32 ? CPU.GPR[ra]._i32[3] >> nRot : CPU.GPR[ra]._i32[3] >> 31;
+ }
+ void SHLI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const u32 s = i7 & 0x3f;
+
+ for (u32 j = 0; j < 4; ++j)
+ CPU.GPR[rt]._u32[j] = CPU.GPR[ra]._u32[j] << s;
+ }
+ void ROTHI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = i7 & 0xf;
+
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = (CPU.GPR[ra]._u16[h] << nRot) | (CPU.GPR[ra]._u16[h] >> (16 - nRot));
+ }
+ void ROTHMI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = (0 - i7) % 32;
+
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = nRot < 16 ? CPU.GPR[ra]._u16[h] >> nRot : 0;
+ }
+ void ROTMAHI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = (0 - i7) % 32;
+
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = nRot < 16 ? CPU.GPR[ra]._i16[h] >> nRot : CPU.GPR[ra]._i16[h] >> 15;
+ }
+ void SHLHI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int nRot = i7 & 0x1f;
+
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = nRot > 15 ? 0 : CPU.GPR[ra]._u16[h] << nRot;
+ }
+ void A(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = CPU.GPR[ra]._u32[0] + CPU.GPR[rb]._u32[0];
+ CPU.GPR[rt]._u32[1] = CPU.GPR[ra]._u32[1] + CPU.GPR[rb]._u32[1];
+ CPU.GPR[rt]._u32[2] = CPU.GPR[ra]._u32[2] + CPU.GPR[rb]._u32[2];
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3];
+ }
+ void AND(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = CPU.GPR[ra]._u32[0] & CPU.GPR[rb]._u32[0];
+ CPU.GPR[rt]._u32[1] = CPU.GPR[ra]._u32[1] & CPU.GPR[rb]._u32[1];
+ CPU.GPR[rt]._u32[2] = CPU.GPR[ra]._u32[2] & CPU.GPR[rb]._u32[2];
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._u32[3] & CPU.GPR[rb]._u32[3];
+ }
+ void CG(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = ((CPU.GPR[ra]._u32[0] + CPU.GPR[rb]._u32[0]) < CPU.GPR[ra]._u32[0]) ? 1 : 0;
+ CPU.GPR[rt]._u32[1] = ((CPU.GPR[ra]._u32[1] + CPU.GPR[rb]._u32[1]) < CPU.GPR[ra]._u32[1]) ? 1 : 0;
+ CPU.GPR[rt]._u32[2] = ((CPU.GPR[ra]._u32[2] + CPU.GPR[rb]._u32[2]) < CPU.GPR[ra]._u32[2]) ? 1 : 0;
+ CPU.GPR[rt]._u32[3] = ((CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3]) < CPU.GPR[ra]._u32[3]) ? 1 : 0;
+ }
+ void AH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = CPU.GPR[ra]._u16[h] + CPU.GPR[rb]._u16[h];
+ }
+ void NAND(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = ~(CPU.GPR[ra]._u32[0] & CPU.GPR[rb]._u32[0]);
+ CPU.GPR[rt]._u32[1] = ~(CPU.GPR[ra]._u32[1] & CPU.GPR[rb]._u32[1]);
+ CPU.GPR[rt]._u32[2] = ~(CPU.GPR[ra]._u32[2] & CPU.GPR[rb]._u32[2]);
+ CPU.GPR[rt]._u32[3] = ~(CPU.GPR[ra]._u32[3] & CPU.GPR[rb]._u32[3]);
+ }
+ void AVGB(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = (CPU.GPR[ra]._u8[b] + CPU.GPR[rb]._u8[b] + 1) >> 1;
+ }
+ void MTSPR(u32 rt, u32 sa)
+ {
+ UNIMPLEMENTED();
+ if(sa != 0)
+ {
+ CPU.SPR[sa]._u128.hi = CPU.GPR[rt]._u128.hi;
+ CPU.SPR[sa]._u128.lo = CPU.GPR[rt]._u128.lo;
+ }
+ }
+ void WRCH(u32 ra, u32 rt)
+ {
+ UNIMPLEMENTED();
+ CPU.WriteChannel(ra, CPU.GPR[rt]);
+ }
+ void BIZ(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[rt]._u32[3] == 0)
+ CPU.SetBranch(branchTarget(CPU.GPR[ra]._u32[3], 0));
+ }
+ void BINZ(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[rt]._u32[3] != 0)
+ CPU.SetBranch(branchTarget(CPU.GPR[ra]._u32[3], 0));
+ }
+ void BIHZ(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[rt]._u16[6] == 0)
+ CPU.SetBranch(branchTarget(CPU.GPR[ra]._u32[3], 0));
+ }
+ void BIHNZ(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[rt]._u16[6] != 0)
+ CPU.SetBranch(branchTarget(CPU.GPR[ra]._u32[3], 0));
+ }
+ void STOPD(u32 rc, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ Emu.Pause();
+ }
+ void STQX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ u32 lsa = (CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3]) & 0x3fff0;
+ if(!CPU.IsGoodLSA(lsa))
+ {
+ ConLog.Error("STQX: bad lsa (0x%x)", lsa);
+ Emu.Pause();
+ return;
+ }
+
+ CPU.WriteLS128(lsa, CPU.GPR[rt]._u128);
+ }
+ void BI(u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.SetBranch(branchTarget(CPU.GPR[ra]._u32[3], 0));
+ }
+ void BISL(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ const u32 NewPC = CPU.GPR[ra]._u32[3];
+ CPU.GPR[rt].Reset();
+ CPU.GPR[rt]._u32[3] = CPU.PC + 4;
+ CPU.SetBranch(branchTarget(NewPC, 0));
+ }
+ void IRET(u32 ra)
+ {
+ UNIMPLEMENTED();
+ //SetBranch(SRR0);
+ }
+ void BISLED(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ }
+ void HBR(u32 p, u32 ro, u32 ra)
+ {
+ UNIMPLEMENTED();
+ }
+ void GB(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[3] = (CPU.GPR[ra]._u32[0] & 1) |
+ ((CPU.GPR[ra]._u32[1] & 1) << 1) |
+ ((CPU.GPR[ra]._u32[2] & 1) << 2) |
+ ((CPU.GPR[ra]._u32[3] & 1) << 3);
+ CPU.GPR[rt]._u32[2] = 0;
+ CPU.GPR[rt]._u64[0] = 0;
+ }
+ void GBH(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ u32 temp = 0;
+ for (int h = 0; h < 8; h++)
+ temp |= (CPU.GPR[ra]._u16[h] & 1) << h;
+ CPU.GPR[rt]._u32[3] = temp;
+ CPU.GPR[rt]._u32[2] = 0;
+ CPU.GPR[rt]._u64[0] = 0;
+ }
+ void GBB(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ u32 temp = 0;
+ for (int b = 0; b < 16; b++)
+ temp |= (CPU.GPR[ra]._u8[b] & 1) << b;
+ CPU.GPR[rt]._u32[3] = temp;
+ CPU.GPR[rt]._u32[2] = 0;
+ CPU.GPR[rt]._u64[0] = 0;
+ }
+ void FSM(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ const u32 pref = CPU.GPR[ra]._u32[3];
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = (pref & (1 << w)) ? ~0 : 0;
+ }
+ void FSMH(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ const u32 pref = CPU.GPR[ra]._u32[3];
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = (pref & (1 << h)) ? ~0 : 0;
+ }
+ void FSMB(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ const u32 pref = CPU.GPR[ra]._u32[3];
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = (pref & (1 << b)) ? ~0 : 0;
+ }
+ void FREST(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ //CPU.GPR[rt]._m128 = _mm_rcp_ps(CPU.GPR[ra]._m128);
+ for (int i = 0; i < 4; i++)
+ CPU.GPR[rt]._f[i] = 1 / CPU.GPR[ra]._f[i];
+ }
+ void FRSQEST(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ //const __u32x4 FloatAbsMask = {0x7fffffff, 0x7fffffff, 0x7fffffff, 0x7fffffff};
+ //CPU.GPR[rt]._m128 = _mm_rsqrt_ps(_mm_and_ps(CPU.GPR[ra]._m128, FloatAbsMask.m128));
+ for (int i = 0; i < 4; i++)
+ CPU.GPR[rt]._f[i] = 1 / sqrt(abs(CPU.GPR[ra]._f[i]));
+ }
+ void LQX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ u32 a = CPU.GPR[ra]._u32[3], b = CPU.GPR[rb]._u32[3];
+
+ u32 lsa = (a + b) & 0x3fff0;
+
+ if(!CPU.IsGoodLSA(lsa))
+ {
+ ConLog.Error("LQX: bad lsa (0x%x)", lsa);
+ Emu.Pause();
+ return;
+ }
+
+ CPU.GPR[rt]._u128 = CPU.ReadLS128(lsa);
+ }
+ void ROTQBYBI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int s = (CPU.GPR[rb]._u32[3] >> 3) & 0xf;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[(b - s) & 0xf];
+ }
+ void ROTQMBYBI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int s = (0 - (CPU.GPR[rb]._u32[3] >> 3)) & 0x1f;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ for (int b = 0; b < 16 - s; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[b + s];
+ }
+ void SHLQBYBI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int s = (CPU.GPR[rb]._u32[3] >> 3) & 0x1f;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ for (int b = s; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[b - s];
+ }
+ void CBX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const u32 t = (CPU.GPR[rb]._u32[3] + CPU.GPR[ra]._u32[3]) & 0xF;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u8[15 - t] = 0x03;
+ }
+ void CHX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const u32 t = (CPU.GPR[rb]._u32[3] + CPU.GPR[ra]._u32[3]) & 0xE;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u16[7 - (t >> 1)] = 0x0203;
+ }
+ void CWX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const u32 t = (CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3]) & 0xC;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u32[3 - (t >> 2)] = 0x00010203;
+ }
+ void CDX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const u32 t = (CPU.GPR[rb]._u32[3] + CPU.GPR[ra]._u32[3]) & 0x8;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u64[1 - (t >> 3)] = (u64)0x0001020304050607;
+ }
+ void ROTQBI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int t = CPU.GPR[rb]._u32[3] & 0x7;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt]._u32[0] = (temp._u32[0] << t) | (temp._u32[3] >> (32 - t));
+ CPU.GPR[rt]._u32[1] = (temp._u32[1] << t) | (temp._u32[0] >> (32 - t));
+ CPU.GPR[rt]._u32[2] = (temp._u32[2] << t) | (temp._u32[1] >> (32 - t));
+ CPU.GPR[rt]._u32[3] = (temp._u32[3] << t) | (temp._u32[2] >> (32 - t));
+ }
+ void ROTQMBI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int t = (0 - CPU.GPR[rb]._u32[3]) & 0x7;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt]._u32[0] = (temp._u32[0] >> t) | (temp._u32[1] << (32 - t));
+ CPU.GPR[rt]._u32[1] = (temp._u32[1] >> t) | (temp._u32[2] << (32 - t));
+ CPU.GPR[rt]._u32[2] = (temp._u32[2] >> t) | (temp._u32[3] << (32 - t));
+ CPU.GPR[rt]._u32[3] = (temp._u32[3] >> t);
+ }
+ void SHLQBI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int t = CPU.GPR[rb]._u32[3] & 0x7;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt]._u32[0] = (temp._u32[0] << t);
+ CPU.GPR[rt]._u32[1] = (temp._u32[1] << t) | (temp._u32[0] >> (32 - t));
+ CPU.GPR[rt]._u32[2] = (temp._u32[2] << t) | (temp._u32[1] >> (32 - t));
+ CPU.GPR[rt]._u32[3] = (temp._u32[3] << t) | (temp._u32[2] >> (32 - t));
+ }
+ void ROTQBY(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int s = CPU.GPR[rb]._u32[3] & 0xf;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ for (int b = 0; b < 16; ++b)
+ CPU.GPR[rt]._u8[b] = temp._u8[(b - s) & 0xf];
+ }
+ void ROTQMBY(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int s = (0 - CPU.GPR[rb]._u32[3]) & 0x1f;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ for (int b = 0; b < 16 - s; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[b + s];
+ }
+ void SHLQBY(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const int s = CPU.GPR[rb]._u32[3] & 0x1f;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ for (int b = s; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[b - s];
+ }
+ void ORX(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._u32[0] | CPU.GPR[ra]._u32[1] | CPU.GPR[ra]._u32[2] | CPU.GPR[ra]._u32[3];
+ CPU.GPR[rt]._u32[2] = 0;
+ CPU.GPR[rt]._u64[0] = 0;
+ }
+ void CBD(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int t = (CPU.GPR[ra]._u32[3] + i7) & 0xF;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u8[15 - t] = 0x03;
+ }
+ void CHD(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int t = (CPU.GPR[ra]._u32[3] + i7) & 0xE;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u16[7 - (t >> 1)] = 0x0203;
+ }
+ void CWD(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int t = (CPU.GPR[ra]._u32[3] + i7) & 0xC;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u32[3 - (t >> 2)] = 0x00010203;
+ }
+ void CDD(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int t = (CPU.GPR[ra]._u32[3] + i7) & 0x8;
+
+ CPU.GPR[rt]._u64[0] = (u64)0x18191A1B1C1D1E1F;
+ CPU.GPR[rt]._u64[1] = (u64)0x1011121314151617;
+ CPU.GPR[rt]._u64[1 - (t >> 3)] = (u64)0x0001020304050607;
+ }
+ void ROTQBII(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int s = i7 & 0x7;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt]._u32[0] = (temp._u32[0] << s) | (temp._u32[3] >> (32 - s));
+ CPU.GPR[rt]._u32[1] = (temp._u32[1] << s) | (temp._u32[0] >> (32 - s));
+ CPU.GPR[rt]._u32[2] = (temp._u32[2] << s) | (temp._u32[1] >> (32 - s));
+ CPU.GPR[rt]._u32[3] = (temp._u32[3] << s) | (temp._u32[2] >> (32 - s));
+ }
+ void ROTQMBII(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int s = (0 - i7) & 0x7;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt]._u32[0] = (temp._u32[0] >> s) | (temp._u32[1] << (32 - s));
+ CPU.GPR[rt]._u32[1] = (temp._u32[1] >> s) | (temp._u32[2] << (32 - s));
+ CPU.GPR[rt]._u32[2] = (temp._u32[2] >> s) | (temp._u32[3] << (32 - s));
+ CPU.GPR[rt]._u32[3] = (temp._u32[3] >> s);
+ }
+ void SHLQBII(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int s = i7 & 0x7;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt]._u32[0] = (temp._u32[0] << s);
+ CPU.GPR[rt]._u32[1] = (temp._u32[1] << s) | (temp._u32[0] >> (32 - s));
+ CPU.GPR[rt]._u32[2] = (temp._u32[2] << s) | (temp._u32[1] >> (32 - s));
+ CPU.GPR[rt]._u32[3] = (temp._u32[3] << s) | (temp._u32[2] >> (32 - s));
+ }
+ void ROTQBYI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int s = i7 & 0xf;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[(b - s) & 0xf];
+ }
+ void ROTQMBYI(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const int s = (0 - i7) & 0x1f;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ for (int b = 0; b < 16 - s; b++)
+ CPU.GPR[rt]._u8[b] = temp._u8[b + s];
+ }
+ void SHLQBYI(u32 rt, u32 ra, s32 i7)
+ {
+ const int s = i7 & 0x1f;
+ XmmVar v0(c);
+ if (s == 0)
+ {
+ if (ra == rt)
+ {
+ // nop
+ }
+ else
+ {
+ // mov
+ c.movaps(v0, cpu_xmm(GPR[ra]));
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ else if (s > 15)
+ {
+ // zero
+ c.xorps(v0, v0);
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ else
+ {
+ // shift left
+ c.movdqa(v0, cpu_xmm(GPR[ra]));
+ c.pslldq(v0, s);
+ c.movdqa(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ void NOP(u32 rt)
+ {
+ UNIMPLEMENTED();
+ }
+ void CGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._i32[w] > CPU.GPR[rb]._i32[w] ? 0xffffffff : 0;
+ }
+ void XOR(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u32[w] ^ CPU.GPR[rb]._u32[w];
+ }
+ void CGTH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = CPU.GPR[ra]._i16[h] > CPU.GPR[rb]._i16[h] ? 0xffff : 0;
+ }
+ void EQV(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u32[w] ^ (~CPU.GPR[rb]._u32[w]);
+ }
+ void CGTB(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = CPU.GPR[ra]._i8[b] > CPU.GPR[rb]._i8[b] ? 0xff : 0;
+ }
+ void SUMB(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ const SPU_GPR_hdr _a = CPU.GPR[ra];
+ const SPU_GPR_hdr _b = CPU.GPR[rb];
+ for (int w = 0; w < 4; w++)
+ {
+ CPU.GPR[rt]._u16[w*2] = _a._u8[w*4] + _a._u8[w*4 + 1] + _a._u8[w*4 + 2] + _a._u8[w*4 + 3];
+ CPU.GPR[rt]._u16[w*2 + 1] = _b._u8[w*4] + _b._u8[w*4 + 1] + _b._u8[w*4 + 2] + _b._u8[w*4 + 3];
+ }
+ }
+ //HGT uses signed values. HLGT uses unsigned values
+ void HGT(u32 rt, s32 ra, s32 rb)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[ra]._i32[3] > CPU.GPR[rb]._i32[3]) CPU.Stop();
+ }
+ void CLZ(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ {
+ int nPos;
+
+ for (nPos = 0; nPos < 32; nPos++)
+ if (CPU.GPR[ra]._u32[w] & (1 << (31 - nPos)))
+ break;
+
+ CPU.GPR[rt]._u32[w] = nPos;
+ }
+ }
+ void XSWD(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._i64[0] = (s64)CPU.GPR[ra]._i32[0];
+ CPU.GPR[rt]._i64[1] = (s64)CPU.GPR[ra]._i32[2];
+ }
+ void XSHW(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = (s32)CPU.GPR[ra]._i16[w*2];
+ }
+ void CNTB(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ for (int b = 0; b < 16; b++)
+ for (int i = 0; i < 8; i++)
+ CPU.GPR[rt]._u8[b] += (temp._u8[b] & (1 << i)) ? 1 : 0;
+ }
+ void XSBH(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = (s16)CPU.GPR[ra]._i8[h*2];
+ }
+ void CLGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for(u32 i = 0; i < 4; ++i)
+ {
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[ra]._u32[i] > CPU.GPR[rb]._u32[i]) ? 0xffffffff : 0x00000000;
+ }
+ }
+ void ANDC(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u32[w] & (~CPU.GPR[rb]._u32[w]);
+ }
+ void FCGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = CPU.GPR[ra]._f[0] > CPU.GPR[rb]._f[0] ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[1] = CPU.GPR[ra]._f[1] > CPU.GPR[rb]._f[1] ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[2] = CPU.GPR[ra]._f[2] > CPU.GPR[rb]._f[2] ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._f[3] > CPU.GPR[rb]._f[3] ? 0xffffffff : 0;
+ }
+ void DFCGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u64[0] = CPU.GPR[ra]._d[0] > CPU.GPR[rb]._d[0] ? 0xffffffffffffffff : 0;
+ CPU.GPR[rt]._u64[1] = CPU.GPR[ra]._d[1] > CPU.GPR[rb]._d[1] ? 0xffffffffffffffff : 0;
+ }
+ void FA(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[0] = CPU.GPR[ra]._f[0] + CPU.GPR[rb]._f[0];
+ CPU.GPR[rt]._f[1] = CPU.GPR[ra]._f[1] + CPU.GPR[rb]._f[1];
+ CPU.GPR[rt]._f[2] = CPU.GPR[ra]._f[2] + CPU.GPR[rb]._f[2];
+ CPU.GPR[rt]._f[3] = CPU.GPR[ra]._f[3] + CPU.GPR[rb]._f[3];
+ }
+ void FS(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[0] = CPU.GPR[ra]._f[0] - CPU.GPR[rb]._f[0];
+ CPU.GPR[rt]._f[1] = CPU.GPR[ra]._f[1] - CPU.GPR[rb]._f[1];
+ CPU.GPR[rt]._f[2] = CPU.GPR[ra]._f[2] - CPU.GPR[rb]._f[2];
+ CPU.GPR[rt]._f[3] = CPU.GPR[ra]._f[3] - CPU.GPR[rb]._f[3];
+ }
+ void FM(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[0] = CPU.GPR[ra]._f[0] * CPU.GPR[rb]._f[0];
+ CPU.GPR[rt]._f[1] = CPU.GPR[ra]._f[1] * CPU.GPR[rb]._f[1];
+ CPU.GPR[rt]._f[2] = CPU.GPR[ra]._f[2] * CPU.GPR[rb]._f[2];
+ CPU.GPR[rt]._f[3] = CPU.GPR[ra]._f[3] * CPU.GPR[rb]._f[3];
+ }
+ void CLGTH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = CPU.GPR[ra]._u16[h] > CPU.GPR[rb]._u16[h] ? 0xffff : 0;
+ }
+ void ORC(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u32[w] | (~CPU.GPR[rb]._u32[w]);
+ }
+ void FCMGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = fabs(CPU.GPR[ra]._f[0]) > fabs(CPU.GPR[rb]._f[0]) ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[1] = fabs(CPU.GPR[ra]._f[1]) > fabs(CPU.GPR[rb]._f[1]) ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[2] = fabs(CPU.GPR[ra]._f[2]) > fabs(CPU.GPR[rb]._f[2]) ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[3] = fabs(CPU.GPR[ra]._f[3]) > fabs(CPU.GPR[rb]._f[3]) ? 0xffffffff : 0;
+ }
+ void DFCMGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u64[0] = fabs(CPU.GPR[ra]._d[0]) > fabs(CPU.GPR[rb]._d[0]) ? 0xffffffffffffffff : 0;
+ CPU.GPR[rt]._u64[1] = fabs(CPU.GPR[ra]._d[1]) > fabs(CPU.GPR[rb]._d[1]) ? 0xffffffffffffffff : 0;
+ }
+ void DFA(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] = CPU.GPR[ra]._d[0] + CPU.GPR[rb]._d[0];
+ CPU.GPR[rt]._d[1] = CPU.GPR[ra]._d[1] + CPU.GPR[rb]._d[1];
+ }
+ void DFS(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] = CPU.GPR[ra]._d[0] - CPU.GPR[rb]._d[0];
+ CPU.GPR[rt]._d[1] = CPU.GPR[ra]._d[1] - CPU.GPR[rb]._d[1];
+ }
+ void DFM(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] = CPU.GPR[ra]._d[0] * CPU.GPR[rb]._d[0];
+ CPU.GPR[rt]._d[1] = CPU.GPR[ra]._d[1] * CPU.GPR[rb]._d[1];
+ }
+ void CLGTB(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = CPU.GPR[ra]._u8[b] > CPU.GPR[rb]._u8[b] ? 0xff : 0;
+ }
+ void HLGT(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[ra]._u32[3] > CPU.GPR[rb]._u32[3]) CPU.Stop();
+ }
+ void DFMA(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] += CPU.GPR[ra]._d[0] * CPU.GPR[rb]._d[0];
+ CPU.GPR[rt]._d[1] += CPU.GPR[ra]._d[1] * CPU.GPR[rb]._d[1];
+ }
+ void DFMS(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] = CPU.GPR[ra]._d[0] * CPU.GPR[rb]._d[0] - CPU.GPR[rt]._d[0];
+ CPU.GPR[rt]._d[1] = CPU.GPR[ra]._d[1] * CPU.GPR[rb]._d[1] - CPU.GPR[rt]._d[1];
+ }
+ void DFNMS(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] -= CPU.GPR[ra]._d[0] * CPU.GPR[rb]._d[0];
+ CPU.GPR[rt]._d[1] -= CPU.GPR[ra]._d[1] * CPU.GPR[rb]._d[1];
+ }
+ void DFNMA(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] = -(CPU.GPR[ra]._d[0] * CPU.GPR[rb]._d[0] + CPU.GPR[rt]._d[0]);
+ CPU.GPR[rt]._d[1] = -(CPU.GPR[ra]._d[1] * CPU.GPR[rb]._d[1] + CPU.GPR[rt]._d[1]);
+ }
+ void CEQ(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._i32[w] == CPU.GPR[rb]._i32[w] ? 0xffffffff : 0;
+ }
+ void MPYHHU(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u16[w*2+1] * CPU.GPR[rb]._u16[w*2+1];
+ }
+ void ADDX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u32[w] + CPU.GPR[rb]._u32[w] + (CPU.GPR[rt]._u32[w] & 1);
+ }
+ void SFX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[rb]._u32[w] - CPU.GPR[ra]._u32[w] - (1 - (CPU.GPR[rt]._u32[w] & 1));
+ }
+ void CGX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = ((u64)CPU.GPR[ra]._u32[w] + (u64)CPU.GPR[rb]._u32[w] + (u64)(CPU.GPR[rt]._u32[w] & 1)) >> 32;
+ }
+ void BGX(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ s64 nResult;
+
+ for (int w = 0; w < 4; w++)
+ {
+ nResult = (u64)CPU.GPR[rb]._u32[w] - (u64)CPU.GPR[ra]._u32[w] - (u64)(1 - (CPU.GPR[rt]._u32[w] & 1));
+ CPU.GPR[rt]._u32[w] = nResult < 0 ? 0 : 1;
+ }
+ }
+ void MPYHHA(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] += CPU.GPR[ra]._i16[w*2+1] * CPU.GPR[rb]._i16[w*2+1];
+ }
+ void MPYHHAU(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] += CPU.GPR[ra]._u16[w*2+1] * CPU.GPR[rb]._u16[w*2+1];
+ }
+ //Forced bits to 0, hence the shift:
+
+ void FSCRRD(u32 rt)
+ {
+ /*CPU.GPR[rt]._u128.lo =
+ CPU.FPSCR.Exception0 << 20 &
+ CPU.FPSCR.*/
+ UNIMPLEMENTED();
+ }
+ void FESD(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._d[0] = (double)CPU.GPR[ra]._f[1];
+ CPU.GPR[rt]._d[1] = (double)CPU.GPR[ra]._f[3];
+ }
+ void FRDS(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[1] = (float)CPU.GPR[ra]._d[0];
+ CPU.GPR[rt]._u32[0] = 0x00000000;
+ CPU.GPR[rt]._f[3] = (float)CPU.GPR[ra]._d[1];
+ CPU.GPR[rt]._u32[2] = 0x00000000;
+ }
+ void FSCRWR(u32 rt, u32 ra)
+ {
+ UNIMPLEMENTED();
+ }
+ void DFTSV(u32 rt, u32 ra, s32 i7)
+ {
+ UNIMPLEMENTED();
+ const u64 DoubleExpMask = 0x7ff0000000000000;
+ const u64 DoubleFracMask = 0x000fffffffffffff;
+ const u64 DoubleSignMask = 0x8000000000000000;
+ const SPU_GPR_hdr temp = CPU.GPR[ra];
+ CPU.GPR[rt].Reset();
+ if (i7 & 1) //Negative Denorm Check (-, exp is zero, frac is non-zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] & DoubleFracMask)
+ if ((temp._u64[i] & (DoubleSignMask | DoubleExpMask)) == DoubleSignMask)
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ if (i7 & 2) //Positive Denorm Check (+, exp is zero, frac is non-zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] & DoubleFracMask)
+ if ((temp._u64[i] & (DoubleSignMask | DoubleExpMask)) == 0)
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ if (i7 & 4) //Negative Zero Check (-, exp is zero, frac is zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] == DoubleSignMask)
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ if (i7 & 8) //Positive Zero Check (+, exp is zero, frac is zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] == 0)
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ if (i7 & 16) //Negative Infinity Check (-, exp is 0x7ff, frac is zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] == (DoubleSignMask | DoubleExpMask))
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ if (i7 & 32) //Positive Infinity Check (+, exp is 0x7ff, frac is zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] == DoubleExpMask)
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ if (i7 & 64) //Not-a-Number Check (any sign, exp is 0x7ff, frac is non-zero)
+ for (int i = 0; i < 2; i++)
+ {
+ if (temp._u64[i] & DoubleFracMask)
+ if ((temp._u64[i] & DoubleExpMask) == DoubleExpMask)
+ CPU.GPR[rt]._u64[i] = 0xffffffffffffffff;
+ }
+ }
+ void FCEQ(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = CPU.GPR[ra]._f[0] == CPU.GPR[rb]._f[0] ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[1] = CPU.GPR[ra]._f[1] == CPU.GPR[rb]._f[1] ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[2] = CPU.GPR[ra]._f[2] == CPU.GPR[rb]._f[2] ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[3] = CPU.GPR[ra]._f[3] == CPU.GPR[rb]._f[3] ? 0xffffffff : 0;
+ }
+ void DFCEQ(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u64[0] = CPU.GPR[ra]._d[0] == CPU.GPR[rb]._d[0] ? 0xffffffffffffffff : 0;
+ CPU.GPR[rt]._u64[1] = CPU.GPR[ra]._d[1] == CPU.GPR[rb]._d[1] ? 0xffffffffffffffff : 0;
+ }
+ void MPY(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = CPU.GPR[ra]._i16[w*2] * CPU.GPR[rb]._i16[w*2];
+ }
+ void MPYH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = (CPU.GPR[ra]._i16[w*2+1] * CPU.GPR[rb]._i16[w*2]) << 16;
+ }
+ void MPYHH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = CPU.GPR[ra]._i16[w*2+1] * CPU.GPR[rb]._i16[w*2+1];
+ }
+ void MPYS(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = (CPU.GPR[ra]._i16[w*2] * CPU.GPR[rb]._i16[w*2]) >> 16;
+ }
+ void CEQH(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = CPU.GPR[ra]._u16[h] == CPU.GPR[rb]._u16[h] ? 0xffff : 0;
+ }
+ void FCMEQ(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u32[0] = fabs(CPU.GPR[ra]._f[0]) == fabs(CPU.GPR[rb]._f[0]) ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[1] = fabs(CPU.GPR[ra]._f[1]) == fabs(CPU.GPR[rb]._f[1]) ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[2] = fabs(CPU.GPR[ra]._f[2]) == fabs(CPU.GPR[rb]._f[2]) ? 0xffffffff : 0;
+ CPU.GPR[rt]._u32[3] = fabs(CPU.GPR[ra]._f[3]) == fabs(CPU.GPR[rb]._f[3]) ? 0xffffffff : 0;
+ }
+ void DFCMEQ(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._u64[0] = fabs(CPU.GPR[ra]._d[0]) == fabs(CPU.GPR[rb]._d[0]) ? 0xffffffffffffffff : 0;
+ CPU.GPR[rt]._u64[1] = fabs(CPU.GPR[ra]._d[1]) == fabs(CPU.GPR[rb]._d[1]) ? 0xffffffffffffffff : 0;
+ }
+ void MPYU(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u16[w*2] * CPU.GPR[rb]._u16[w*2];
+ }
+ void CEQB(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = CPU.GPR[ra]._u8[b] == CPU.GPR[rb]._u8[b] ? 0xff : 0;
+ }
+ void FI(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ //Floating Interpolation: ra is ignored.
+ //It should work correctly if the result of the preceding FREST or FRSQEST is sufficiently exact.
+ CPU.GPR[rt] = CPU.GPR[rb];
+ }
+ void HEQ(u32 rt, u32 ra, u32 rb)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[ra]._i32[3] == CPU.GPR[rb]._i32[3]) CPU.Stop();
+ }
+
+ //0 - 9
+ void CFLTS(u32 rt, u32 ra, s32 i8)
+ {
+ UNIMPLEMENTED();
+ const u32 scale = 173 - (i8 & 0xff); //unsigned immediate
+ for (int i = 0; i < 4; i++)
+ {
+ u32 exp = ((CPU.GPR[ra]._u32[i] >> 23) & 0xff) + scale;
+
+ if (exp > 255)
+ exp = 255;
+
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[ra]._u32[i] & 0x807fffff) | (exp << 23);
+
+ CPU.GPR[rt]._u32[i] = (u32)CPU.GPR[rt]._f[i]; //trunc
+ }
+ //CPU.GPR[rt]._m128i = _mm_cvttps_epi32(CPU.GPR[rt]._m128);
+ }
+ void CFLTU(u32 rt, u32 ra, s32 i8)
+ {
+ UNIMPLEMENTED();
+ const u32 scale = 173 - (i8 & 0xff); //unsigned immediate
+ for (int i = 0; i < 4; i++)
+ {
+ u32 exp = ((CPU.GPR[ra]._u32[i] >> 23) & 0xff) + scale;
+
+ if (exp > 255)
+ exp = 255;
+
+ if (CPU.GPR[ra]._u32[i] & 0x80000000) //if negative, result = 0
+ CPU.GPR[rt]._u32[i] = 0;
+ else
+ {
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[ra]._u32[i] & 0x807fffff) | (exp << 23);
+
+ if (CPU.GPR[rt]._f[i] > 0xffffffff) //if big, result = max
+ CPU.GPR[rt]._u32[i] = 0xffffffff;
+ else
+ CPU.GPR[rt]._u32[i] = floor(CPU.GPR[rt]._f[i]);
+ }
+ }
+ }
+ void CSFLT(u32 rt, u32 ra, s32 i8)
+ {
+ UNIMPLEMENTED();
+ //CPU.GPR[rt]._m128 = _mm_cvtepi32_ps(CPU.GPR[ra]._m128i);
+ const u32 scale = 155 - (i8 & 0xff); //unsigned immediate
+ for (int i = 0; i < 4; i++)
+ {
+ CPU.GPR[rt]._f[i] = (s32)CPU.GPR[ra]._i32[i];
+
+ u32 exp = ((CPU.GPR[rt]._u32[i] >> 23) & 0xff) - scale;
+
+ if (exp > 255) //< 0
+ exp = 0;
+
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[rt]._u32[i] & 0x807fffff) | (exp << 23);
+ }
+ }
+ void CUFLT(u32 rt, u32 ra, s32 i8)
+ {
+ UNIMPLEMENTED();
+ const u32 scale = 155 - (i8 & 0xff); //unsigned immediate
+ for (int i = 0; i < 4; i++)
+ {
+ CPU.GPR[rt]._f[i] = (float)CPU.GPR[ra]._u32[i];
+ u32 exp = ((CPU.GPR[rt]._u32[i] >> 23) & 0xff) - scale;
+
+ if (exp > 255) //< 0
+ exp = 0;
+
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[rt]._u32[i] & 0x807fffff) | (exp << 23);
+ }
+ }
+
+ //0 - 8
+ void BRZ(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ if (CPU.GPR[rt]._u32[3] == 0)
+ CPU.SetBranch(branchTarget(CPU.PC, i16));
+ }
+ void STQA(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ u32 lsa = (i16 << 2) & 0x3fff0;
+ if(!CPU.IsGoodLSA(lsa))
+ {
+ ConLog.Error("STQA: bad lsa (0x%x)", lsa);
+ Emu.Pause();
+ return;
+ }
+
+ CPU.WriteLS128(lsa, CPU.GPR[rt]._u128);
+ }
+ void BRNZ(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ if (CPU.GPR[rt]._u32[3] != 0)
+ CPU.SetBranch(branchTarget(CPU.PC, i16));
+ }
+ void BRHZ(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ if (CPU.GPR[rt]._u16[6] == 0)
+ CPU.SetBranch(branchTarget(CPU.PC, i16));
+ }
+ void BRHNZ(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ if (CPU.GPR[rt]._u16[6] != 0)
+ CPU.SetBranch(branchTarget(CPU.PC, i16));
+ }
+ void STQR(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ u32 lsa = branchTarget(CPU.PC, i16) & 0x3fff0;
+ if(!CPU.IsGoodLSA(lsa))
+ {
+ ConLog.Error("STQR: bad lsa (0x%x)", lsa);
+ Emu.Pause();
+ return;
+ }
+
+ CPU.WriteLS128(lsa, CPU.GPR[rt]._u128);
+ }
+ void BRA(s32 i16)
+ {
+ UNIMPLEMENTED();
+ CPU.SetBranch(branchTarget(0, i16));
+ }
+ void LQA(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ u32 lsa = (i16 << 2) & 0x3fff0;
+ if(!CPU.IsGoodLSA(lsa))
+ {
+ ConLog.Error("LQA: bad lsa (0x%x)", lsa);
+ Emu.Pause();
+ return;
+ }
+
+ CPU.GPR[rt]._u128 = CPU.ReadLS128(lsa);
+ }
+ void BRASL(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt].Reset();
+ CPU.GPR[rt]._u32[3] = CPU.PC + 4;
+ CPU.SetBranch(branchTarget(0, i16));
+ }
+ void BR(s32 i16)
+ {
+ UNIMPLEMENTED();
+ CPU.SetBranch(branchTarget(CPU.PC, i16));
+ }
+ void FSMBI(u32 rt, s32 i16)
+ {
+ XmmVar v0(c);
+ c.movaps(v0, imm_xmm(fsmbi_mask[i16 & 0xffff]));
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ void BRSL(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt].Reset();
+ CPU.GPR[rt]._u32[3] = CPU.PC + 4;
+ CPU.SetBranch(branchTarget(CPU.PC, i16));
+ }
+ void LQR(u32 rt, s32 i16)
+ {
+ u32 lsa = branchTarget(CPU.PC, i16) & 0x3fff0;
+
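+ // The local store is big-endian: each qword is bswap'd and the two halves are
+ // stored swapped, byte-reversing the whole 128-bit value into GPR[rt].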
+ GpVar v0(c, kVarTypeUInt64);
+ GpVar v1(c, kVarTypeUInt64);
+ c.mov(v0, qword_ptr(*ls_var, lsa));
+ c.mov(v1, qword_ptr(*ls_var, lsa + 8));
+ c.bswap(v0);
+ c.bswap(v1);
+ c.mov(cpu_qword(GPR[rt]._u64[0]), v1);
+ c.mov(cpu_qword(GPR[rt]._u64[1]), v0);
+ }
+ void IL(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._i32[0] =
+ CPU.GPR[rt]._i32[1] =
+ CPU.GPR[rt]._i32[2] =
+ CPU.GPR[rt]._i32[3] = i16;
+ }
+ void ILHU(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = i16 << 16;
+ }
+ void ILH(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = i16;
+ }
+ void IOHL(u32 rt, s32 i16)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] |= (i16 & 0xFFFF);
+ }
+
+
+ //0 - 7
+ void ORI(u32 rt, u32 ra, s32 i10)
+ {
+ XmmVar v0(c);
+ if (i10 == 0)
+ {
+ // zero
+ c.xorps(v0, v0);
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ else if (i10 == -1)
+ {
+ if (rt == ra)
+ {
+ // nop
+ }
+ else
+ {
+ // mov
+ c.movaps(v0, cpu_xmm(GPR[ra]));
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ else
+ {
+ c.movaps(v0, cpu_xmm(GPR[ra]));
+ c.orps(v0, imm_xmm(s19_to_s32[i10 & 0x7ffff]));
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ void ORHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = CPU.GPR[ra]._i16[h] | i10;
+ }
+ void ORBI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._i8[b] = CPU.GPR[ra]._i8[b] | i10;
+ }
+ void SFI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = i10 - CPU.GPR[ra]._i32[w];
+ }
+ void SFHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = i10 - CPU.GPR[ra]._i16[h];
+ }
+ void ANDI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = CPU.GPR[ra]._i32[w] & i10;
+ }
+ void ANDHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = CPU.GPR[ra]._i16[h] & i10;
+ }
+ void ANDBI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._i8[b] = CPU.GPR[ra]._i8[b] & i10;
+ }
+ void AI(u32 rt, u32 ra, s32 i10)
+ {
+ XmmVar v0(c);
+ if (i10 == 0)
+ {
+ if (rt == ra)
+ {
+ // nop
+ }
+ else
+ {
+ // mov
+ c.movaps(v0, cpu_xmm(GPR[ra]));
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ else
+ {
+ // add
+ c.movdqa(v0, cpu_xmm(GPR[ra]));
+ c.paddd(v0, imm_xmm(s19_to_s32[i10 & 0x7ffff]));
+ c.movdqa(cpu_xmm(GPR[rt]), v0);
+ }
+ }
+ void AHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for(u32 h = 0; h < 8; ++h)
+ CPU.GPR[rt]._i16[h] = CPU.GPR[ra]._i16[h] + i10;
+ }
+ void STQD(u32 rt, s32 i10, u32 ra) //i10 is shifted left by 4 while decoding
+ {
+ GpVar lsa(c, kVarTypeUInt32);
+ GpVar v0(c, kVarTypeUInt64);
+ GpVar v1(c, kVarTypeUInt64);
+
+ c.mov(lsa, cpu_dword(GPR[ra]._u32[3]));
+ if (i10) c.add(lsa, i10);
+ c.and_(lsa, 0x3fff0);
+ c.mov(v0, cpu_qword(GPR[rt]._u64[0]));
+ c.mov(v1, cpu_qword(GPR[rt]._u64[1]));
+ c.bswap(v0);
+ c.bswap(v1);
+ c.mov(qword_ptr(*ls_var, lsa, 0, 0), v1);
+ c.mov(qword_ptr(*ls_var, lsa, 0, 8), v0);
+ }
+ void LQD(u32 rt, s32 i10, u32 ra) //i10 is shifted left by 4 while decoding
+ {
+ UNIMPLEMENTED();
+ const u32 lsa = (CPU.GPR[ra]._i32[3] + i10) & 0x3fff0;
+ if(!CPU.IsGoodLSA(lsa))
+ {
+ ConLog.Error("LQD: bad lsa (0x%x)", lsa);
+ Emu.Pause();
+ return;
+ }
+
+ CPU.GPR[rt]._u128 = CPU.ReadLS128(lsa);
+ }
+ void XORI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = CPU.GPR[ra]._i32[w] ^ i10;
+ }
+ void XORHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._i16[h] = CPU.GPR[ra]._i16[h] ^ i10;
+ }
+ void XORBI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._i8[b] = CPU.GPR[ra]._i8[b] ^ i10;
+ }
+ void CGTI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._i32[w] > i10 ? 0xffffffff : 0;
+ }
+ void CGTHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = CPU.GPR[ra]._i16[h] > i10 ? 0xffff : 0;
+ }
+ void CGTBI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = CPU.GPR[ra]._i8[b] > (s8)(i10 & 0xff) ? 0xff : 0;
+ }
+ void HGTI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[ra]._i32[3] > i10) CPU.Stop();
+ }
+ void CLGTI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for(u32 i = 0; i < 4; ++i)
+ {
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[ra]._u32[i] > (u32)i10) ? 0xffffffff : 0x00000000;
+ }
+ }
+ void CLGTHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for(u32 i = 0; i < 8; ++i)
+ {
+ CPU.GPR[rt]._u16[i] = (CPU.GPR[ra]._u16[i] > (u16)i10) ? 0xffff : 0x0000;
+ }
+ }
+ void CLGTBI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._u8[b] = CPU.GPR[ra]._u8[b] > (u8)(i10 & 0xff) ? 0xff : 0;
+ }
+ void HLGTI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[ra]._u32[3] > (u32)i10) CPU.Stop();
+ }
+ void MPYI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = CPU.GPR[ra]._i16[w*2] * i10;
+ }
+ void MPYUI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._u32[w] = CPU.GPR[ra]._u16[w*2] * (u16)(i10 & 0xffff);
+ }
+ void CEQI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for(u32 i = 0; i < 4; ++i)
+ CPU.GPR[rt]._u32[i] = (CPU.GPR[ra]._i32[i] == i10) ? 0xffffffff : 0x00000000;
+ }
+ void CEQHI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int h = 0; h < 8; h++)
+ CPU.GPR[rt]._u16[h] = (CPU.GPR[ra]._i16[h] == (s16)i10) ? 0xffff : 0;
+ }
+ void CEQBI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ for (int b = 0; b < 16; b++)
+ CPU.GPR[rt]._i8[b] = (CPU.GPR[ra]._i8[b] == (s8)(i10 & 0xff)) ? 0xff : 0;
+ }
+ void HEQI(u32 rt, u32 ra, s32 i10)
+ {
+ UNIMPLEMENTED();
+ if(CPU.GPR[ra]._i32[3] == i10) CPU.Stop();
+ }
+
+
+ //0 - 6
+ void HBRA(s32 ro, s32 i16)
+ { //i16 is shifted left by 2 while decoding
+ //UNIMPLEMENTED();
+ }
+ void HBRR(s32 ro, s32 i16)
+ {
+ //UNIMPLEMENTED();
+ }
+ void ILA(u32 rt, u32 i18)
+ {
+ XmmVar v0(c);
+ c.movaps(v0, imm_xmm(s19_to_s32[i18 & 0x3ffff]));
+ c.movaps(cpu_xmm(GPR[rt]), v0);
+ }
+
+ //0 - 3
+ void SELB(u32 rt, u32 ra, u32 rb, u32 rc)
+ {
+ UNIMPLEMENTED();
+ for(u64 i = 0; i < 2; ++i)
+ {
+ CPU.GPR[rt]._u64[i] =
+ ( CPU.GPR[rc]._u64[i] & CPU.GPR[rb]._u64[i]) |
+ (~CPU.GPR[rc]._u64[i] & CPU.GPR[ra]._u64[i]);
+ }
+ }
+ void SHUFB(u32 rt, u32 ra, u32 rb, u32 rc)
+ {
+ UNIMPLEMENTED();
+ const SPU_GPR_hdr _a = CPU.GPR[ra];
+ const SPU_GPR_hdr _b = CPU.GPR[rb];
+ for (int i = 0; i < 16; i++)
+ {
+ u8 b = CPU.GPR[rc]._u8[i];
+ if(b & 0x80)
+ {
+ if(b & 0x40)
+ {
+ if(b & 0x20)
+ CPU.GPR[rt]._u8[i] = 0x80;
+ else
+ CPU.GPR[rt]._u8[i] = 0xFF;
+ }
+ else
+ CPU.GPR[rt]._u8[i] = 0x00;
+ }
+ else
+ {
+ if(b & 0x10)
+ CPU.GPR[rt]._u8[i] = _b._u8[15 - (b & 0x0F)];
+ else
+ CPU.GPR[rt]._u8[i] = _a._u8[15 - (b & 0x0F)];
+ }
+ }
+ }
+ void MPYA(u32 rt, u32 ra, u32 rb, u32 rc)
+ {
+ UNIMPLEMENTED();
+ for (int w = 0; w < 4; w++)
+ CPU.GPR[rt]._i32[w] = CPU.GPR[ra]._i16[w*2] * CPU.GPR[rb]._i16[w*2] + CPU.GPR[rc]._i32[w];
+ }
+ void FNMS(u32 rt, u32 ra, u32 rb, u32 rc)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[0] = CPU.GPR[rc]._f[0] - CPU.GPR[ra]._f[0] * CPU.GPR[rb]._f[0];
+ CPU.GPR[rt]._f[1] = CPU.GPR[rc]._f[1] - CPU.GPR[ra]._f[1] * CPU.GPR[rb]._f[1];
+ CPU.GPR[rt]._f[2] = CPU.GPR[rc]._f[2] - CPU.GPR[ra]._f[2] * CPU.GPR[rb]._f[2];
+ CPU.GPR[rt]._f[3] = CPU.GPR[rc]._f[3] - CPU.GPR[ra]._f[3] * CPU.GPR[rb]._f[3];
+ }
+ void FMA(u32 rt, u32 ra, u32 rb, u32 rc)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[0] = CPU.GPR[ra]._f[0] * CPU.GPR[rb]._f[0] + CPU.GPR[rc]._f[0];
+ CPU.GPR[rt]._f[1] = CPU.GPR[ra]._f[1] * CPU.GPR[rb]._f[1] + CPU.GPR[rc]._f[1];
+ CPU.GPR[rt]._f[2] = CPU.GPR[ra]._f[2] * CPU.GPR[rb]._f[2] + CPU.GPR[rc]._f[2];
+ CPU.GPR[rt]._f[3] = CPU.GPR[ra]._f[3] * CPU.GPR[rb]._f[3] + CPU.GPR[rc]._f[3];
+ }
+ void FMS(u32 rt, u32 ra, u32 rb, u32 rc)
+ {
+ UNIMPLEMENTED();
+ CPU.GPR[rt]._f[0] = CPU.GPR[ra]._f[0] * CPU.GPR[rb]._f[0] - CPU.GPR[rc]._f[0];
+ CPU.GPR[rt]._f[1] = CPU.GPR[ra]._f[1] * CPU.GPR[rb]._f[1] - CPU.GPR[rc]._f[1];
+ CPU.GPR[rt]._f[2] = CPU.GPR[ra]._f[2] * CPU.GPR[rb]._f[2] - CPU.GPR[rc]._f[2];
+ CPU.GPR[rt]._f[3] = CPU.GPR[ra]._f[3] * CPU.GPR[rb]._f[3] - CPU.GPR[rc]._f[3];
+ }
+
+ void UNK(u32 code, u32 opcode, u32 gcode)
+ {
+ UNK(fmt::Format("(SPURecompiler) Unimplemented opcode! (0x%08x, 0x%x, 0x%x)", code, opcode, gcode));
+ }
+
+ void UNK(const std::string& err)
+ {
+ ConLog.Error(err + fmt::Format(" #pc: 0x%x", CPU.PC));
+ do_finalize = true;
+ Emu.Pause();
+ }
+};
\ No newline at end of file
diff --git a/rpcs3/Emu/Cell/SPURecompilerCore.cpp b/rpcs3/Emu/Cell/SPURecompilerCore.cpp
new file mode 100644
index 0000000000..bf3662c399
--- /dev/null
+++ b/rpcs3/Emu/Cell/SPURecompilerCore.cpp
@@ -0,0 +1,132 @@
+#include "stdafx.h"
+#include "SPUInstrTable.h"
+#include "SPUInterpreter.h"
+#include "SPURecompiler.h"
+
+static const SPUImmTable g_spu_imm;
+
+SPURecompilerCore::SPURecompilerCore(SPUThread& cpu)
+: m_enc(new SPURecompiler(cpu, *this))
+, m_inter(new SPUInterpreter(cpu))
+, CPU(cpu)
+, compiler(&runtime)
+{
+ memset(entry, 0, sizeof(entry));
+}
+
+SPURecompilerCore::~SPURecompilerCore()
+{
+ delete m_enc;
+ delete m_inter;
+}
+
+void SPURecompilerCore::Decode(const u32 code) // decode instruction and run with interpreter
+{
+ (*SPU_instr::rrr_list)(m_inter, code);
+}
+
+void SPURecompilerCore::Compile(u16 pos)
+{
+ compiler.addFunc(kFuncConvHost, FuncBuilder4<u16, void*, void*, const SPUImmTable*, u16>());
+ entry[pos].host = pos;
+
+ GpVar cpu_var(compiler, kVarTypeIntPtr, "cpu");
+ compiler.setArg(0, cpu_var);
+ compiler.alloc(cpu_var);
+ m_enc->cpu_var = &cpu_var;
+
+ GpVar ls_var(compiler, kVarTypeIntPtr, "ls");
+ compiler.setArg(1, ls_var);
+ compiler.alloc(ls_var);
+ m_enc->ls_var = &ls_var;
+
+ GpVar imm_var(compiler, kVarTypeIntPtr, "imm");
+ compiler.setArg(2, imm_var);
+ compiler.alloc(imm_var);
+ m_enc->imm_var = &imm_var;
+
+ GpVar pos_var(compiler, kVarTypeUInt16, "pos");
+ compiler.setArg(3, pos_var);
+ compiler.alloc(pos_var);
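+ // The four arguments mirror the Func signature used in DecodeMemory():
+ // (cpu struct, local store, immediate table, starting position).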
+
+ while (true)
+ {
+ const u32 opcode = Memory.Read32(CPU.dmac.ls_offset + pos * 4);
+ m_enc->do_finalize = false;
+ (*SPU_instr::rrr_list)(m_enc, opcode); // compile single opcode
+ bool fin = m_enc->do_finalize;
+ entry[pos].valid = opcode;
+
+ if (fin) break;
+ CPU.PC += 4;
+ pos++;
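+ // subsequent slots inherit the block's starting position, so any entry in the
+ // block can reach the compiled function via entry[entry[pos].host].pointer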
+ entry[pos].host = entry[pos - 1].host;
+ }
+
+ compiler.xor_(pos_var, pos_var);
+ compiler.ret(pos_var);
+ compiler.endFunc();
+ entry[entry[pos].host].pointer = compiler.make();
+}
+
+u8 SPURecompilerCore::DecodeMemory(const u64 address)
+{
+ const u64 m_offset = address - CPU.PC;
+ const u16 pos = (CPU.PC >> 2);
+
+ u32* ls = (u32*)Memory.VirtualToRealAddr(m_offset);
+
+ if (!pos)
+ {
+ ConLog.Error("SPURecompilerCore::DecodeMemory(): ls_addr = 0");
+ Emu.Pause();
+ return 0;
+ }
+
+ if (entry[pos].pointer)
+ {
+ // check data (hard way)
+ bool is_valid = true;
+ for (u32 i = pos; i < entry[pos].count + pos; i++)
+ {
+ if (entry[i].valid != ls[i])
+ {
+ is_valid = false;
+ break;
+ }
+ }
+ // invalidate if necessary
+ if (!is_valid)
+ {
+ // TODO
+ }
+ }
+
+ if (!entry[pos].pointer)
+ {
+ // compile from the current position up to the nearest dynamic or statically unresolved branch, zero data, or other stopping point
+ Compile(pos);
+ }
+
+ if (!entry[pos].pointer)
+ {
+ ConLog.Error("SPURecompilerCore::DecodeMemory(ls_addr=0x%x): compilation failed", pos * sizeof(u32));
+ Emu.Pause();
+ return 0;
+ }
+ // jump
+ typedef u16(*Func)(void* _cpu, void* _ls, const SPUImmTable* _imm, u16 _pos);
+
+ Func func = asmjit_cast<Func>(entry[entry[pos].host].pointer);
+
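+ // Subtracting GPR's offset recovers the SPUThread base address, so the
+ // offsetof()-based operands baked into the compiled code resolve correctly.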
+ void* cpu = (u8*)&CPU.GPR[0] - offsetof(SPUThread, GPR[0]); // ugly cpu base offset detection
+
+ u16 res = pos == entry[pos].host ? 0 : pos;
+ res = func(cpu, ls, &g_spu_imm, res);
+
+ ConLog.Write("func -> %d", res);
+
+ return 0;
+ /*Decode(Memory.Read32(address));
+ return 4;*/
+}
\ No newline at end of file
diff --git a/rpcs3/rpcs3.vcxproj b/rpcs3/rpcs3.vcxproj
index 5aa56519bc..dc4dfcfc0b 100644
--- a/rpcs3/rpcs3.vcxproj
+++ b/rpcs3/rpcs3.vcxproj
@@ -69,20 +69,20 @@
- .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86\Include;.\OpenAL\include;$(IncludePath)
+ .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86\Include;.\OpenAL\include;$(IncludePath);..\asmjit\src\asmjit
$(SolutionDir)bin\
..\libs\$(Configuration)\;$(LibraryPath)
$(ProjectName)-$(PlatformShortName)-dbg
- .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86_64\Include;.\OpenAL\include;$(IncludePath)
+ .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86_64\Include;.\OpenAL\include;$(IncludePath);..\asmjit\src\asmjit
$(SolutionDir)bin\
..\libs\$(Configuration)\;$(LibraryPath)
$(ProjectName)-$(PlatformShortName)-dbg
false
- .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86\Include;.\OpenAL\include;$(IncludePath)
+ .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86\Include;.\OpenAL\include;$(IncludePath);..\asmjit\src\asmjit
$(SolutionDir)bin\
..\libs\$(Configuration)\;$(LibraryPath)
false
@@ -91,7 +91,7 @@
false
- .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86_64\Include;.\OpenAL\include;$(IncludePath)
+ .\;..\wxWidgets\include;..\SDL-1.3.0-5538\include;..\SDL_image-1.2.10;..\pthreads-2.8.0;..\;..\ffmpeg\WindowsInclude;..\ffmpeg\Windows\x86_64\Include;.\OpenAL\include;$(IncludePath);..\asmjit\src\asmjit
$(SolutionDir)bin\
..\libs\$(Configuration)\;$(LibraryPath)
false
@@ -109,7 +109,7 @@
true
- wxmsw31ud_adv.lib;wxbase31ud.lib;wxmsw31ud_core.lib;wxmsw31ud_aui.lib;wxtiffd.lib;wxjpegd.lib;wxpngd.lib;wxzlibd.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;%(AdditionalDependencies)
+ wxmsw31ud_adv.lib;wxbase31ud.lib;wxmsw31ud_core.lib;wxmsw31ud_aui.lib;wxtiffd.lib;wxjpegd.lib;wxpngd.lib;wxzlibd.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;asmjit.lib;%(AdditionalDependencies)
%(IgnoreSpecificDefaultLibraries)
false
..\wxWidgets\lib\vc_lib;..\ffmpeg\Windows\x86\lib;..\OpenAL\Win32
@@ -129,7 +129,7 @@
true
- wxmsw31ud_adv.lib;wxbase31ud.lib;wxmsw31ud_core.lib;wxmsw31ud_aui.lib;wxtiffd.lib;wxjpegd.lib;wxpngd.lib;wxzlibd.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;%(AdditionalDependencies)
+ wxmsw31ud_adv.lib;wxbase31ud.lib;wxmsw31ud_core.lib;wxmsw31ud_aui.lib;wxtiffd.lib;wxjpegd.lib;wxpngd.lib;wxzlibd.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;asmjit.lib;%(AdditionalDependencies)
%(IgnoreSpecificDefaultLibraries)
false
..\wxWidgets\lib\vc_x64_lib;..\ffmpeg\Windows\x86_64\lib;..\OpenAL\Win64
@@ -161,7 +161,7 @@
true
true
true
- wxmsw31u_adv.lib;wxbase31u.lib;wxmsw31u_core.lib;wxmsw31u_aui.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;wxtiff.lib;wxjpeg.lib;wxpng.lib;wxzlib.lib;wxregexu.lib;wxexpat.lib;wsock32.lib;wininet.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib
+ wxmsw31u_adv.lib;wxbase31u.lib;wxmsw31u_core.lib;wxmsw31u_aui.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;wxtiff.lib;wxjpeg.lib;wxpng.lib;wxzlib.lib;wxregexu.lib;wxexpat.lib;wsock32.lib;wininet.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;asmjit.lib
%(IgnoreSpecificDefaultLibraries)
@@ -193,7 +193,7 @@
true
true
true
- wxmsw31u_adv.lib;wxbase31u.lib;wxmsw31u_core.lib;wxmsw31u_aui.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;wxtiff.lib;wxjpeg.lib;wxpng.lib;wxzlib.lib;wxregexu.lib;wxexpat.lib;wsock32.lib;wininet.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;%(AdditionalDependencies)
+ wxmsw31u_adv.lib;wxbase31u.lib;wxmsw31u_core.lib;wxmsw31u_aui.lib;odbc32.lib;odbccp32.lib;comctl32.lib;ws2_32.lib;shlwapi.lib;winmm.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;rpcrt4.lib;wxtiff.lib;wxjpeg.lib;wxpng.lib;wxzlib.lib;wxregexu.lib;wxexpat.lib;wsock32.lib;wininet.lib;avcodec.lib;avformat.lib;avutil.lib;swresample.lib;swscale.lib;OpenAL32.lib;EFX-Util.lib;asmjit.lib;%(AdditionalDependencies)
%(IgnoreSpecificDefaultLibraries)
@@ -227,6 +227,7 @@
+
diff --git a/rpcs3/rpcs3.vcxproj.filters b/rpcs3/rpcs3.vcxproj.filters
index 1c1ed0a365..1e00a296fe 100644
--- a/rpcs3/rpcs3.vcxproj.filters
+++ b/rpcs3/rpcs3.vcxproj.filters
@@ -487,6 +487,9 @@
Utilities
+
+ Emu\Cell
+