RPCS3 (mirror of https://github.com/RPCS3/rpcs3.git)

SPURS: Fixed more issues

commit daaa5059e9 (parent d1a7c85e95)
@@ -1,5 +1,7 @@
 #pragma once
 
+#define IS_LE_MACHINE
+
 union _CRT_ALIGN(16) u128
 {
 	u64 _u64[2];
@@ -136,16 +138,28 @@ union _CRT_ALIGN(16) u128
 			}
 		};
 
+		// Index 0 returns the MSB and index 127 returns the LSB
 		bit_element operator [] (u32 index)
 		{
 			assert(index < 128);
-			return bit_element(data[index / 64], 1ull << (index % 64));
+
+#ifdef IS_LE_MACHINE
+			return bit_element(data[1 - (index >> 6)], 0x8000000000000000ull >> (index & 0x3F));
+#else
+			return bit_element(data[index >> 6], 0x8000000000000000ull >> (index & 0x3F));
+#endif
 		}
 
+		// Index 0 returns the MSB and index 127 returns the LSB
 		const bool operator [] (u32 index) const
 		{
 			assert(index < 128);
-			return (data[index / 64] & (1ull << (index % 64))) != 0;
+
+#ifdef IS_LE_MACHINE
+			return (data[1 - (index >> 6)] & (0x8000000000000000ull >> (index & 0x3F))) != 0;
+#else
+			return (data[index >> 6] & (0x8000000000000000ull >> (index & 0x3F))) != 0;
+#endif
 		}
 
 	} _bit;
@@ -509,8 +523,6 @@ struct be_storage_t<T, 16>
 	typedef u128 type;
 };
 
-#define IS_LE_MACHINE
-
 template<typename T, typename T2 = T>
 class be_t
 {
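The comment added above the operators states the contract the rest of this commit relies on: u128::_bit numbers bits MSB-first, so index 0 is the most significant bit of the 128-bit value and index 127 the least significant, matching how SPURS LS patterns are laid out (block 0 in the top bit). A stand-alone sketch of that mapping, using plain uint64_t words instead of the emulator's u128/bit_element types (an assumption for illustration, with data[1] holding the high half as in the little-endian branch above):

// Illustrative sketch, not part of the commit.
#include <cassert>
#include <cstdint>

// MSB-first bit test over a 128-bit value stored as two 64-bit words,
// with data[1] holding bits 0..63 (the high half) and data[0] bits 64..127.
bool test_bit_msb0(const uint64_t data[2], uint32_t index)
{
	assert(index < 128);
	return (data[1 - (index >> 6)] & (0x8000000000000000ull >> (index & 0x3F))) != 0;
}

int main()
{
	const uint64_t v[2] = { 0x0000000000000001ull, 0x8000000000000000ull };
	assert(test_bit_msb0(v, 0));    // index 0 -> MSB of the high word
	assert(test_bit_msb0(v, 127));  // index 127 -> LSB of the low word
	assert(!test_bit_msb0(v, 1));
	return 0;
}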
@@ -34,9 +34,9 @@ s64 spursCreateLv2EventQueue(vm::ptr<CellSpurs> spurs, u32& queue_id, vm::ptr<u8
 {
 #ifdef PRX_DEBUG_XXX
 	vm::var<be_t<u32>> queue;
-	s32 res = cb_call<s32, vm::ptr<CellSpurs>, vm::ptr<u32>, vm::ptr<u8>, s32, u32>(GetCurrentPPUThread(), libsre + 0xB14C, libsre_rtoc,
+	s32 res = cb_call<s32, vm::ptr<CellSpurs>, vm::ptr<be_t<u32>>, vm::ptr<u8>, s32, u32>(GetCurrentPPUThread(), libsre + 0xB14C, libsre_rtoc,
 		spurs, queue, port, size, vm::read32(libsre_rtoc - 0x7E2C));
-	queue_id = queue;
+	queue_id = queue.value();
 	return res;
 #endif
@@ -2768,14 +2768,9 @@ s64 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm:
 	if (ls_pattern.addr() != 0)
 	{
 		u32 ls_blocks = 0;
-		for (auto i = 0; i < 64; i++)
+		for (auto i = 0; i < 128; i++)
 		{
-			if (ls_pattern->u64[0] & ((u64)1 << i))
-			{
-				ls_blocks++;
-			}
-
-			if (ls_pattern->u64[1] & ((u64)1 << i))
+			if (ls_pattern->_u128.value()._bit[i])
 			{
 				ls_blocks++;
 			}
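With the _bit accessor the block count is simply the number of set bits in the whole 128-bit pattern, one bit per 2 KB block of SPU local storage, instead of two separate scans over the 64-bit halves. Outside the emulator's u128 type the same count is a plain population count; a minimal sketch, assuming the pattern is available as two ordinary 64-bit words:

// Illustrative sketch, not part of the commit (requires C++20 <bit>).
#include <bit>
#include <cstdint>

// Number of LS blocks requested by a 128-bit pattern split into two words;
// each set bit stands for one 2 KB block, so the count is a plain popcount.
uint32_t count_ls_blocks(uint64_t hi, uint64_t lo)
{
	return static_cast<uint32_t>(std::popcount(hi) + std::popcount(lo));
}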
@@ -2786,7 +2781,8 @@ s64 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm:
 			return CELL_SPURS_TASK_ERROR_INVAL;
 		}
 
-		if (ls_pattern->u32[0] & 0xFC000000)
+		u128 _0 = u128::from32(0);
+		if ((ls_pattern->_u128.value() & u128::from32r(0xFC000000)) != _0)
 		{
 			// Prevent save/restore to SPURS management area
 			return CELL_SPURS_TASK_ERROR_INVAL;
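The mask in this check covers bits 0-5 of the MSB-first pattern, i.e. the first six 2 KB blocks (the first 0x3000 bytes of local storage), which hold SPURS' own management data; that is also why the save/restore loops later in this commit start at block 6 and are based at CELL_SPURS_TASK_TOP. A sketch of the same guard written against a plain high word (helper name is illustrative, not from the source):

// Illustrative sketch, not part of the commit.
#include <cstdint>

// Same guard as the 0xFC000000 test above, expressed on the high 64-bit word
// of an MSB-first pattern: bits 0..5 (the first six 2 KB blocks) belong to the
// SPURS management area and must not appear in a task's LS pattern.
bool ls_pattern_touches_reserved_area(uint64_t pattern_hi)
{
	return (pattern_hi & 0xFC00000000000000ull) != 0;
}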
@@ -2819,13 +2815,10 @@ s64 spursCreateTask(vm::ptr<CellSpursTaskset> taskset, vm::ptr<u32> task_id, vm:
 
 	taskset->m.task_info[tmp_task_id].elf_addr.set(elf_addr.addr());
 	taskset->m.task_info[tmp_task_id].context_save_storage_and_alloc_ls_blocks = (context_addr.addr() | alloc_ls_blocks);
-	for (u32 i = 0; i < 2; i++)
+	taskset->m.task_info[tmp_task_id].args = *arg;
+	if (ls_pattern.addr())
 	{
-		taskset->m.task_info[tmp_task_id].args.u64[i] = arg != 0 ? arg->u64[i] : 0;
-		if (ls_pattern.addr())
-		{
-			taskset->m.task_info[tmp_task_id].ls_pattern.u64[i] = ls_pattern->u64[i];
-		}
+		taskset->m.task_info[tmp_task_id].ls_pattern = *ls_pattern;
 	}
 
 	*task_id = tmp_task_id;
@@ -3162,12 +3155,7 @@ s64 _cellSpursTaskAttribute2Initialize(vm::ptr<CellSpursTaskAttribute2> attribut
 
-	for (s32 c = 0; c < 4; c++)
-	{
-		attribute->lsPattern.u32[c] = 0;
-	}
-
 	for (s32 i = 0; i < 2; i++)
 	{
-		attribute->lsPattern.u64[i] = 0;
+		attribute->lsPattern._u128 = u128::from64r(0);
 	}
 
 	attribute->name_addr = 0;
@@ -413,6 +413,7 @@ struct CellSpurs
 			be_t<u64> arg; // spu argument
 			be_t<u32> size;
+			atomic_t<u8> uniqueId; // The unique id is the same for all workloads with the same addr
 			u8 pad[3];
 			u8 priority[8];
 		};
@@ -622,14 +623,12 @@ struct CellSpursEventFlag
 
 union CellSpursTaskArgument
 {
 	be_t<u32> u32[4];
 	be_t<u64> u64[2];
+	be_t<u128> _u128;
 };
 
 union CellSpursTaskLsPattern
 {
 	be_t<u32> u32[4];
 	be_t<u64> u64[2];
+	be_t<u128> _u128;
 };
 
 struct CellSpursTaskset
@@ -1120,7 +1120,7 @@ void spursTasksetStartTask(SPUThread & spu, CellSpursTaskArgument & taskArgs) {
 	auto taskset = vm::get_ptr<CellSpursTaskset>(spu.ls_offset + 0x2700);
 
 	spu.GPR[2].clear();
-	spu.GPR[3] = u128::from64(taskArgs.u64[0], taskArgs.u64[1]);
+	spu.GPR[3] = taskArgs._u128;
 	spu.GPR[4]._u64[1] = taskset->m.args;
 	spu.GPR[4]._u64[0] = taskset->m.spurs.addr();
 	for (auto i = 5; i < 128; i++) {
@@ -1359,7 +1359,7 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
 	u32 allocLsBlocks = taskInfo->context_save_storage_and_alloc_ls_blocks & 0x7F;
 	u32 lsBlocks = 0;
 	for (auto i = 0; i < 128; i++) {
-		if (taskInfo->ls_pattern.u64[i < 64 ? 0 : 1] & (0x8000000000000000ull >> i)) {
+		if (taskInfo->ls_pattern._u128.value()._bit[i]) {
 			lsBlocks++;
 		}
 	}
@@ -1370,7 +1370,7 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
 
 	// Make sure the stack area is specified in the ls pattern
 	for (auto i = (ctxt->savedContextSp.value()._u32[3]) >> 11; i < 128; i++) {
-		if ((taskInfo->ls_pattern.u64[i < 64 ? 0 : 1] & (0x8000000000000000ull >> i)) == 0) {
+		if (taskInfo->ls_pattern._u128.value()._bit[i] == false) {
 			return CELL_SPURS_TASK_ERROR_STAT;
 		}
 	}
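The loop's starting index comes straight from the saved stack pointer: LS blocks are 2 KB (1 << 11 bytes), so shifting the SP right by 11 gives the block containing the top of the stack, and every block from there to the end of the 256 KB local storage must be present in the pattern or the task's stack cannot be saved and restored. A self-contained sketch of that check, assuming the MSB-first layout described earlier:

// Illustrative sketch, not part of the commit.
#include <cstdint>

// True if every 2 KB block from the one containing 'sp' up to the end of the
// 256 KB local storage is marked in an MSB-first pattern (pattern[1] = bits 0..63).
bool stack_area_covered(const uint64_t pattern[2], uint32_t sp)
{
	for (uint32_t i = sp >> 11; i < 128; i++) // 2 KB blocks: address / 0x800
	{
		if ((pattern[1 - (i >> 6)] & (0x8000000000000000ull >> (i & 0x3F))) == 0)
		{
			return false; // a block under the stack is not saved/restored
		}
	}
	return true;
}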
@@ -1390,8 +1390,7 @@ s32 spursTasketSaveTaskContext(SPUThread & spu) {
 
 	// Save LS context
 	for (auto i = 6; i < 128; i++) {
-		bool shouldStore = taskInfo->ls_pattern.u64[i < 64 ? 0 : 1] & (0x8000000000000000ull >> i) ? true : false;
-		if (shouldStore) {
+		if (taskInfo->ls_pattern._u128.value()._bit[i]) {
 			// TODO: Combine DMA requests for consecutive blocks into a single request
 			spursDma(spu, MFC_PUT_CMD, contextSaveStorage + 0x400 + ((i - 6) << 11), CELL_SPURS_TASK_TOP + ((i - 6) << 11), 0x800/*size*/, ctxt->dmaTagId);
 		}
@@ -1475,8 +1474,7 @@ void spursTasksetDispatch(SPUThread & spu) {
 	}
 
 	// If the entire LS is saved then there is no need to load the ELF as it will be saved in the context save area as well
-	if (taskInfo->ls_pattern.u64[1] != 0xFFFFFFFFFFFFFFFFull ||
-		(taskInfo->ls_pattern.u64[0] | 0xFC00000000000000ull) != 0xFFFFFFFFFFFFFFFFull) {
+	if (taskInfo->ls_pattern._u128.value() != u128::from64r(0x03FFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull)) {
 		// Load the ELF
 		u32 entryPoint;
 		if (spursTasksetLoadElf(spu, &entryPoint, nullptr, taskInfo->elf_addr.addr(), true) != CELL_OK) {
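The single u128 comparison replaces the old two-word test: u128::from64r(0x03FFFFFFFFFFFFFFull, 0xFFFFFFFFFFFFFFFFull) is the pattern with every bit set except bits 0-5, i.e. every block outside the reserved SPURS area. If a task saves all of those blocks, its entire image already lives in the context save area, so the ELF does not have to be reloaded. A quick sketch showing how that constant can be derived instead of hard-coded (plain words, high half first; purely illustrative):

// Illustrative sketch, not part of the commit.
#include <cassert>
#include <cstdint>

int main()
{
	// "Every block except the first six reserved ones": start from all ones
	// and clear the top six bits of the high word.
	const uint64_t full_hi = ~0ull >> 6; // 0x03FFFFFFFFFFFFFFull
	const uint64_t full_lo = ~0ull;      // 0xFFFFFFFFFFFFFFFFull

	assert(full_hi == 0x03FFFFFFFFFFFFFFull);
	assert(full_lo == 0xFFFFFFFFFFFFFFFFull);
	return 0;
}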
@@ -1489,8 +1487,7 @@ void spursTasksetDispatch(SPUThread & spu) {
 	u64 contextSaveStorage = taskInfo->context_save_storage_and_alloc_ls_blocks & 0xFFFFFFFFFFFFFF80ull;
 	spursDma(spu, MFC_GET_CMD, contextSaveStorage, 0x2C80/*LSA*/, 0x380/*size*/, ctxt->dmaTagId);
 	for (auto i = 6; i < 128; i++) {
-		bool shouldLoad = taskInfo->ls_pattern.u64[i < 64 ? 0 : 1] & (0x8000000000000000ull >> i) ? true : false;
-		if (shouldLoad) {
+		if (taskInfo->ls_pattern._u128.value()._bit[i]) {
			// TODO: Combine DMA requests for consecutive blocks into a single request
 			spursDma(spu, MFC_GET_CMD, contextSaveStorage + 0x400 + ((i - 6) << 11), CELL_SPURS_TASK_TOP + ((i - 6) << 11), 0x800/*size*/, ctxt->dmaTagId);
 		}