vm::atomic -> atomic_t

Nekotekina 2014-09-27 22:49:33 +04:00
parent e40776ba79
commit ae17ef4d68
15 changed files with 394 additions and 258 deletions

View File

@@ -188,3 +188,37 @@ static __forceinline uint64_t InterlockedXor(volatile uint64_t* dest, uint64_t v
return _InterlockedXor64((volatile long long*)dest, value);
}
#endif
// count leading zeros in a 32-bit value (returns 32 for zero input)
static __forceinline uint32_t cntlz32(uint32_t arg)
{
#if defined(__GNUG__)
return arg ? __builtin_clz(arg) : 32; // __builtin_clz(0) is undefined; __builtin_clzl would miscount where long is 64-bit
#else
unsigned long res;
if (!_BitScanReverse(&res, arg))
{
return 32;
}
else
{
return res ^ 31;
}
#endif
}
// count leading zeros in a 64-bit value (returns 64 for zero input)
static __forceinline uint64_t cntlz64(uint64_t arg)
{
#if defined(__GNUG__)
return arg ? __builtin_clzll(arg) : 64; // __builtin_clzll(0) is undefined
#else
unsigned long res;
if (!_BitScanReverse64(&res, arg))
{
return 64;
}
else
{
return res ^ 63;
}
#endif
}
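
As a sanity check on the semantics above (32 for zero input, otherwise the number of leading zero bits), here is a small standalone test; the reference loop and main are illustrative, not part of the commit:

#include <cassert>
#include <cstdint>

// portable reference: count leading zeros of a 32-bit value, 32 for zero input
static uint32_t cntlz32_ref(uint32_t arg)
{
    uint32_t n = 0;
    while (n < 32 && !(arg & (0x80000000u >> n))) n++;
    return n;
}

int main()
{
    assert(cntlz32_ref(0) == 32);           // zero input: defined result, unlike the raw builtin
    assert(cntlz32_ref(1) == 31);           // only bit 0 set
    assert(cntlz32_ref(0x80000000u) == 0);  // top bit set
    assert(cntlz32_ref(0x00010000u) == 15); // bit 16 set
}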

View File

@@ -1,5 +1,5 @@
#pragma once
#include "Emu/Memory/vm_atomic.h"
#include "Emu/Memory/atomic_type.h"
bool SM_IsAborted();
void SM_Sleep();
@@ -25,9 +25,9 @@ template
>
class SMutexBase
{
static_assert(sizeof(T) == sizeof(vm::atomic_le<T>), "Invalid SMutexBase type");
static_assert(sizeof(T) == sizeof(atomic_le_t<T>), "Invalid SMutexBase type");
T owner;
typedef vm::atomic_le<T> AT;
typedef atomic_le_t<T> AT;
public:
static const T GetFreeValue()

View File

@@ -0,0 +1,174 @@
#pragma once
template<typename T, size_t size = sizeof(T)>
struct _to_atomic
{
static_assert(size == 1 || size == 2 || size == 4 || size == 8, "Invalid atomic type");
typedef T type;
};
template<typename T>
struct _to_atomic<T, 1>
{
typedef uint8_t type;
};
template<typename T>
struct _to_atomic<T, 2>
{
typedef uint16_t type;
};
template<typename T>
struct _to_atomic<T, 4>
{
typedef uint32_t type;
};
template<typename T>
struct _to_atomic<T, 8>
{
typedef uint64_t type;
};
template<typename T>
class _atomic_base
{
typedef typename _to_atomic<T, sizeof(T)>::type atomic_type;
atomic_type data;
public:
// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
__forceinline const T compare_and_swap(const T& cmp, const T& exch) volatile
{
const atomic_type res = InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp));
return (T&)res;
}
// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
__forceinline bool compare_and_swap_test(const T& cmp, const T& exch) volatile
{
return InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp)) == (atomic_type&)(cmp);
}
// read data with memory barrier
__forceinline const T read_sync() const volatile
{
const atomic_type res = InterlockedCompareExchange(const_cast<volatile atomic_type*>(&data), 0, 0);
return (T&)res;
}
// atomically replace data with exch, return previous data value
__forceinline const T exchange(const T& exch) volatile
{
const atomic_type res = InterlockedExchange(&data, (atomic_type&)(exch));
return (T&)res;
}
// read data without memory barrier
__forceinline const T read_relaxed() const volatile
{
return (T&)data;
}
// write data without memory barrier
__forceinline void write_relaxed(const T& value) volatile
{
data = (atomic_type&)(value);
}
// perform atomic operation on data
template<typename FT> __forceinline void atomic_op(const FT atomic_proc) volatile
{
while (true)
{
const T old = read_relaxed();
T _new = old;
atomic_proc(_new); // function should accept reference to T type
if (compare_and_swap_test(old, _new)) return;
}
}
// perform atomic operation on data with special exit condition (if intermediate result != proceed_value)
template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
{
while (true)
{
const T old = read_relaxed();
T _new = old;
RT res = (RT)atomic_proc(_new); // function should accept reference to T type and return some value
if (res != proceed_value) return res;
if (compare_and_swap_test(old, _new)) return proceed_value;
}
}
template<typename FT> __forceinline void direct_op(const FT direct_proc) volatile
{
direct_proc((T&)data);
}
// atomic bitwise OR, returns previous data
__forceinline const T _or(const T& right) volatile
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right));
return (T&)res;
}
// atomic bitwise AND, returns previous data
__forceinline const T _and(const T& right) volatile
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right));
return (T&)res;
}
// atomic bitwise XOR, returns previous data
__forceinline const T _xor(const T& right) volatile
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right));
return (T&)res;
}
__forceinline const T operator |= (const T& right) volatile
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right)) | (atomic_type&)(right);
return (T&)res;
}
__forceinline const T operator &= (const T& right) volatile
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right)) & (atomic_type&)(right);
return (T&)res;
}
__forceinline const T operator ^= (const T& right) volatile
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right)) ^ (atomic_type&)(right);
return (T&)res;
}
};
template<typename T> struct atomic_le_t : public _atomic_base<T>
{
};
template<typename T> struct atomic_be_t : public _atomic_base<typename to_be_t<T>::type>
{
};
namespace ps3
{
template<typename T> struct atomic_t : public atomic_be_t<T>
{
};
}
namespace psv
{
template<typename T> struct atomic_t : public atomic_le_t<T>
{
};
}
using namespace ps3;
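
A minimal usage sketch of the new type (assumes the definitions above are in scope; the counter and the lambda are hypothetical):

atomic_le_t<uint32_t> counter; // native-endian atomic wrapper defined above

void example()
{
    counter.write_relaxed(0); // plain store, no barrier
    counter.exchange(5);      // atomic swap, returns the previous value
    counter._or(0x10);        // atomic fetch-OR, returns the previous value

    // atomic_op runs a CAS loop: the lambda mutates a private copy, and
    // compare_and_swap_test publishes it only if data was not changed concurrently
    counter.atomic_op([](uint32_t& v)
    {
        v = v * 2 + 1;
    });
}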

View File

@@ -205,4 +205,4 @@ namespace vm
#include "vm_ref.h"
#include "vm_ptr.h"
#include "vm_var.h"
#include "vm_atomic.h"
#include "atomic_type.h"

View File

@@ -1,172 +0,0 @@
#pragma once
namespace vm
{
template<typename T, size_t size = sizeof(T)>
struct _to_atomic
{
static_assert(size == 1 || size == 2 || size == 4 || size == 8, "Invalid atomic type");
typedef T type;
};
template<typename T>
struct _to_atomic<T, 1>
{
typedef uint8_t type;
};
template<typename T>
struct _to_atomic<T, 2>
{
typedef uint16_t type;
};
template<typename T>
struct _to_atomic<T, 4>
{
typedef uint32_t type;
};
template<typename T>
struct _to_atomic<T, 8>
{
typedef uint64_t type;
};
template<typename T>
class _atomic_base
{
typedef typename _to_atomic<T, sizeof(T)>::type atomic_type;
atomic_type data;
public:
// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
__forceinline const T compare_and_swap(const T& cmp, const T& exch) volatile
{
const atomic_type res = InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp));
return (T&)res;
}
// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
__forceinline bool compare_and_swap_test(const T& cmp, const T& exch) volatile
{
return InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp)) == (atomic_type&)(cmp);
}
// read data with memory barrier
__forceinline const T read_sync() const volatile
{
const atomic_type res = InterlockedCompareExchange(const_cast<volatile atomic_type*>(&data), 0, 0);
return (T&)res;
}
// atomically replace data with exch, return previous data value
__forceinline const T exchange(const T& exch) volatile
{
const atomic_type res = InterlockedExchange(&data, (atomic_type&)(exch));
return (T&)res;
}
// read data without memory barrier
__forceinline const T read_relaxed() const volatile
{
return (T&)data;
}
// write data without memory barrier
__forceinline void write_relaxed(const T& value) volatile
{
data = (atomic_type&)(value);
}
// perform atomic operation on data
template<typename FT> __forceinline void atomic_op(const FT atomic_proc) volatile
{
while (true)
{
const T old = read_relaxed();
T _new = old;
atomic_proc(_new); // function should accept reference to T type
if (compare_and_swap_test(old, _new)) return;
}
}
// perform atomic operation on data with special exit condition (if intermediate result != proceed_value)
template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
{
while (true)
{
const T old = read_relaxed();
T _new = old;
RT res = (RT)atomic_proc(_new); // function should accept reference to T type and return some value
if (res != proceed_value) return res;
if (compare_and_swap_test(old, _new)) return proceed_value;
}
}
// atomic bitwise OR, returns previous data
__forceinline const T _or(const T& right) volatile
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right));
return (T&)res;
}
// atomic bitwise AND, returns previous data
__forceinline const T _and(const T& right) volatile
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right));
return (T&)res;
}
// atomic bitwise XOR, returns previous data
__forceinline const T _xor(const T& right) volatile
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right));
return (T&)res;
}
__forceinline const T operator |= (const T& right) volatile
{
const atomic_type res = InterlockedOr(&data, (atomic_type&)(right)) | (atomic_type&)(right);
return (T&)res;
}
__forceinline const T operator &= (const T& right) volatile
{
const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right)) & (atomic_type&)(right);
return (T&)res;
}
__forceinline const T operator ^= (const T& right) volatile
{
const atomic_type res = InterlockedXor(&data, (atomic_type&)(right)) ^ (atomic_type&)(right);
return (T&)res;
}
};
template<typename T> struct atomic_le : public _atomic_base<T>
{
};
template<typename T> struct atomic_be : public _atomic_base<typename to_be_t<T>::type>
{
};
namespace ps3
{
template<typename T> struct atomic : public atomic_be<T>
{
};
}
namespace psv
{
template<typename T> struct atomic : public atomic_le<T>
{
};
}
using namespace ps3;
}

View File

@@ -92,7 +92,7 @@ s64 spursInit(
if (!isSecond)
{
spurs->m.unk0 = 0xffff;
spurs->m.wklMask.write_relaxed(be_t<u32>::make(0xffff));
}
spurs->m.unk6[0xC] = 0;
spurs->m.unk6[0xD] = 0;
@@ -114,7 +114,7 @@ s64 spursInit(
{
sem = semaphore_create(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuWkl");
assert(sem && ~sem); // should roll back and return the error if semaphore creation failed
spurs->m.sub1[i].sem = sem;
spurs->m.wklF1[i].sem = sem;
}
if (isSecond)
{
@@ -122,7 +122,7 @@ s64 spursInit(
{
sem = semaphore_create(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuWkl");
assert(sem && ~sem);
spurs->m.sub2[i].sem = sem;
spurs->m.wklF2[i].sem = sem;
}
}
sem = semaphore_create(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuPrv");
@@ -131,7 +131,10 @@ s64 spursInit(
spurs->m.unk11 = -1;
spurs->m.unk12 = -1;
spurs->m.unk13 = 0;
spurs->m.nSpus = nSpus;
spurs->m.x70.direct_op([nSpus](CellSpurs::_sub_x70& x70)
{
x70.nSpus = nSpus;
});
spurs->m.spuPriority = spuPriority;
#ifdef PRX_DEBUG
assert(spu_image_import(spurs->m.spuImg, vm::read32(libsre_rtoc - (isSecond ? 0x7E94 : 0x7E98)), 1) == CELL_OK);
@@ -184,8 +187,11 @@ s64 spursInit(
assert(lwmutex_create(spurs->m.mutex, SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, *(u64*)"_spuPrv") == CELL_OK);
assert(lwcond_create(spurs->m.cond, spurs->m.mutex, *(u64*)"_spuPrv") == CELL_OK);
spurs->m.flags1 = (flags & SAF_EXIT_IF_NO_WORK) << 7 | (isSecond ? 0x40 : 0);
spurs->m.unk15 = -1;
spurs->m.x70.direct_op([flags, isSecond](CellSpurs::_sub_x70& x70)
{
x70.flags1 = (flags & SAF_EXIT_IF_NO_WORK) << 7 | (isSecond ? 0x40 : 0);
x70.unk7 = -1;
});
spurs->m.unk18 = -1;
spurs->_u8[0xD64] = 0;
spurs->_u8[0xD65] = 0;
@@ -798,16 +804,85 @@ s32 spursAddWorkload(
const u8 priorityTable[],
u32 minContention,
u32 maxContention,
const char* nameClass,
const char* nameInstance,
vm::ptr<const char> nameClass,
vm::ptr<const char> nameInstance,
vm::ptr<CellSpursShutdownCompletionEventHook> hook,
vm::ptr<void> hookArg)
{
#ifdef PRX_DEBUG
return cb_call<s32, vm::ptr<CellSpurs>, vm::ptr<u32>, vm::ptr<const void>, u32, u64, u32, u32, u32, u32, u32, u32, u32>(GetCurrentPPUThread(), libsre + 0x96EC, libsre_rtoc,
spurs, wid, pm, size, data, Memory.RealToVirtualAddr(priorityTable), minContention, maxContention,
Memory.RealToVirtualAddr(nameClass), Memory.RealToVirtualAddr(nameInstance), hook.addr(), hookArg.addr());
nameClass.addr(), nameInstance.addr(), hook.addr(), hookArg.addr());
#endif
if (!spurs || !wid || !pm)
{
return CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER;
}
if (spurs.addr() % 128 || pm.addr() % 16)
{
return CELL_SPURS_POLICY_MODULE_ERROR_ALIGN;
}
if (minContention == 0 || *(u64*)priorityTable & 0xf0f0f0f0f0f0f0f0ull) // check if some priority > 15
{
return CELL_SPURS_POLICY_MODULE_ERROR_INVAL;
}
if (spurs->m.unk21.ToBE())
{
return CELL_SPURS_POLICY_MODULE_ERROR_STAT;
}
u32 wnum;
const u32 wmax = spurs->m.x70.read_relaxed().flags1 & 0x40 ? 0x20 : 0x10; // check isSecond (TODO: check whether this can change)
spurs->m.wklMask.atomic_op([spurs, wmax, &wnum](be_t<u32>& value)
{
wnum = cntlz32(~(u32)value); // find the first clear bit (first free workload slot)
if (wnum < wmax)
{
value |= (u32)(0x80000000ull >> wnum); // set workload bit
}
});
*wid = wnum; // store workload id
if (wnum >= wmax)
{
return CELL_SPURS_POLICY_MODULE_ERROR_AGAIN;
}
if (wnum <= 15)
{
assert((spurs->m.wklA1[wnum] & 0xf) == 0);
assert((spurs->m.wklB1[wnum] & 0xf) == 0);
spurs->m.wklC1[wnum] = 1;
spurs->m.wklD1[wnum] = 0;
spurs->m.wklE1[wnum] = 0;
spurs->m.wklG1[wnum].wklPm = pm;
spurs->m.wklG1[wnum].wklArg = data;
spurs->m.wklG1[wnum].wklSize = size;
spurs->m.wklG1[wnum].wklPriority = *(be_t<u64>*)priorityTable;
spurs->m.wklH1[wnum].nameClass = nameClass;
spurs->m.wklH1[wnum].nameInstance = nameInstance;
memset(spurs->m.wklF1[wnum].unk0, 0, 0x18);
// (preserve semaphore id)
memset(spurs->m.wklF1[wnum].unk1, 0, 0x60);
if (hook)
{
spurs->m.wklF1[wnum].hook = hook;
spurs->m.wklF1[wnum].hookArg = hookArg;
}
spurs->m.wklY1[wnum] = 0;
if (spurs->m.x70.read_relaxed().flags1 & 0x40)
{
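// (isSecond case: nothing is set up here yet)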
}
else
{
spurs->m.wklZ1[wnum] = 0;
spurs->m.wklMinCnt[wnum] = minContention > 8 ? 8 : 0;
}
}
else
{
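// TODO? (workload ids 16..31, used when isSecond, are not set up here)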
}
return CELL_OK;
}
@@ -838,8 +913,8 @@ s64 cellSpursAddWorkload(
*priorityTable,
minContention,
maxContention,
nullptr,
nullptr,
{},
{},
{},
{});
}
@@ -965,8 +1040,8 @@ s64 cellSpursAddWorkloadWithAttribute(vm::ptr<CellSpurs> spurs, vm::ptr<u32> wid
attr->m.priority,
attr->m.minContention,
attr->m.maxContention,
attr->m.nameClass.get_ptr(),
attr->m.nameInstance.get_ptr(),
vm::ptr<const char>::make(attr->m.nameClass.addr()),
vm::ptr<const char>::make(attr->m.nameInstance.addr()),
vm::ptr<CellSpursShutdownCompletionEventHook>::make(attr->m.hook.addr()),
vm::ptr<void>::make(attr->m.hookArg.addr()));
}
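
The workload-id allocation above (wklMask.atomic_op with cntlz32) claims the first clear bit of a shared 32-bit mask in one CAS loop. The same technique in standalone form, sketched over std::atomic with the GCC builtin the commit uses (names are illustrative):

#include <atomic>
#include <cstdint>

// claim the first clear bit counted from the MSB (the cntlz32 convention);
// returns the claimed index, or `limit` if no slot below the limit is free
uint32_t claim_slot(std::atomic<uint32_t>& mask, uint32_t limit)
{
    uint32_t old = mask.load();
    while (true)
    {
        // first zero bit from the top == count of leading ones == clz of the inverted mask
        const uint32_t idx = (old == 0xffffffffu) ? 32 : __builtin_clz(~old);
        if (idx >= limit)
        {
            return limit; // mask is full (below the limit)
        }
        if (mask.compare_exchange_weak(old, old | (0x80000000u >> idx)))
        {
            return idx; // bit claimed atomically
        }
        // on failure, 'old' was refreshed with the current mask; retry
    }
}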

View File

@@ -104,6 +104,8 @@ class SPURSManager;
class SPURSManagerEventFlag;
class SPURSManagerTaskset;
struct CellSpurs;
enum SpursAttrFlags : u32
{
SAF_NONE = 0x0,
@@ -158,6 +160,8 @@ struct CellSpursAttribute
};
};
typedef void(*CellSpursShutdownCompletionEventHook)(vm::ptr<CellSpurs>, u32 wid, vm::ptr<void> arg);
// Core CellSpurs structures
struct CellSpurs
{
@@ -172,7 +176,11 @@ struct CellSpurs
u8 unk0[0x20];
be_t<u64> sem; // 0x20
u8 unk_[0x58];
u8 unk1[0x8];
u32 pad;
vm::bptr<CellSpursShutdownCompletionEventHook, 1, u64> hook; // 0x30
vm::bptr<void, 1, u64> hookArg; // 0x38
u8 unk2[0x40];
};
struct _sub_str2
@@ -187,6 +195,41 @@ struct CellSpurs
u8 unk_[0x68];
};
struct _sub_str3
{
static const uint size = 0x20;
vm::bptr<const void, 1, u64> wklPm; // policy module
be_t<u64> wklArg; // spu argument
be_t<u32> wklSize;
be_t<u64> wklPriority;
};
struct _sub_str4
{
static const uint size = 0x10;
vm::bptr<const char, 1, u64> nameClass;
vm::bptr<const char, 1, u64> nameInstance;
};
struct _sub_x70
{
u8 unk0;
u8 unk1;
u8 unk2;
u8 unk3;
u8 flags1;
u8 unk5;
u8 nSpus;
u8 unk7;
};
struct _sub_x78
{
u64 unk;
};
union
{
// raw data
@@ -196,26 +239,31 @@ struct CellSpurs
// real data
struct
{
u8 unknown[0x6C];
u8 wklY1[0x10];
u8 wklZ1[0x10]; // 0x10
u8 wklA1[0x10]; // 0x20
u8 wklB1[0x10]; // 0x30
u8 wklMinCnt[0x10]; // 0x40
u8 unknown0[0x6C - 0x50];
be_t<u32> unk18; // 0x6C
u8 unk17[4]; // 0x70
u8 flags1; // 0x74
u8 unk16; // 0x75
u8 nSpus; // 0x76
u8 unk15; // 0x77
u8 unknown0[0xB0 - 0x78];
be_t<u32> unk0; // 0x0B0
atomic_t<_sub_x70> x70; // 0x70
atomic_t<_sub_x78> x78; // 0x78
u8 wklC1[0x10]; // 0x80
u8 wklD1[0x10]; // 0x90
u8 wklE1[0x10]; // 0xA0
atomic_t<u32> wklMask;// 0xB0
u8 unknown2[0xC0 - 0xB4];
u8 unk6[0x10]; // 0x0C0 (SPU port at 0xc9)
u8 unk6[0x10]; // 0xC0 (SPU port at 0xc9)
u8 unknown1[0x100 - 0x0D0];
_sub_str1 sub1[0x10]; // 0x100
_sub_str1 wklF1[0x10];// 0x100
be_t<u64> unk22; // 0x900
u8 unknown7[0x980 - 0x908];
be_t<u64> semPrv; // 0x980
be_t<u32> unk11; // 0x988
be_t<u32> unk12; // 0x98C
be_t<u64> unk13; // 0x990
u8 unknown4[0xD00 - 0x998];
u8 unknown4[0xB00 - 0x998];
_sub_str3 wklG1[0x10];// 0xB00
be_t<u64> unk7; // 0xD00
be_t<u64> unk8; // 0xD08
be_t<u32> unk9; // 0xD10
@@ -228,8 +276,8 @@ struct CellSpurs
u8 unknown3[0xD5C - 0xD54];
be_t<u32> queue; // 0xD5C
be_t<u32> port; // 0xD60
vm::atomic<u8> unk19[4]; // 0xD64 (used in wakeup)
vm::atomic<u32> enableEH; // 0xD68
atomic_t<u8> unk19[4]; // 0xD64 (used in wakeup)
atomic_t<u32> enableEH; // 0xD68
be_t<u32> unk21; // 0xD6C
sys_spu_image spuImg; // 0xD70
be_t<u32> flags; // 0xD80
@@ -240,13 +288,14 @@ struct CellSpurs
be_t<u32> unk5; // 0xD9C
be_t<u32> revision; // 0xDA0
be_t<u32> sdkVersion; // 0xDA4
vm::atomic<u64> spups;// 0xDA8
atomic_t<u64> spups;// 0xDA8
sys_lwmutex_t mutex; // 0xDB0
sys_lwcond_t cond; // 0xDC8
u8 unknown9[0xF00 - 0xDD0];
u8 unknown9[0xE00 - 0xDD0];
_sub_str4 wklH1[0x10];// 0xE00
_sub_str2 sub3; // 0xF00
u8 unknown6[0x1200 - 0xF80];
_sub_str1 sub2[0x10]; // 0x1200
_sub_str1 wklF2[0x10];// 0x1200
// ...
} m;
@@ -260,8 +309,6 @@ struct CellSpurs
typedef CellSpurs CellSpurs2;
typedef void(*CellSpursShutdownCompletionEventHook)(vm::ptr<CellSpurs>, u32 wid, vm::ptr<void> arg);
struct CellSpursWorkloadAttribute
{
static const uint align = 8;
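
The reverse-engineered sub-structures above are easy to pin down with size checks in the same style the codebase already uses for the CellSync types; these two asserts are illustrative additions, not part of the commit:

static_assert(sizeof(CellSpurs::_sub_x70) == 8, "CellSpurs::_sub_x70: wrong size");
static_assert(sizeof(CellSpurs::_sub_str4) == 0x10, "CellSpurs::_sub_str4: wrong size");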

View File

@@ -1158,7 +1158,7 @@ s32 syncLFQueueCompletePushPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer,
}
s32 var9_ = 15 - var1;
// calculate (1 slw (15 - var1))
// calculate (u16)(1 slw (15 - var1))
if (var9_ & 0x30)
{
var9_ = 0;
@@ -1167,18 +1167,7 @@
{
var9_ = 1 << var9_;
}
s32 var9 = ~(var9_ | (u16)push3.m_h6);
// count leading zeros in u16
{
u16 v = var9;
for (var9 = 0; var9 < 16; var9++)
{
if (v & (1 << (15 - var9)))
{
break;
}
}
}
s32 var9 = cntlz32((u32)(u16)~(var9_ | (u16)push3.m_h6)) - 16; // count leading zeros in u16
s32 var5 = (s32)(u16)push3.m_h6 | var9_;
if (var9 & 0x30)
@@ -1548,7 +1537,7 @@ s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, c
}
s32 var9_ = 15 - var1;
// calculate (1 slw (15 - var1))
// calculate (u16)(1 slw (15 - var1))
if (var9_ & 0x30)
{
var9_ = 0;
@@ -1557,18 +1546,7 @@
{
var9_ = 1 << var9_;
}
s32 var9 = ~(var9_ | (u16)pop3.m_h2);
// count leading zeros in u16
{
u16 v = var9;
for (var9 = 0; var9 < 16; var9++)
{
if (v & (1 << (15 - var9)))
{
break;
}
}
}
s32 var9 = cntlz32((u32)(u16)~(var9_ | (u16)pop3.m_h2)) - 16; // count leading zeros in u16
s32 var5 = (s32)(u16)pop3.m_h2 | var9_;
if (var9 & 0x30)
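
The rewrite above relies on a simple identity: zero-extending a u16 to u32 adds exactly 16 leading zeros, so cntlz32((u32)(u16)x) - 16 equals the leading-zero count within 16 bits that the removed loop computed. An exhaustive standalone check (illustrative; uses the GCC builtin as a cntlz32 stand-in):

#include <cassert>
#include <cstdint>

static uint32_t clz16_loop(uint16_t v) // the removed loop, kept as a reference
{
    uint32_t n;
    for (n = 0; n < 16; n++)
    {
        if (v & (1 << (15 - n))) break;
    }
    return n;
}

static uint32_t clz16_cntlz(uint16_t v) // the new formulation
{
    return (v ? __builtin_clz((uint32_t)v) : 32) - 16;
}

int main()
{
    for (uint32_t v = 0; v <= 0xffff; v++)
    {
        assert(clz16_loop((uint16_t)v) == clz16_cntlz((uint16_t)v));
    }
}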

View File

@@ -37,7 +37,7 @@ struct CellSyncMutex
be_t<u16> m_acq; // acquire order (increased when mutex is locked)
};
vm::atomic<data_t> data;
atomic_t<data_t> data;
};
static_assert(sizeof(CellSyncMutex) == 4, "CellSyncMutex: wrong size");
@@ -50,7 +50,7 @@ struct CellSyncBarrier
be_t<s16> m_count;
};
vm::atomic<data_t> data;
atomic_t<data_t> data;
};
static_assert(sizeof(CellSyncBarrier) == 4, "CellSyncBarrier: wrong size");
@@ -63,7 +63,7 @@ struct CellSyncRwm
be_t<u16> m_writers;
};
vm::atomic<data_t> data;
atomic_t<data_t> data;
be_t<u32> m_size;
vm::bptr<void, 1, u64> m_buffer;
};
@@ -78,7 +78,7 @@ struct CellSyncQueue
be_t<u32> m_v2;
};
vm::atomic<data_t> data;
atomic_t<data_t> data;
be_t<u32> m_size;
be_t<u32> m_depth;
vm::bptr<u8, 1, u64> m_buffer;
@@ -137,13 +137,13 @@ struct CellSyncLFQueue
union
{
vm::atomic<pop1_t> pop1; // 0x0
vm::atomic<pop3_t> pop3;
atomic_t<pop1_t> pop1; // 0x0
atomic_t<pop3_t> pop3;
};
union
{
vm::atomic<push1_t> push1; // 0x8
vm::atomic<push3_t> push3;
atomic_t<push1_t> push1; // 0x8
atomic_t<push3_t> push3;
};
be_t<u32> m_size; // 0x10
be_t<u32> m_depth; // 0x14
@@ -151,10 +151,10 @@ struct CellSyncLFQueue
u8 m_bs[4]; // 0x20
be_t<CellSyncQueueDirection> m_direction; // 0x24
be_t<u32> m_v1; // 0x28
vm::atomic<u32> init; // 0x2C
vm::atomic<push2_t> push2; // 0x30
atomic_t<u32> init; // 0x2C
atomic_t<push2_t> push2; // 0x30
be_t<u16> m_hs1[15]; // 0x32
vm::atomic<pop2_t> pop2; // 0x50
atomic_t<pop2_t> pop2; // 0x50
be_t<u16> m_hs2[15]; // 0x52
vm::bptr<void, 1, u64> m_eaSignal; // 0x70
be_t<u32> m_v2; // 0x78
@@ -163,7 +163,7 @@ struct CellSyncLFQueue
static_assert(sizeof(CellSyncLFQueue) == 128, "CellSyncLFQueue: wrong size");
s32 syncMutexInitialize(vm::ptr<vm::atomic<CellSyncMutex>> mutex);
s32 syncMutexInitialize(vm::ptr<CellSyncMutex> mutex);
s32 syncBarrierInitialize(vm::ptr<CellSyncBarrier> barrier, u16 total_count);
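
CellSyncMutex packs a ticket lock into a single 32-bit atomic: m_acq hands out tickets, m_rel admits them in order. The pattern, sketched native-endian over std::atomic (the real structure keeps both halves big-endian and goes through atomic_t; names here are illustrative):

#include <atomic>
#include <cstdint>

struct ticket_mutex
{
    // high 16 bits: acquire counter (m_acq), low 16 bits: release counter (m_rel)
    std::atomic<uint32_t> data{0};

    void lock()
    {
        // take a ticket: the carry out of bit 31 is discarded, so the counter wraps mod 2^16
        const uint16_t ticket = (uint16_t)(data.fetch_add(0x10000u) >> 16);
        // spin until the release counter reaches our ticket
        while ((uint16_t)data.load() != ticket) {}
    }

    void unlock()
    {
        // bump the release counter without carrying into the acquire half
        uint32_t old = data.load();
        while (!data.compare_exchange_weak(old, (old & 0xffff0000u) | (uint16_t)(old + 1))) {}
    }
};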

View File

@@ -211,7 +211,7 @@ s32 sys_ppu_thread_create(vm::ptr<be_t<u64>> thread_id, u32 entry, u64 arg, s32
return CELL_OK;
}
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<vm::atomic<u32>> once_ctrl, vm::ptr<void(*)()> init)
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_t<u32>> once_ctrl, vm::ptr<void(*)()> init)
{
sys_ppu_thread.Warning("sys_ppu_thread_once(once_ctrl_addr=0x%x, init_addr=0x%x)", once_ctrl.addr(), init.addr());

View File

@@ -30,6 +30,6 @@ s32 sys_ppu_thread_get_stack_information(PPUThread& CPU, u32 info_addr);
s32 sys_ppu_thread_stop(u64 thread_id);
s32 sys_ppu_thread_restart(u64 thread_id);
s32 sys_ppu_thread_create(vm::ptr<be_t<u64>> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::ptr<const char> threadname);
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<vm::atomic<u32>> once_ctrl, vm::ptr<void(*)()> init);
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_t<u32>> once_ctrl, vm::ptr<void(*)()> init);
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<be_t<u64>> thread_id);
s32 sys_ppu_thread_rename(u64 thread_id, vm::ptr<const char> name);

View File

@@ -7,7 +7,7 @@
SysCallBase sys_spinlock("sys_spinlock");
void sys_spinlock_initialize(vm::ptr<vm::atomic<u32>> lock)
void sys_spinlock_initialize(vm::ptr<atomic_t<u32>> lock)
{
sys_spinlock.Log("sys_spinlock_initialize(lock_addr=0x%x)", lock.addr());
@@ -15,7 +15,7 @@ void sys_spinlock_initialize(vm::ptr<vm::atomic<u32>> lock)
lock->exchange(be_t<u32>::make(0));
}
void sys_spinlock_lock(vm::ptr<vm::atomic<u32>> lock)
void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock)
{
sys_spinlock.Log("sys_spinlock_lock(lock_addr=0x%x)", lock.addr());
@@ -39,7 +39,7 @@ void sys_spinlock_lock(vm::ptr<vm::atomic<u32>> lock)
}
}
s32 sys_spinlock_trylock(vm::ptr<vm::atomic<u32>> lock)
s32 sys_spinlock_trylock(vm::ptr<atomic_t<u32>> lock)
{
sys_spinlock.Log("sys_spinlock_trylock(lock_addr=0x%x)", lock.addr());
@@ -52,7 +52,7 @@ s32 sys_spinlock_trylock(vm::ptr<vm::atomic<u32>> lock)
return CELL_OK;
}
void sys_spinlock_unlock(vm::ptr<vm::atomic<u32>> lock)
void sys_spinlock_unlock(vm::ptr<atomic_t<u32>> lock)
{
sys_spinlock.Log("sys_spinlock_unlock(lock_addr=0x%x)", lock.addr());
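
The spinlock these syscalls implement maps naturally onto the new API; a sketch under the assumption that 0 means free and 1 means held (the emulator's actual lock value and guest-endianness handling may differ):

// hypothetical host-side helpers over the atomic_type.h API
bool spinlock_try_acquire(atomic_le_t<uint32_t>& lock)
{
    // succeed only if we atomically flip the lock from 0 (free) to 1 (held)
    return lock.compare_and_swap_test(0, 1);
}

void spinlock_release(atomic_le_t<uint32_t>& lock)
{
    lock.exchange(0); // full-barrier store of the free value
}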

View File

@@ -1,7 +1,7 @@
#pragma once
// SysCalls
void sys_spinlock_initialize(vm::ptr<vm::atomic<u32>> lock);
void sys_spinlock_lock(vm::ptr<vm::atomic<u32>> lock);
s32 sys_spinlock_trylock(vm::ptr<vm::atomic<u32>> lock);
void sys_spinlock_unlock(vm::ptr<vm::atomic<u32>> lock);
void sys_spinlock_initialize(vm::ptr<atomic_t<u32>> lock);
void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock);
s32 sys_spinlock_trylock(vm::ptr<atomic_t<u32>> lock);
void sys_spinlock_unlock(vm::ptr<atomic_t<u32>> lock);

View File

@@ -311,7 +311,7 @@
<ClInclude Include="Emu\Io\PadHandler.h" />
<ClInclude Include="Emu\Memory\Memory.h" />
<ClInclude Include="Emu\Memory\MemoryBlock.h" />
<ClInclude Include="Emu\Memory\vm_atomic.h" />
<ClInclude Include="Emu\Memory\atomic_type.h" />
<ClInclude Include="Emu\RSX\GCM.h" />
<ClInclude Include="Emu\RSX\GL\GLBuffers.h" />
<ClInclude Include="Emu\RSX\GL\GLFragmentProgram.h" />

View File

@@ -1219,7 +1219,7 @@
<ClInclude Include="Emu\SysCalls\SyncPrimitivesManager.h">
<Filter>Emu\SysCalls</Filter>
</ClInclude>
<ClInclude Include="Emu\Memory\vm_atomic.h">
<ClInclude Include="Emu\Memory\atomic_type.h">
<Filter>Emu\Memory</Filter>
</ClInclude>
</ItemGroup>