Mirror of https://github.com/libretro/RetroArch, synced 2025-02-02 23:54:06 +00:00
420b7913d4

2820ab0b51 Merge pull request #1076 from KhronosGroup/bitcast-pre-330-glsl
63bcbd511e GLSL: Need extension to use bitcast on GLSL < 330.
9f3bebe3d0 Merge pull request #1075 from lifpan/master
b11c20fc1d Remove unreasonable assertion for OpTypeImage Sampled parameter.
1a592b7c0f Merge pull request #1067 from cdavis5e/msl-scalar-block-layout
28454facbb MSL: Handle packed matrices.
ea5c0ed82f MSL: Fix alignment of packed types.
44f688bf0b Merge pull request #1070 from KhronosGroup/fix-1066
25c74b324e Forget loop variable enables after emitting block chain.
6b010e0cbc Merge pull request #1069 from KhronosGroup/fix-1053
f6f849397e MSL: Re-roll array expressions in initializers.
e5fa7edfd6 MSL: Support scalar block layout.

git-subtree-dir: deps/SPIRV-Cross
git-subtree-split: 2820ab0b51bf5e4187435d904b34e762b988f48b
71 lines
4.2 KiB
Plaintext
#pragma clang diagnostic ignored "-Wunused-variable"

#include <metal_stdlib>
#include <simd/simd.h>
#include <metal_atomic>

using namespace metal;

struct SSBO
{
    uint u32;
    int i32;
};
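// Compute kernel: exercises relaxed atomic read-modify-write, exchange and
// compare-exchange operations on a device buffer (SSBO) and on threadgroup memory.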
kernel void main0(device SSBO& ssbo [[buffer(0)]])
{
    threadgroup uint shared_u32;
    threadgroup int shared_i32;
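    // Relaxed atomics on the uint member of the SSBO; each call returns the previous value.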
    uint _16 = atomic_fetch_add_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
    uint _18 = atomic_fetch_or_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
    uint _20 = atomic_fetch_xor_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
    uint _22 = atomic_fetch_and_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
    uint _24 = atomic_fetch_min_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
    uint _26 = atomic_fetch_max_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
    uint _28 = atomic_exchange_explicit((volatile device atomic_uint*)&ssbo.u32, 1u, memory_order_relaxed);
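    // Compare-and-exchange: the weak CAS can fail spuriously, so retry while the
    // observed value (_32) is still the expected 10u; on success 2u is stored.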
    uint _32;
    do
    {
        _32 = 10u;
    } while (!atomic_compare_exchange_weak_explicit((volatile device atomic_uint*)&ssbo.u32, &_32, 2u, memory_order_relaxed, memory_order_relaxed) && _32 == 10u);
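    // Same sequence on the int member of the SSBO.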
    int _36 = atomic_fetch_add_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _38 = atomic_fetch_or_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _40 = atomic_fetch_xor_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _42 = atomic_fetch_and_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _44 = atomic_fetch_min_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _46 = atomic_fetch_max_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _48 = atomic_exchange_explicit((volatile device atomic_int*)&ssbo.i32, 1, memory_order_relaxed);
    int _52;
    do
    {
        _52 = 10;
    } while (!atomic_compare_exchange_weak_explicit((volatile device atomic_int*)&ssbo.i32, &_52, 2, memory_order_relaxed, memory_order_relaxed) && _52 == 10);
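    // Seed the threadgroup variables, then repeat the same atomic operations on threadgroup memory.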
    shared_u32 = 10u;
    shared_i32 = 10;
    uint _57 = atomic_fetch_add_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
    uint _58 = atomic_fetch_or_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
    uint _59 = atomic_fetch_xor_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
    uint _60 = atomic_fetch_and_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
    uint _61 = atomic_fetch_min_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
    uint _62 = atomic_fetch_max_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
    uint _63 = atomic_exchange_explicit((volatile threadgroup atomic_uint*)&shared_u32, 1u, memory_order_relaxed);
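    // Threadgroup compare-and-exchange, using the same spurious-failure retry pattern as above.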
    uint _64;
    do
    {
        _64 = 10u;
    } while (!atomic_compare_exchange_weak_explicit((volatile threadgroup atomic_uint*)&shared_u32, &_64, 2u, memory_order_relaxed, memory_order_relaxed) && _64 == 10u);
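    // Int counterparts on threadgroup memory.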
    int _65 = atomic_fetch_add_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _66 = atomic_fetch_or_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _67 = atomic_fetch_xor_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _68 = atomic_fetch_and_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _69 = atomic_fetch_min_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _70 = atomic_fetch_max_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _71 = atomic_exchange_explicit((volatile threadgroup atomic_int*)&shared_i32, 1, memory_order_relaxed);
    int _72;
    do
    {
        _72 = 10;
    } while (!atomic_compare_exchange_weak_explicit((volatile threadgroup atomic_int*)&shared_i32, &_72, 2, memory_order_relaxed, memory_order_relaxed) && _72 == 10);
}