types.hpp: implement min_v<>, max_v<>, SignedInt, UnsignedInt, FPInt concepts

Restrict smax to signed values only, for consistency.
Clean up <climits> and <limits> includes.
Nekotekina 2021-05-22 21:46:10 +03:00
parent 4b239a0b87
commit 2491aad6f2
48 changed files with 190 additions and 168 deletions
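Editor's note: as a rough sketch (not part of the diff), the replacement pattern applied throughout this commit looks like the following, assuming the u16/u32/s32/f32 aliases and the umax/smin/min_v/max_v helpers defined in util/types.hpp below:

    u32 a = umax;                // was: u32 a = UINT32_MAX;
    u16 b = u16{umax};           // explicit width where the target type cannot be deduced (e.g. ternary operands)
    s32 c = smin;                // was: s32 c = INT32_MIN;
    static_assert(u32{umax} == 0xffff'ffffu);
    static_assert(min_v<s32> == -2147483647 - 1 && max_v<s32> == 2147483647);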

View File

@ -1,7 +1,6 @@
#pragma once
#include <string>
#include <limits>
#include "Utilities/StrFmt.h"
enum class cpu_disasm_mode
@ -93,7 +92,7 @@ protected:
{
const auto v = static_cast<std::make_signed_t<T>>(value);
if (v == std::numeric_limits<std::make_signed_t<T>>::min())
if (v == smin)
{
// for INTx_MIN
return fmt::format("-0x%x", v);

View File

@ -902,12 +902,12 @@ u32 cpu_thread::get_pc() const
case 0x55:
{
const auto ctrl = static_cast<const rsx::thread*>(this)->ctrl;
return ctrl ? ctrl->get : UINT32_MAX;
return ctrl ? ctrl->get.load() : umax;
}
default: break;
}
return pc ? atomic_storage<u32>::load(*pc) : UINT32_MAX;
return pc ? atomic_storage<u32>::load(*pc) : u32{umax};
}
u32* cpu_thread::get_pc2()

View File

@ -1375,7 +1375,7 @@ inline llvm_xor<T1, llvm_const_int<typename is_llvm_expr<T1>::type>> operator ^(
template <typename T1>
inline llvm_xor<T1, llvm_const_int<typename is_llvm_expr<T1>::type, true>> operator ~(T1&& a1)
{
return {a1, {UINT64_MAX}};
return {a1, {u64{umax}}};
}
template <typename A1, typename A2, llvm::CmpInst::Predicate UPred, typename T = llvm_common_t<A1, A2>>

View File

@ -361,7 +361,7 @@ void audio_port::tag(s32 offset)
last_tag_value[tag_nr] = -0.0f;
}
prev_touched_tag_nr = UINT32_MAX;
prev_touched_tag_nr = -1;
}
std::tuple<u32, u32, u32, u32> cell_audio_thread::count_port_buffer_tags()
@ -395,13 +395,13 @@ std::tuple<u32, u32, u32, u32> cell_audio_thread::count_port_buffer_tags()
{
last_val = val;
retouched |= (tag_nr <= port.prev_touched_tag_nr) && port.prev_touched_tag_nr != UINT32_MAX;
retouched |= (tag_nr <= port.prev_touched_tag_nr) && port.prev_touched_tag_nr != umax;
last_touched_tag_nr = tag_nr;
}
}
// Decide whether the buffer is untouched, in progress, incomplete, or complete
if (last_touched_tag_nr == UINT32_MAX)
if (last_touched_tag_nr == umax)
{
// no tag has been touched yet
untouched++;

View File

@ -775,10 +775,7 @@ error_code cellGcmGetCurrentDisplayBufferId(vm::ptr<u8> id)
{
cellGcmSys.warning("cellGcmGetCurrentDisplayBufferId(id=*0x%x)", id);
if ((*id = rsx::get_current_renderer()->current_display_buffer) > UINT8_MAX)
{
fmt::throw_exception("Unexpected");
}
*id = ::narrow<u8>(rsx::get_current_renderer()->current_display_buffer);
return CELL_OK;
}

View File

@ -4608,8 +4608,7 @@ s32 cellSpursCreateJobChain(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, vm::ptr<C
const u64 prio = std::bit_cast<u64>(*priorities);
if (auto err = _spurs::check_job_chain_attribute(UINT32_MAX, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, prio, maxContention
, autoReadyCount, tag1, tag2, 0, 0, 0))
if (auto err = _spurs::check_job_chain_attribute(-1, jobChainEntry, sizeJobDescriptor, maxGrabbedJob, prio, maxContention, autoReadyCount, tag1, tag2, 0, 0, 0))
{
return err;
}

View File

@ -58,7 +58,7 @@ error_code sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
if ((lwmutex->attribute & SYS_SYNC_ATTR_PROTOCOL_MASK) == SYS_SYNC_RETRY)
{
return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, UINT32_MAX, 2);
return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, u32{umax}, 2);
}
if (lwmutex->vars.owner.load() == ppu.id)
@ -67,7 +67,7 @@ error_code sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
lwmutex->all_info++;
// call the syscall
if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, UINT32_MAX, 1))
if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, u32{umax}, 1))
{
if (ppu.test_stopped())
{
@ -95,7 +95,7 @@ error_code sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
}
// call the syscall
return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, UINT32_MAX, 2);
return _sys_lwcond_signal(ppu, lwcond->lwcond_queue, 0, u32{umax}, 2);
}
// if locking succeeded
@ -106,7 +106,7 @@ error_code sys_lwcond_signal(ppu_thread& ppu, vm::ptr<sys_lwcond_t> lwcond)
});
// call the syscall
if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, UINT32_MAX, 3))
if (error_code res = _sys_lwcond_signal(ppu, lwcond->lwcond_queue, lwmutex->sleep_queue, u32{umax}, 3))
{
if (ppu.test_stopped())
{

View File

@ -43,7 +43,7 @@ void lv2_timer_context::operator()()
{
// Set next expiration time and check again
const u64 _next = next + period;
expire.release(_next > next ? _next : UINT64_MAX);
expire.release(_next > next ? _next : umax);
continue;
}
@ -164,7 +164,7 @@ error_code _sys_timer_start(ppu_thread& ppu, u32 timer_id, u64 base_time, u64 pe
// sys_timer_start_periodic() will use current time (TODO: is it correct?)
const u64 expire = base_time ? base_time : start_time + period;
timer.expire = expire > start_time ? expire : UINT64_MAX;
timer.expire = expire > start_time ? expire : umax;
timer.period = period;
timer.state = SYS_TIMER_STATE_RUN;

View File

@ -94,7 +94,7 @@ namespace vm
void reservation_update(u32 addr)
{
u64 old = UINT64_MAX;
u64 old = -1;
const auto cpu = get_current_cpu_thread();
while (true)
@ -705,7 +705,7 @@ namespace vm
}
// Unsharing only happens on deallocation currently, so make sure all further refs are shared
shm->info = UINT32_MAX;
shm->info = 0xffff'ffff;
}
// Obtain existing pointer
@ -1643,11 +1643,11 @@ namespace vm
"vm::g_hook_addr = %p - %p\n"
"vm::g_stat_addr = %p - %p\n"
"vm::g_reservations = %p - %p\n",
g_base_addr, g_base_addr + UINT32_MAX,
g_sudo_addr, g_sudo_addr + UINT32_MAX,
g_base_addr, g_base_addr + 0xffff'ffff,
g_sudo_addr, g_sudo_addr + 0xffff'ffff,
g_exec_addr, g_exec_addr + 0x200000000 - 1,
g_hook_addr, g_hook_addr + 0x800000000 - 1,
g_stat_addr, g_stat_addr + UINT32_MAX,
g_stat_addr, g_stat_addr + 0xffff'ffff,
g_reservations, g_reservations + sizeof(g_reservations) - 1);
std::memset(&g_pages, 0, sizeof(g_pages));

View File

@ -171,7 +171,7 @@ namespace vm
{
const std::make_unsigned_t<std::ptrdiff_t> diff = static_cast<const u8*>(real_ptr) - g_base_addr;
if (diff <= u64{UINT32_MAX} * 2 + 1)
if (diff <= u64{u32{umax}} * 2 + 1)
{
return {vm::addr_t{static_cast<u32>(diff)}, true};
}

View File

@ -133,7 +133,7 @@ public:
// Updates the current_allocated_size metrics
void notify()
{
if (m_get_pos == UINT64_MAX)
if (m_get_pos == umax)
m_current_allocated_size = 0;
else if (m_get_pos < m_put_pos)
m_current_allocated_size = (m_put_pos - m_get_pos - 1);

View File

@ -670,7 +670,7 @@ namespace rsx
invalidate_range = fault_range; // Sections fully inside this range will be invalidated, others will be deemed false positives
// Loop through cache and find pages that overlap the invalidate_range
u32 last_dirty_block = UINT32_MAX;
u32 last_dirty_block = -1;
bool repeat_loop = false;
auto It = m_storage.range_begin(invalidate_range, locked_range, true); // will iterate through locked sections only
@ -685,7 +685,7 @@ namespace rsx
auto &tex = *It;
AUDIT(tex.is_locked()); // we should be iterating locked sections only, but just to make sure...
AUDIT(tex.cache_tag != cache_tag || last_dirty_block != UINT32_MAX); // cache tag should not match during the first loop
AUDIT(tex.cache_tag != cache_tag || last_dirty_block != umax); // cache tag should not match during the first loop
if (tex.cache_tag != cache_tag) //flushable sections can be 'clean' but unlocked. TODO: Handle this better
{
@ -997,7 +997,7 @@ namespace rsx
if (!tex.is_dirty() && (context_mask & static_cast<u32>(tex.get_context())))
{
if (required_pitch && !rsx::pitch_compatible<false>(&tex, required_pitch, UINT16_MAX))
if (required_pitch && !rsx::pitch_compatible<false>(&tex, required_pitch, -1))
{
continue;
}
@ -1784,7 +1784,7 @@ namespace rsx
if (result_is_valid)
{
// Check for possible duplicates
usz max_safe_sections = UINT32_MAX;
usz max_safe_sections = u32{umax};
switch (result.external_subresource_desc.op)
{
case deferred_request_command::atlas_gather:

View File

@ -48,8 +48,8 @@ namespace rsx {
{
switch (prot)
{
case utils::protection::no: if (no++ == UINT8_MAX) fmt::throw_exception("add(protection::no) overflow with NO==%d", UINT8_MAX); return;
case utils::protection::ro: if (ro++ == UINT8_MAX) fmt::throw_exception("add(protection::ro) overflow with RO==%d", UINT8_MAX); return;
case utils::protection::no: if (no++ == umax) fmt::throw_exception("add(protection::no) overflow"); return;
case utils::protection::ro: if (ro++ == umax) fmt::throw_exception("add(protection::ro) overflow"); return;
default: fmt::throw_exception("Unreachable");
}
}

View File

@ -196,7 +196,7 @@ namespace rsx
if (history_size == 0)
{
// We need some history to be able to take a guess
return UINT32_MAX;
return -1;
}
else if (history_size == 1)
{
@ -209,7 +209,7 @@ namespace rsx
const u32 stop_when_found_matches = 4;
u32 matches_found = 0;
u32 guess = UINT32_MAX;
u32 guess = -1;
for (u32 i = 0; i < history_size; i++)
{
@ -249,7 +249,7 @@ namespace rsx
void calculate_next_guess(bool reset)
{
if (reset || m_guessed_writes == UINT32_MAX || m_writes_since_last_flush > m_guessed_writes)
if (reset || m_guessed_writes == umax || m_writes_since_last_flush > m_guessed_writes)
{
m_guessed_writes = guess_number_of_writes();
}
@ -260,7 +260,7 @@ namespace rsx
{
confidence = starting_confidence;
m_writes_since_last_flush = 0;
m_guessed_writes = UINT32_MAX;
m_guessed_writes = -1;
write_history.clear();
}

View File

@ -73,14 +73,14 @@ namespace rsx
idx(0)
{
if (_block->empty())
idx = UINT32_MAX;
idx = u32{umax};
}
private:
// Members
block_list *block;
list_iterator list_it = {};
size_type idx = UINT32_MAX;
size_type idx = u32{umax};
size_type array_idx = 0;
inline void next()
@ -88,7 +88,7 @@ namespace rsx
++idx;
if (idx >= block->size())
{
idx = UINT32_MAX;
idx = u32{umax};
return;
}
@ -134,7 +134,7 @@ namespace rsx
// Constructor, Destructor
ranged_storage_block_list() :
m_data_it(m_data.end()),
m_array_idx(UINT32_MAX),
m_array_idx(-1),
m_capacity(0)
{}

View File

@ -375,7 +375,7 @@ namespace gl
for (const auto &res : configuration.texture_raw_data)
{
load_simple_image(res.get(), false, UINT32_MAX);
load_simple_image(res.get(), false, -1);
}
configuration.free_resources();

View File

@ -187,7 +187,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer()
//TODO: make vertex cache keep local data beyond frame boundaries and hook notify command
bool in_cache = false;
bool to_store = false;
u32 storage_address = UINT32_MAX;
u32 storage_address = -1;
if (m_vertex_layout.interleaved_blocks.size() == 1 &&
rsx::method_registers.current_draw_clause.command != rsx::draw_command::inlined_array)

View File

@ -20,7 +20,6 @@
#include <sys/types.h>
#include <pwd.h>
#include <libgen.h>
#include <limits.h>
#endif
#ifdef __APPLE__
@ -508,7 +507,7 @@ namespace rsx
auto renderer = get_font();
f32 text_extents_w = 0.f;
u16 clip_width = clip_text ? w : UINT16_MAX;
u16 clip_width = clip_text ? w : umax;
std::vector<vertex> result = renderer->render_text(string, clip_width, wrap_text);
if (!result.empty())
@ -1005,7 +1004,7 @@ namespace rsx
set_text(text);
}
bool auto_resize(bool grow_only = false, u16 limit_w = UINT16_MAX, u16 limit_h = UINT16_MAX)
bool auto_resize(bool grow_only = false, u16 limit_w = -1, u16 limit_h = -1)
{
u16 new_width, new_height;
u16 old_width = w, old_height = h;

View File

@ -181,7 +181,7 @@ namespace rsx
overlay_element caret;
auto renderer = get_font();
const auto caret_loc = renderer->get_char_offset(text.c_str(), caret_position, clip_text ? w : UINT16_MAX, wrap_text);
const auto caret_loc = renderer->get_char_offset(text.c_str(), caret_position, clip_text ? w : -1, wrap_text);
caret.set_pos(static_cast<u16>(caret_loc.first) + padding_left + x, static_cast<u16>(caret_loc.second) + padding_top + y);
caret.set_size(1, static_cast<u16>(renderer->get_size_px() + 2));

View File

@ -414,7 +414,7 @@ namespace rsx
std::vector<vertex> result;
f32 unused_x, unused_y;
render_text_ex(result, unused_x, unused_y, text, UINT32_MAX, max_width, wrap);
render_text_ex(result, unused_x, unused_y, text, -1, max_width, wrap);
return result;
}

View File

@ -74,9 +74,9 @@ namespace rsx
void render_text_ex(std::vector<vertex>& result, f32& x_advance, f32& y_advance, const char32_t* text, u32 char_limit, u16 max_width, bool wrap);
std::vector<vertex> render_text(const char32_t* text, u16 max_width = UINT16_MAX, bool wrap = false);
std::vector<vertex> render_text(const char32_t* text, u16 max_width = -1, bool wrap = false);
std::pair<f32, f32> get_char_offset(const char32_t* text, u16 max_length, u16 max_width = UINT16_MAX, bool wrap = false);
std::pair<f32, f32> get_char_offset(const char32_t* text, u16 max_length, u16 max_width = -1, bool wrap = false);
bool matches(const char* name, int size) const { return font_name == name && static_cast<int>(size_pt) == size; }
std::string_view get_name() const { return font_name; }

View File

@ -73,7 +73,7 @@ namespace rsx
compiled_resource m_cached_resource;
u32 flags = 0;
u32 char_limit = UINT32_MAX;
u32 char_limit = umax;
std::vector<osk_panel> m_panels;
usz m_panel_index = 0;

View File

@ -700,7 +700,7 @@ namespace rsx
m_datapoints.push_back(datapoint);
// Calculate new min/max
m_min = std::numeric_limits<float>::max();
m_min = max_v<f32>;
m_max = 0.0f;
// Make sure min/max reflects the data being displayed, not the entire datapoints vector

View File

@ -19,8 +19,8 @@ namespace rsx
// Non-interactable UI element
struct overlay
{
u32 uid = UINT32_MAX;
u32 type_index = UINT32_MAX;
u32 uid = umax;
u32 type_index = umax;
u16 virtual_width = 1280;
u16 virtual_height = 720;

View File

@ -11,7 +11,7 @@ struct temp_register
bool aliased_h1 = false;
bool last_write_half[4] = { false, false, false, false };
u32 real_index = UINT32_MAX;
u32 real_index = -1;
u32 h0_writes = 0u; // Number of writes to the first 64-bits of the register
u32 h1_writes = 0u; // Number of writes to the last 64-bits of the register
@ -54,7 +54,7 @@ struct temp_register
h1_writes++;
}
if (real_index == UINT32_MAX)
if (real_index == umax)
{
if (half_register)
real_index = index >> 1;

View File

@ -37,7 +37,7 @@ vertex_program_utils::vertex_program_metadata vertex_program_utils::analyse_vert
//u32 first_instruction_address = entry;
std::stack<u32> call_stack;
std::pair<u32, u32> instruction_range = { UINT32_MAX, 0 };
std::pair<u32, u32> instruction_range{umax, 0};
std::bitset<512> instructions_to_patch;
bool has_branch_instruction = false;
@ -330,7 +330,7 @@ usz fragment_program_utils::get_fragment_program_ucode_size(const void* ptr)
fragment_program_utils::fragment_program_metadata fragment_program_utils::analyse_fragment_program(const void* ptr)
{
fragment_program_utils::fragment_program_metadata result{};
result.program_start_offset = UINT32_MAX;
result.program_start_offset = -1;
const auto instBuffer = ptr;
s32 index = 0;

View File

@ -439,7 +439,7 @@ std::string VertexProgramDecompiler::Decompile()
last_label_addr = *m_prog.jump_table.rbegin();
}
auto find_jump_lvl = [this](u32 address)
auto find_jump_lvl = [this](u32 address) -> u32
{
u32 jump = 1;
@ -451,7 +451,7 @@ std::string VertexProgramDecompiler::Decompile()
++jump;
}
return UINT32_MAX;
return -1;
};
auto do_function_call = [this, &i](const std::string& condition)
@ -509,7 +509,7 @@ std::string VertexProgramDecompiler::Decompile()
if (m_prog.entry != m_prog.base_address)
{
jump_position = find_jump_lvl(m_prog.entry - m_prog.base_address);
ensure(jump_position != UINT32_MAX);
ensure(jump_position != umax);
}
AddCode(fmt::format("int jump_position = %u;", jump_position));
@ -547,7 +547,7 @@ std::string VertexProgramDecompiler::Decompile()
{
//TODO: Subroutines can also have arbitrary jumps!
u32 jump_position = find_jump_lvl(i);
if (is_has_BRA || jump_position != UINT32_MAX)
if (is_has_BRA || jump_position != umax)
{
m_cur_instr->close_scopes++;
AddCode("}");

View File

@ -558,7 +558,7 @@ namespace rsx
if (reg >= range.first && reg < range.first + range.second)
{
const u32 remaining = std::min<u32>(fifo_ctrl->get_remaining_args_count() + 1,
(fifo_ctrl->last_cmd() & RSX_METHOD_NON_INCREMENT_CMD_MASK) ? UINT32_MAX : (range.first + range.second) - reg);
(fifo_ctrl->last_cmd() & RSX_METHOD_NON_INCREMENT_CMD_MASK) ? -1 : (range.first + range.second) - reg);
commands.back().rsx_command.first = (fifo_ctrl->last_cmd() & RSX_METHOD_NON_INCREMENT_CMD_MASK) | (reg << 2) | (remaining << 18);

View File

@ -2129,7 +2129,7 @@ namespace rsx
const s32 default_frequency_mask = (1 << 8);
const s32 swap_storage_mask = (1 << 29);
const s32 volatile_storage_mask = (1 << 30);
const s32 modulo_op_frequency_mask = (INT32_MIN);
const s32 modulo_op_frequency_mask = smin;
const u32 modulo_mask = rsx::method_registers.frequency_divider_operation_mask();
const auto max_index = (first_vertex + vertex_count) - 1;
@ -3321,17 +3321,17 @@ namespace rsx
switch (type)
{
case CELL_GCM_ZPASS_PIXEL_CNT:
value = value ? UINT16_MAX : 0;
value = value ? u16{umax} : 0;
break;
case CELL_GCM_ZCULL_STATS3:
value = value ? 0 : UINT16_MAX;
value = value ? 0 : u16{umax};
break;
case CELL_GCM_ZCULL_STATS2:
case CELL_GCM_ZCULL_STATS1:
case CELL_GCM_ZCULL_STATS:
default:
//Not implemented
value = UINT32_MAX;
value = -1;
break;
}

View File

@ -486,7 +486,7 @@ namespace rsx
virtual void begin_occlusion_query(occlusion_query_info* /*query*/) {}
virtual void end_occlusion_query(occlusion_query_info* /*query*/) {}
virtual bool check_occlusion_query_status(occlusion_query_info* /*query*/) { return true; }
virtual void get_occlusion_query_result(occlusion_query_info* query) { query->result = UINT32_MAX; }
virtual void get_occlusion_query_result(occlusion_query_info* query) { query->result = -1; }
virtual void discard_occlusion_query(occlusion_query_info* /*query*/) {}
};

View File

@ -983,14 +983,14 @@ void VKGSRender::end()
if (m_current_command_buffer->flags & vk::command_buffer::cb_load_occluson_task)
{
u32 occlusion_id = m_occlusion_query_manager->allocate_query(*m_current_command_buffer);
if (occlusion_id == UINT32_MAX)
if (occlusion_id == umax)
{
// Force flush
rsx_log.error("[Performance Warning] Out of free occlusion slots. Forcing hard sync.");
ZCULL_control::sync(this);
occlusion_id = m_occlusion_query_manager->allocate_query(*m_current_command_buffer);
if (occlusion_id == UINT32_MAX)
if (occlusion_id == umax)
{
//rsx_log.error("Occlusion pool overflow");
if (m_current_task) m_current_task->result = 1;

View File

@ -109,7 +109,7 @@ void VKFragmentDecompilerThread::insertOutputs(std::stringstream & OS)
if (m_parr.HasParam(PF_PARAM_NONE, reg_type, table[i].second))
{
OS << "layout(location=" << std::to_string(output_index++) << ") " << "out vec4 " << table[i].first << ";\n";
vk_prog->output_color_masks[i] = UINT32_MAX;
vk_prog->output_color_masks[i] = -1;
}
}
}

View File

@ -203,7 +203,7 @@ namespace vk
std::vector<std::unique_ptr<vk::buffer_view>> buffer_views_to_clean;
u32 present_image = UINT32_MAX;
u32 present_image = -1;
command_buffer_chunk* swap_command_buffer = nullptr;
//Heap pointers

View File

@ -551,7 +551,7 @@ namespace vk
for (const auto &res : configuration.texture_raw_data)
{
upload_simple_texture(dev, cmd, upload_heap, storage_key++, res->w, res->h, 1, false, false, res->data, UINT32_MAX);
upload_simple_texture(dev, cmd, upload_heap, storage_key++, res->w, res->h, 1, false, false, res->data, -1);
}
configuration.free_resources();
@ -615,7 +615,7 @@ namespace vk
font->get_glyph_data(bytes);
return upload_simple_texture(cmd.get_command_pool().get_owner(), cmd, upload_heap, key, image_size.width, image_size.height, image_size.depth,
true, false, bytes.data(), UINT32_MAX);
true, false, bytes.data(), -1);
}
vk::image_view* ui_overlay_renderer::find_temp_image(rsx::overlays::image_info* desc, vk::command_buffer& cmd, vk::data_heap& upload_heap, u32 owner_uid)

View File

@ -27,7 +27,7 @@ void VKGSRender::reinitialize_swapchain()
for (auto &ctx : frame_context_storage)
{
if (ctx.present_image == UINT32_MAX)
if (ctx.present_image == umax)
continue;
// Release present image by presenting it
@ -77,7 +77,7 @@ void VKGSRender::reinitialize_swapchain()
void VKGSRender::present(vk::frame_context_t *ctx)
{
ensure(ctx->present_image != UINT32_MAX);
ensure(ctx->present_image != umax);
// Partial CS flush
ctx->swap_command_buffer->flush();
@ -100,7 +100,7 @@ void VKGSRender::present(vk::frame_context_t *ctx)
}
// Presentation image released; reset value
ctx->present_image = UINT32_MAX;
ctx->present_image = -1;
}
void VKGSRender::advance_queued_frames()
@ -498,7 +498,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
}
// Prepare surface for new frame. Set no timeout here so that we wait for the next image if need be
ensure(m_current_frame->present_image == UINT32_MAX);
ensure(m_current_frame->present_image == umax);
ensure(m_current_frame->swap_command_buffer == nullptr);
u64 timeout = m_swapchain->get_swap_image_count() <= VK_MAX_ASYNC_FRAMES? 0ull: 100000000ull;
@ -541,7 +541,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
}
// Confirm that the driver did not silently fail
ensure(m_current_frame->present_image != UINT32_MAX);
ensure(m_current_frame->present_image != umax);
// Calculate output dimensions. Done after swapchain acquisition in case it was recreated.
coordi aspect_ratio;

View File

@ -178,11 +178,11 @@ namespace
{
if (index_type == rsx::index_array_type::u16)
{
index_count = rsx::remove_restart_index(static_cast<u16*>(buf), reinterpret_cast<u16*>(tmp.data()), index_count, u16{UINT16_MAX});
index_count = rsx::remove_restart_index(static_cast<u16*>(buf), reinterpret_cast<u16*>(tmp.data()), index_count, u16{umax});
}
else
{
index_count = rsx::remove_restart_index(static_cast<u32*>(buf), reinterpret_cast<u32*>(tmp.data()), index_count, u32{UINT32_MAX});
index_count = rsx::remove_restart_index(static_cast<u32*>(buf), reinterpret_cast<u32*>(tmp.data()), index_count, u32{umax});
}
}
@ -238,8 +238,8 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
//Do actual vertex upload
auto required = calculate_memory_requirements(m_vertex_layout, vertex_base, vertex_count);
u32 persistent_range_base = UINT32_MAX, volatile_range_base = UINT32_MAX;
usz persistent_offset = UINT64_MAX, volatile_offset = UINT64_MAX;
u32 persistent_range_base = -1, volatile_range_base = -1;
usz persistent_offset = -1, volatile_offset = -1;
if (required.first > 0)
{
@ -248,7 +248,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
//TODO: make vertex cache keep local data beyond frame boundaries and hook notify command
bool in_cache = false;
bool to_store = false;
u32 storage_address = UINT32_MAX;
u32 storage_address = -1;
if (m_vertex_layout.interleaved_blocks.size() == 1 &&
rsx::method_registers.current_draw_clause.command != rsx::draw_command::inlined_array)
@ -302,7 +302,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
}
else
{
if (required.first > 0 && persistent_offset != UINT64_MAX)
if (required.first > 0 && persistent_offset != umax)
{
void *persistent_mapping = m_attrib_ring_info.map(persistent_offset, required.first);
write_vertex_data_to_memory(m_vertex_layout, vertex_base, vertex_count, persistent_mapping, nullptr);
@ -335,7 +335,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
vk::clear_status_interrupt(vk::heap_changed);
}
if (persistent_range_base != UINT32_MAX)
if (persistent_range_base != umax)
{
if (!m_persistent_attribute_storage || !m_persistent_attribute_storage->in_range(persistent_range_base, required.first, persistent_range_base))
{
@ -351,7 +351,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data()
}
}
if (volatile_range_base != UINT32_MAX)
if (volatile_range_base != umax)
{
if (!m_volatile_attribute_storage || !m_volatile_attribute_storage->in_range(volatile_range_base, required.second, volatile_range_base))
{

View File

@ -426,7 +426,7 @@ namespace vk
vkGetDeviceQueue(dev, graphics_queue_idx, 0, &m_graphics_queue);
vkGetDeviceQueue(dev, transfer_queue_idx, transfer_queue_sub_index, &m_transfer_queue);
if (present_queue_idx != UINT32_MAX)
if (present_queue_idx != umax)
{
vkGetDeviceQueue(dev, present_queue_idx, 0, &m_present_queue);
}

View File

@ -321,9 +321,9 @@ namespace vk
vkGetPhysicalDeviceSurfaceSupportKHR(dev, index, m_surface, &supports_present[index]);
}
u32 graphics_queue_idx = UINT32_MAX;
u32 present_queue_idx = UINT32_MAX;
u32 transfer_queue_idx = UINT32_MAX;
u32 graphics_queue_idx = -1;
u32 present_queue_idx = -1;
u32 transfer_queue_idx = -1;
auto test_queue_family = [&](u32 index, u32 desired_flags)
{
@ -339,7 +339,7 @@ namespace vk
for (u32 i = 0; i < device_queues; ++i)
{
// 1. Test for a present queue possibly one that also supports present
if (present_queue_idx == UINT32_MAX && supports_present[i])
if (present_queue_idx == umax && supports_present[i])
{
present_queue_idx = i;
if (test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
@ -348,7 +348,7 @@ namespace vk
}
}
// 2. Check for graphics support
else if (graphics_queue_idx == UINT32_MAX && test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
else if (graphics_queue_idx == umax && test_queue_family(i, VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT))
{
graphics_queue_idx = i;
if (supports_present[i])
@ -357,13 +357,13 @@ namespace vk
}
}
// 3. Check if transfer + compute is available
else if (transfer_queue_idx == UINT32_MAX && test_queue_family(i, VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))
else if (transfer_queue_idx == umax && test_queue_family(i, VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))
{
transfer_queue_idx = i;
}
}
if (graphics_queue_idx == UINT32_MAX)
if (graphics_queue_idx == umax)
{
rsx_log.fatal("Failed to find a suitable graphics queue");
return nullptr;
@ -380,7 +380,7 @@ namespace vk
//Native(sw) swapchain
rsx_log.error("It is not possible for the currently selected GPU to present to the window (Likely caused by NVIDIA driver running the current display)");
rsx_log.warning("Falling back to software present support (native windowing API)");
auto swapchain = new swapchain_NATIVE(dev, UINT32_MAX, graphics_queue_idx, transfer_queue_idx);
auto swapchain = new swapchain_NATIVE(dev, -1, graphics_queue_idx, transfer_queue_idx);
swapchain->create(window_handle);
return swapchain;
}

View File

@ -621,7 +621,7 @@ namespace vk
return false;
}
if (surface_descriptors.currentExtent.width != UINT32_MAX)
if (surface_descriptors.currentExtent.width != umax)
{
if (surface_descriptors.currentExtent.width == 0 || surface_descriptors.currentExtent.height == 0)
{

View File

@ -367,7 +367,7 @@ namespace rsx
for (u8 index = 0; index < 32; ++index)
{
const auto address = data.vp_jump_table[index];
if (address == UINT16_MAX)
if (address == u16{umax})
{
// End of list marker
break;
@ -415,7 +415,7 @@ namespace rsx
else
{
// End of list marker
data_block.vp_jump_table[index] = UINT16_MAX;
data_block.vp_jump_table[index] = u16{umax};
break;
}
}

View File

@ -33,7 +33,6 @@ DYNAMIC_IMPORT("ntdll.dll", NtSetTimerResolution, NTSTATUS(ULONG DesiredResoluti
#include <spawn.h>
#include <sys/wait.h>
#include <stdlib.h>
#include <limits.h>
#endif
#ifdef __linux__

View File

@ -278,7 +278,7 @@ void AutoPauseConfigDialog::OnUpdateValue() const
{
bool ok;
const ullong value = m_id->text().toULongLong(&ok, 16);
const bool is_ok = ok && value <= UINT32_MAX;
const bool is_ok = ok && value <= u32{umax};
m_current_converted->setText(tr("Current value: %1 (%2)").arg(value, 8, 16).arg(is_ok ? tr("OK") : tr("Conversion failed")));
}

View File

@ -478,7 +478,7 @@ void debugger_frame::keyPressEvent(QKeyEvent* event)
// Next instruction according to code flow
// Known branch targets are selected over next PC for conditional branches
// Indirect branches (unknown targets, such as function return) do not proceed to any instruction
std::array<u32, 2> res{UINT32_MAX, UINT32_MAX};
std::array<u32, 2> res{umax, umax};
switch (cpu->id_type())
{
@ -499,7 +499,7 @@ void debugger_frame::keyPressEvent(QKeyEvent* event)
default: break;
}
if (const size_t pos = std::basic_string_view<u32>(res.data(), 2).find_last_not_of(UINT32_MAX); pos != umax)
if (const size_t pos = std::basic_string_view<u32>(res.data(), 2).find_last_not_of(umax); pos != umax)
m_debugger_list->ShowAddress(res[pos] - std::max(row, 0) * 4, true, true);
return;
@ -562,7 +562,7 @@ cpu_thread* debugger_frame::get_cpu()
std::function<cpu_thread*()> debugger_frame::make_check_cpu(cpu_thread* cpu)
{
const u32 id = cpu ? cpu->id : UINT32_MAX;
const u32 id = cpu ? cpu->id : umax;
const u32 type = id >> 24;
std::shared_ptr<cpu_thread> shared = type == 1 ? static_cast<std::shared_ptr<cpu_thread>>(idm::get<named_thread<ppu_thread>>(id)) :

View File

@ -84,7 +84,7 @@ instruction_editor_dialog::instruction_editor_dialog(QWidget *parent, u32 _pc, C
bool ok;
const ulong opcode = m_instr->text().toULong(&ok, 16);
if (!ok || opcode > UINT32_MAX)
if (!ok || opcode > u32{umax})
{
QMessageBox::critical(this, tr("Error"), tr("Failed to parse PPU instruction."));
return;

View File

@ -574,7 +574,7 @@ void kernel_explorer::Update()
const auto first_spu = spu.group->threads[0].get();
// Always show information of the first thread in group
// Or if information differs from that thread
// Or if information differs from that thread
if (&spu == first_spu || std::any_of(std::begin(spu.spup), std::end(spu.spup), [&](const auto& port)
{
// Flag to avoid reporting information if no SPU ports are connected
@ -597,7 +597,7 @@ void kernel_explorer::Update()
// Avoid duplication of information between threads which is common
add_leaf(spu_thread_tree, qstr(fmt::format("SPU Ports: As SPU 0x%07x", first_spu->lv2_id)));
}
for (const auto& [key, queue] : spu.spuq)
{
if (lv2_obj::check(queue))
@ -635,7 +635,7 @@ void kernel_explorer::Update()
if (!pspurs)
{
if (arg < UINT32_MAX && arg % 0x80 == 0 && vm::check_addr(arg, vm::page_readable, pspurs.size()))
if (arg < u32{umax} && arg % 0x80 == 0 && vm::check_addr(arg, vm::page_readable, pspurs.size()))
{
pspurs.set(static_cast<u32>(arg));
}

View File

@ -26,7 +26,7 @@ constexpr auto qstr = QString::fromStdString;
struct gui_listener : logs::listener
{
atomic_t<logs::level> enabled{logs::level{UCHAR_MAX}};
atomic_t<logs::level> enabled{logs::level{0xff}};
struct packet_t
{

View File

@ -254,7 +254,7 @@ void memory_string_searcher::OnSearch()
u64 addr_max = addr;
const u64 end_mem = std::min<u64>(utils::align<u64>(addr + 1, block_size) + 0x1000, UINT32_MAX);
const u64 end_mem = std::min<u64>(utils::align<u64>(addr + 1, block_size) + 0x1000, u32{umax});
// Determine allocation size quickly
while (addr_max < end_mem && vm::check_addr(static_cast<u32>(addr_max), vm::page_1m_size))
@ -294,7 +294,7 @@ void memory_string_searcher::OnSearch()
// Allow overlapping strings
first_char++;
}
}
}
else
{
while (first_char = section.find_first_of(wstr[0], first_char), first_char != umax)

View File

@ -505,141 +505,170 @@ struct get_int_impl<16>
using stype = s128;
};
enum class f16 : u16{};
using f32 = float;
using f64 = double;
template <typename T>
concept UnsignedInt = std::is_unsigned_v<std::common_type_t<T>> || std::is_same_v<std::common_type_t<T>, u128>;
template <typename T>
concept SignedInt = (std::is_signed_v<std::common_type_t<T>> && std::is_integral_v<std::common_type_t<T>>) || std::is_same_v<std::common_type_t<T>, s128>;
template <typename T>
concept FPInt = std::is_floating_point_v<std::common_type_t<T>> || std::is_same_v<std::common_type_t<T>, f16>;
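// Editor's illustration (not part of the commit): which util/types.hpp aliases
// satisfy each concept, assuming the usual u32/s32/u128/s128/f16/f32/f64 definitions.
//   static_assert(UnsignedInt<u32> && UnsignedInt<u128>);
//   static_assert(SignedInt<s32> && SignedInt<s128> && !SignedInt<u32>);
//   static_assert(FPInt<f32> && FPInt<f64> && FPInt<f16>);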
template <typename T>
constexpr T min_v;
template <UnsignedInt T>
constexpr std::common_type_t<T> min_v<T> = 0;
template <SignedInt T>
constexpr std::common_type_t<T> min_v<T> = static_cast<std::common_type_t<T>>(-1) << (sizeof(std::common_type_t<T>) * 8 - 1);
template <>
constexpr inline f16 min_v<f16>{0xfbffu};
template <>
constexpr inline f32 min_v<f32> = std::bit_cast<f32, u32>(0xff'7fffffu);
template <>
constexpr inline f64 min_v<f64> = std::bit_cast<f64, u64>(0xffe'fffff'ffffffffu);
template <FPInt T>
constexpr std::common_type_t<T> min_v<T> = min_v<std::common_type_t<T>>;
template <typename T>
constexpr T max_v;
template <UnsignedInt T>
constexpr std::common_type_t<T> max_v<T> = -1;
template <SignedInt T>
constexpr std::common_type_t<T> max_v<T> = static_cast<std::common_type_t<T>>(~min_v<T>);
template <>
constexpr inline f16 max_v<f16>{0x7bffu};
template <>
constexpr inline f32 max_v<f32> = std::bit_cast<f32, u32>(0x7f'7fffffu);
template <>
constexpr inline f64 max_v<f64> = std::bit_cast<f64, u64>(0x7fe'fffff'ffffffffu);
template <FPInt T>
constexpr std::common_type_t<T> max_v<T> = max_v<std::common_type_t<T>>;
// Return magic value for any unsigned type
constexpr struct umax_impl_t
{
template <typename T>
static constexpr T value = static_cast<T>(-1);
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_unsigned_v<CT>) || (std::is_same_v<CT, u128>)
template <UnsignedInt T>
constexpr bool operator==(const T& rhs) const
{
return rhs == value<CT>;
return rhs == max_v<T>;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_unsigned_v<CT>) || (std::is_same_v<CT, u128>)
template <UnsignedInt T>
constexpr std::strong_ordering operator<=>(const T& rhs) const
{
return rhs == value<CT> ? std::strong_ordering::equal : std::strong_ordering::greater;
return rhs == max_v<T> ? std::strong_ordering::equal : std::strong_ordering::greater;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_unsigned_v<CT>) || (std::is_same_v<CT, u128>)
template <UnsignedInt T>
constexpr operator T() const
{
return value<CT>;
return max_v<T>;
}
} umax;
constexpr struct smin_impl_t
{
template <typename T>
static constexpr T value = static_cast<T>(-1) << (sizeof(T) * 8 - 1);
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_signed_v<CT>) || (std::is_same_v<CT, s128>)
template <SignedInt T>
constexpr bool operator==(const T& rhs) const
{
return rhs == value<CT>;
return rhs == min_v<T>;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_signed_v<CT>) || (std::is_same_v<CT, s128>)
template <SignedInt T>
constexpr std::strong_ordering operator<=>(const T& rhs) const
{
return rhs == value<CT> ? std::strong_ordering::equal : std::strong_ordering::less;
return rhs == min_v<T> ? std::strong_ordering::equal : std::strong_ordering::less;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_signed_v<CT>) || (std::is_same_v<CT, s128>)
template <SignedInt T>
constexpr operator T() const
{
return value<CT>;
return min_v<T>;
}
} smin;
constexpr struct smax_impl_t
{
template <typename T>
static constexpr T value = static_cast<T>(~smin_impl_t::value<T>);
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <SignedInt T>
constexpr bool operator==(const T& rhs) const
{
return rhs == value<CT>;
return rhs == max_v<T>;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <SignedInt T>
constexpr std::strong_ordering operator<=>(const T& rhs) const
{
if constexpr (std::is_signed_v<CT> || std::is_same_v<CT, s128>)
{
return rhs == value<CT> ? std::strong_ordering::equal : std::strong_ordering::greater;
}
else
{
return value<CT> <=> rhs;
}
return rhs == max_v<T> ? std::strong_ordering::equal : std::strong_ordering::greater;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <SignedInt T>
constexpr operator T() const
{
return value<CT>;
return max_v<T>;
}
} smax;
// Compare signed or unsigned type with its max value
constexpr struct amax_impl_t
{
template <typename T>
static constexpr T value = (std::is_unsigned_v<T> || std::is_same_v<T, u128>) ? umax_impl_t::value<T> : smax_impl_t::value<T>;
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <typename T> requires SignedInt<T> || UnsignedInt<T>
constexpr bool operator ==(const T& rhs) const
{
return rhs == value<CT>;
return rhs == max_v<T>;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <typename T> requires SignedInt<T> || UnsignedInt<T>
constexpr std::strong_ordering operator <=>(const T& rhs) const
{
return value<CT> <=> rhs;
return max_v<T> <=> rhs;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <typename T> requires SignedInt<T> || UnsignedInt<T>
constexpr operator T() const
{
return value<CT>;
return max_v<T>;
}
} amax;
// Compare signed or unsigned type with its minimal value (like zero or INT_MIN)
constexpr struct amin_impl_t
{
template <typename T>
static constexpr T value = (std::is_signed_v<T> || std::is_same_v<T, s128>) ? smin_impl_t::value<T> : 0;
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<CT>) || (std::is_same_v<CT, s128>) || (std::is_same_v<CT, u128>)
template <typename T> requires SignedInt<T> || UnsignedInt<T>
constexpr bool operator ==(const T& rhs) const
{
return rhs == value<CT>;
return rhs == min_v<T>;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<T>) || (std::is_same_v<T, s128>) || (std::is_same_v<T, u128>)
template <typename T> requires SignedInt<T> || UnsignedInt<T>
constexpr std::strong_ordering operator <=>(const T& rhs) const
{
return value<CT> <=> rhs;
return min_v<T> <=> rhs;
}
template <typename T, typename CT = std::common_type_t<T>> requires (std::is_integral_v<T>) || (std::is_same_v<T, s128>) || (std::is_same_v<T, u128>)
template <typename T> requires SignedInt<T> || UnsignedInt<T>
constexpr operator T() const
{
return value<CT>;
return min_v<T>;
}
} amin;
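// Editor's illustration (not part of the commit): the generic limit objects compare
// against the limits of whatever type they are used with.
//   u8  x = umax;            // 0xff, since u8 is unsigned
//   s32 y = smin;            // INT32_MIN, as used for modulo_op_frequency_mask elsewhere in this commit
//   bool a = (x == amax);    // true: amax resolves to max_v<u8> here
//   bool b = (y == amin);    // true: amin resolves to min_v<s32> here
//   s32 z = smax;            // INT32_MAX; per the commit message, smax now accepts signed values only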
enum class f16 : u16{};
using f32 = float;
using f64 = double;
template <typename T, typename T2>
inline u32 offset32(T T2::*const mptr)
{