diff --git a/Utilities/Config.cpp b/Utilities/Config.cpp index cde0ba6365..a59cba9cec 100644 --- a/Utilities/Config.cpp +++ b/Utilities/Config.cpp @@ -1,4 +1,4 @@ -#include "stdafx.h" +#include "stdafx.h" #include "Config.h" #include "Utilities/types.h" diff --git a/Utilities/File.cpp b/Utilities/File.cpp index ca15bfac6b..4463881742 100644 --- a/Utilities/File.cpp +++ b/Utilities/File.cpp @@ -1,4 +1,4 @@ -#include "File.h" +#include "File.h" #include "mutex.h" #include "StrFmt.h" #include "BEType.h" @@ -41,10 +41,10 @@ static std::unique_ptr to_wchar(const std::string& source) std::memcpy(buffer.get() + 32768 + 4, L"UNC\\", 4 * sizeof(wchar_t)); } - verify("to_wchar" HERE), MultiByteToWideChar(CP_UTF8, 0, source.c_str(), size, buffer.get() + 32768 + (unc ? 8 : 4), size); + ensure(MultiByteToWideChar(CP_UTF8, 0, source.c_str(), size, buffer.get() + 32768 + (unc ? 8 : 4), size)); // "to_wchar" // Canonicalize wide path (replace '/', ".", "..", \\ repetitions, etc) - verify("to_wchar" HERE), GetFullPathNameW(buffer.get() + 32768, 32768, buffer.get(), nullptr) - 1 < 32768 - 1; + ensure(GetFullPathNameW(buffer.get() + 32768, 32768, buffer.get(), nullptr) - 1 < 32768 - 1); // "to_wchar" return buffer; } @@ -63,7 +63,7 @@ static void to_utf8(std::string& out, const wchar_t* source) const int result = WideCharToMultiByte(CP_UTF8, 0, source, static_cast(length) + 1, &out.front(), buf_size, NULL, NULL); // Fix the size - out.resize(verify("to_utf8" HERE, result) - 1); + out.resize(ensure(result) - 1); } static time_t to_time(const ULARGE_INTEGER& ft) @@ -315,7 +315,7 @@ std::shared_ptr fs::get_virtual_device(const std::string& path) std::shared_ptr fs::set_virtual_device(const std::string& name, const std::shared_ptr& device) { - verify(HERE), name.starts_with("//"), name[2] != '/'; + ensure(name.starts_with("//") && name[2] != '/'); return get_device_manager().set_device(name, device); } @@ -355,7 +355,7 @@ bool fs::stat(const std::string& path, stat_t& info) if (!GetFileAttributesExW(to_wchar(std::string(epath) + '/').get(), GetFileExInfoStandard, &attrs)) { g_tls_error = to_error(GetLastError()); - return false; + return false; } info.is_directory = true; // Handle drives as directories @@ -404,7 +404,7 @@ bool fs::stat(const std::string& path, stat_t& info) if (const DWORD err = GetLastError(); err != ERROR_NO_MORE_FILES) { g_tls_error = to_error(err); - return false; + return false; } g_tls_error = fs::error::noent; @@ -990,7 +990,7 @@ fs::file::file(const std::string& path, bs_t mode) stat_t stat() override { FILE_BASIC_INFO basic_info; - verify("file::stat" HERE), GetFileInformationByHandleEx(m_handle, FileBasicInfo, &basic_info, sizeof(FILE_BASIC_INFO)); + ensure(GetFileInformationByHandleEx(m_handle, FileBasicInfo, &basic_info, sizeof(FILE_BASIC_INFO))); // "file::stat" stat_t info; info.is_directory = (basic_info.FileAttributes & FILE_ATTRIBUTE_DIRECTORY) != 0; @@ -1008,7 +1008,7 @@ fs::file::file(const std::string& path, bs_t mode) void sync() override { - verify("file::sync" HERE), FlushFileBuffers(m_handle); + ensure(FlushFileBuffers(m_handle)); // "file::sync" } bool trunc(u64 length) override @@ -1031,7 +1031,7 @@ fs::file::file(const std::string& path, bs_t mode) const int size = narrow(count, "file::read" HERE); DWORD nread; - verify("file::read" HERE), ReadFile(m_handle, buffer, size, &nread, NULL); + ensure(ReadFile(m_handle, buffer, size, &nread, NULL)); // "file::read" return nread; } @@ -1042,7 +1042,7 @@ fs::file::file(const std::string& path, bs_t mode) const int size = 
narrow(count, "file::write" HERE); DWORD nwritten; - verify("file::write" HERE), WriteFile(m_handle, buffer, size, &nwritten, NULL); + ensure(WriteFile(m_handle, buffer, size, &nwritten, NULL)); // "file::write" return nwritten; } @@ -1070,7 +1070,7 @@ fs::file::file(const std::string& path, bs_t mode) u64 size() override { LARGE_INTEGER size; - verify("file::size" HERE), GetFileSizeEx(m_handle, &size); + ensure(GetFileSizeEx(m_handle, &size)); // "file::size" return size.QuadPart; } @@ -1119,7 +1119,7 @@ fs::file::file(const std::string& path, bs_t mode) if (mode & fs::trunc && mode & (fs::lock + fs::unread) && mode & fs::write) { // Postpone truncation in order to avoid using O_TRUNC on a locked file - verify(HERE), ::ftruncate(fd, 0) == 0; + ensure(::ftruncate(fd, 0) == 0); } class unix_file final : public file_base @@ -1140,7 +1140,7 @@ fs::file::file(const std::string& path, bs_t mode) stat_t stat() override { struct ::stat file_info; - verify("file::stat" HERE), ::fstat(m_fd, &file_info) == 0; + ensure(::fstat(m_fd, &file_info) == 0); // "file::stat" stat_t info; info.is_directory = S_ISDIR(file_info.st_mode); @@ -1158,7 +1158,7 @@ fs::file::file(const std::string& path, bs_t mode) void sync() override { - verify("file::sync" HERE), ::fsync(m_fd) == 0; + ensure(::fsync(m_fd) == 0); // "file::sync" } bool trunc(u64 length) override @@ -1175,7 +1175,7 @@ fs::file::file(const std::string& path, bs_t mode) u64 read(void* buffer, u64 count) override { const auto result = ::read(m_fd, buffer, count); - verify("file::read" HERE), result != -1; + ensure(result != -1); // "file::read" return result; } @@ -1183,7 +1183,7 @@ fs::file::file(const std::string& path, bs_t mode) u64 write(const void* buffer, u64 count) override { const auto result = ::write(m_fd, buffer, count); - verify("file::write" HERE), result != -1; + ensure(result != -1); // "file::write" return result; } @@ -1210,7 +1210,7 @@ fs::file::file(const std::string& path, bs_t mode) u64 size() override { struct ::stat file_info; - verify("file::size" HERE), ::fstat(m_fd, &file_info) == 0; + ensure(::fstat(m_fd, &file_info) == 0); // "file::size" return file_info.st_size; } @@ -1226,7 +1226,7 @@ fs::file::file(const std::string& path, bs_t mode) static_assert(offsetof(iovec, iov_len) == offsetof(iovec_clone, iov_len), "Weird iovec::iov_len offset"); const auto result = ::writev(m_fd, reinterpret_cast(buffers), buf_count); - verify("file::write_gather" HERE), result != -1; + ensure(result != -1); // "file::write_gather" return result; } @@ -1386,7 +1386,7 @@ bool fs::dir::open(const std::string& path) add_entry(found); } - verify("dir::read" HERE), ERROR_NO_MORE_FILES == GetLastError(); + ensure(ERROR_NO_MORE_FILES == GetLastError()); // "dir::read" FindClose(handle); } diff --git a/Utilities/JIT.cpp b/Utilities/JIT.cpp index 5ce10fa8b0..898e9dd9df 100644 --- a/Utilities/JIT.cpp +++ b/Utilities/JIT.cpp @@ -1,4 +1,4 @@ -#include "types.h" +#include "types.h" #include "JIT.h" #include "StrFmt.h" #include "File.h" diff --git a/Utilities/StrFmt.cpp b/Utilities/StrFmt.cpp index a3293326f0..4b851a1818 100644 --- a/Utilities/StrFmt.cpp +++ b/Utilities/StrFmt.cpp @@ -1,4 +1,4 @@ -#include "StrFmt.h" +#include "StrFmt.h" #include "BEType.h" #include "StrUtil.h" #include "cfmt.h" @@ -249,7 +249,7 @@ namespace fmt thread_ctrl::emergency_exit(msg); } - void raw_verify_error(const char* msg, const fmt_type_info* sup, u64 arg) + void raw_verify_error(const src_loc& loc) { std::string out{"Verification failed"}; @@ -257,26 +257,31 @@ namespace fmt 
#ifdef _WIN32 if (DWORD error = GetLastError()) { - fmt::append(out, " (e=%#x)", error); + fmt::append(out, " (e=%#x):", error); } #else if (int error = errno) { - fmt::append(out, " (e=%d)", error); + fmt::append(out, " (e=%d):", error); } #endif - if (sup) + if (loc.col != umax) { - out += " ("; - sup->fmt_string(out, arg); // Print value - out += ")"; + fmt::append(out, "\n(in file %s:%s[:%s]", loc.file, loc.line, loc.col); + } + else + { + fmt::append(out, "\n(in file %s:%s", loc.file, loc.line); } - if (msg) + if (loc.func && *loc.func) { - out += ": "; - out += msg; + fmt::append(out, ", in function %s)", loc.func); + } + else + { + out += ')'; } thread_ctrl::emergency_exit(out); diff --git a/Utilities/Thread.cpp b/Utilities/Thread.cpp index 432dd31cbe..53e57ba5c4 100644 --- a/Utilities/Thread.cpp +++ b/Utilities/Thread.cpp @@ -1,4 +1,4 @@ -#include "stdafx.h" +#include "stdafx.h" #include "Emu/System.h" #include "Emu/Cell/SPUThread.h" #include "Emu/Cell/PPUThread.h" @@ -1872,7 +1872,7 @@ void thread_base::start() // Receive "that" native thread handle, sent "this" thread_base const u64 _self = reinterpret_cast(atomic_storage::load(*tls)); m_thread.release(_self); - verify(HERE), _self != reinterpret_cast(this); + ensure(_self != reinterpret_cast(this)); atomic_storage::store(*tls, this); s_thread_pool[pos].notify_one(); return; @@ -1880,9 +1880,10 @@ void thread_base::start() #ifdef _WIN32 m_thread = ::_beginthreadex(nullptr, 0, entry_point, this, CREATE_SUSPENDED, nullptr); - verify("thread_ctrl::start" HERE), m_thread, ::ResumeThread(reinterpret_cast(+m_thread)) != -1; + ensure(m_thread); + ensure(::ResumeThread(reinterpret_cast(+m_thread)) != -1); #else - verify("thread_ctrl::start" HERE), pthread_create(reinterpret_cast(&m_thread.raw()), nullptr, entry_point, this) == 0; + ensure(pthread_create(reinterpret_cast(&m_thread.raw()), nullptr, entry_point, this) == 0); #endif } diff --git a/Utilities/cond.cpp b/Utilities/cond.cpp index d0901e2ff8..3b10d24e60 100644 --- a/Utilities/cond.cpp +++ b/Utilities/cond.cpp @@ -1,4 +1,4 @@ -#include "cond.h" +#include "cond.h" #include "sync.h" #include "lockless.h" @@ -9,7 +9,7 @@ void cond_variable::imp_wait(u32 _old, u64 _timeout) noexcept { // Not supposed to fail - verify(HERE), _old; + ensure(_old); // Wait with timeout m_value.wait(_old, c_signal_mask, atomic_wait_timeout{_timeout > max_timeout ? 
UINT64_MAX : _timeout * 1000}); diff --git a/Utilities/mutex.cpp b/Utilities/mutex.cpp index 2e5eb815db..6f097f964e 100644 --- a/Utilities/mutex.cpp +++ b/Utilities/mutex.cpp @@ -2,7 +2,7 @@ void shared_mutex::imp_lock_shared(u32 val) { - verify("shared_mutex underflow" HERE), val < c_err; + ensure(val < c_err); // "shared_mutex underflow" for (int i = 0; i < 10; i++) { @@ -23,14 +23,14 @@ void shared_mutex::imp_lock_shared(u32 val) return; } - verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig; + ensure((old % c_sig) + c_one < c_sig); // "shared_mutex overflow" imp_wait(); lock_downgrade(); } void shared_mutex::imp_unlock_shared(u32 old) { - verify("shared_mutex underflow" HERE), old - 1 < c_err; + ensure(old - 1 < c_err); // "shared_mutex underflow" // Check reader count, notify the writer if necessary if ((old - 1) % c_one == 0) @@ -71,7 +71,7 @@ void shared_mutex::imp_signal() void shared_mutex::imp_lock(u32 val) { - verify("shared_mutex underflow" HERE), val < c_err; + ensure(val < c_err); // "shared_mutex underflow" for (int i = 0; i < 10; i++) { @@ -90,13 +90,13 @@ void shared_mutex::imp_lock(u32 val) return; } - verify("shared_mutex overflow" HERE), (old % c_sig) + c_one < c_sig; + ensure((old % c_sig) + c_one < c_sig); // "shared_mutex overflow" imp_wait(); } void shared_mutex::imp_unlock(u32 old) { - verify("shared_mutex underflow" HERE), old - c_one < c_err; + ensure(old - c_one < c_err); // "shared_mutex underflow" // 1) Notify the next writer if necessary // 2) Notify all readers otherwise if necessary (currently indistinguishable from writers) @@ -121,7 +121,7 @@ void shared_mutex::imp_lock_upgrade() // Convert to writer lock const u32 old = m_value.fetch_add(c_one - 1); - verify("shared_mutex overflow" HERE), (old % c_sig) + c_one - 1 < c_sig; + ensure((old % c_sig) + c_one - 1 < c_sig); // "shared_mutex overflow" if (old % c_one == 1) { diff --git a/Utilities/sema.cpp b/Utilities/sema.cpp index e4e3e7345f..355454141f 100644 --- a/Utilities/sema.cpp +++ b/Utilities/sema.cpp @@ -52,7 +52,7 @@ void semaphore_base::imp_wait() void semaphore_base::imp_post(s32 _old) { - verify("semaphore_base: overflow" HERE), _old < 0; + ensure(_old < 0); // "semaphore_base: overflow" m_value.notify_one(); } diff --git a/Utilities/sysinfo.cpp b/Utilities/sysinfo.cpp index 852f4d9e54..f96b0303a8 100755 --- a/Utilities/sysinfo.cpp +++ b/Utilities/sysinfo.cpp @@ -1,4 +1,4 @@ -#include "sysinfo.h" +#include "sysinfo.h" #include "StrFmt.h" #include "File.h" #include "Emu/system_config.h" diff --git a/Utilities/types.h b/Utilities/types.h index a496ea1f0e..af7c45c7d9 100644 --- a/Utilities/types.h +++ b/Utilities/types.h @@ -178,8 +178,8 @@ namespace std #endif using steady_clock = std::conditional< - std::chrono::high_resolution_clock::is_steady, - std::chrono::high_resolution_clock, std::chrono::steady_clock>::type; + std::chrono::high_resolution_clock::is_steady, + std::chrono::high_resolution_clock, std::chrono::steady_clock>::type; // Get integral type from type size template @@ -224,7 +224,7 @@ using get_sint_t = typename get_int_impl::stype; template std::remove_cvref_t as_rvalue(T&& obj) { - return std::forward(obj); + return std::forward(obj); } // Formatting helper, type-specific preprocessing for improving safety and functionality @@ -605,8 +605,8 @@ union alignas(2) f16 // See http://stackoverflow.com/a/26779139 // The conversion doesn't handle NaN/Inf u32 raw = ((_u16 & 0x8000) << 16) | // Sign (just moved) - (((_u16 & 0x7c00) + 0x1C000) << 13) | // Exponent ( exp - 15 + 
127) - ((_u16 & 0x03FF) << 13); // Mantissa + (((_u16 & 0x7c00) + 0x1C000) << 13) | // Exponent ( exp - 15 + 127) + ((_u16 & 0x03FF) << 13); // Mantissa return std::bit_cast(raw); } @@ -760,75 +760,41 @@ constexpr u64 operator""_u64(const char* s, std::size_t /*length*/) } } +#if !defined(__INTELLISENSE__) && !__has_builtin(__builtin_COLUMN) && !defined(_MSC_VER) +constexpr unsigned __builtin_COLUMN() +{ + return -1; +} +#endif + +struct src_loc +{ + u32 line; + u32 col; + const char* file; + const char* func; +}; + namespace fmt { [[noreturn]] void raw_error(const char* msg); - [[noreturn]] void raw_verify_error(const char* msg, const fmt_type_info* sup, u64 arg); + [[noreturn]] void raw_verify_error(const src_loc& loc); [[noreturn]] void raw_narrow_error(const char* msg, const fmt_type_info* sup, u64 arg); } -struct verify_func +template +constexpr decltype(auto) ensure(T&& arg, + u32 line = __builtin_LINE(), + u32 col = __builtin_COLUMN(), + const char* file = __builtin_FILE(), + const char* func = __builtin_FUNCTION()) noexcept { - template - bool operator()(T&& value) const + if (std::forward(arg)) [[likely]] { - if (std::forward(value)) - { - return true; - } - - return false; - } -}; - -template -struct verify_impl -{ - const char* cause; - - template - auto operator,(T&& value) const - { - // Verification (can be safely disabled) - if (!verify_func()(std::forward(value))) - { - fmt::raw_verify_error(cause, nullptr, N); - } - - return verify_impl{cause}; - } -}; - -// Verification helper, checks several conditions delimited with comma operator -inline auto verify(const char* cause) -{ - return verify_impl<0>{cause}; -} - -// Verification helper (returns value or lvalue reference, may require to use verify_move instead) -template -inline T verify(const char* cause, T&& value, F&& pred = F()) -{ - if (!pred(std::forward(value))) - { - using unref = std::remove_const_t>; - fmt::raw_verify_error(cause, fmt::get_type_info>(), fmt_unveil::get(value)); + return std::forward(arg); } - return std::forward(value); -} - -// Verification helper (must be used in return expression or in place of std::move) -template -inline std::remove_reference_t&& verify_move(const char* cause, T&& value, F&& pred = F()) -{ - if (!pred(std::forward(value))) - { - using unref = std::remove_const_t>; - fmt::raw_verify_error(cause, fmt::get_type_info>(), fmt_unveil::get(value)); - } - - return std::move(value); + fmt::raw_verify_error({line, col, file, func}); } // narrow() function details diff --git a/rpcs3/Emu/Audio/AudioDumper.cpp b/rpcs3/Emu/Audio/AudioDumper.cpp index 3ffef9cfc0..909892593f 100644 --- a/rpcs3/Emu/Audio/AudioDumper.cpp +++ b/rpcs3/Emu/Audio/AudioDumper.cpp @@ -33,7 +33,8 @@ void AudioDumper::WriteData(const void* buffer, u32 size) { if (GetCh()) { - verify(HERE), size, m_output.write(buffer, size) == size; + ensure(size); + ensure(m_output.write(buffer, size) == size); m_header.Size += size; m_header.RIFF.Size += size; } diff --git a/rpcs3/Emu/CPU/CPUThread.cpp b/rpcs3/Emu/CPU/CPUThread.cpp index 2bfae8f20b..98670a5ac3 100644 --- a/rpcs3/Emu/CPU/CPUThread.cpp +++ b/rpcs3/Emu/CPU/CPUThread.cpp @@ -464,7 +464,7 @@ void cpu_thread::operator()() if (progress == umax && std::exchange(wait_set, false)) { // Operation finished: need to clean wait flag - verify(HERE), !_cpu->check_state(); + ensure(!_cpu->check_state()); return; } }); @@ -484,7 +484,7 @@ void cpu_thread::operator()() if (progress == umax && std::exchange(wait_set, false)) { - verify(HERE), !_cpu->check_state(); + 
ensure(!_cpu->check_state()); return; } }; @@ -693,7 +693,7 @@ bool cpu_thread::check_state() noexcept cpu_counter::add(this); } - verify(HERE), cpu_can_stop || !retval; + ensure(cpu_can_stop || !retval); return retval; } @@ -859,7 +859,7 @@ std::string cpu_thread::dump_misc() const bool cpu_thread::suspend_work::push(cpu_thread* _this) noexcept { // Can't allow pre-set wait bit (it'd be a problem) - verify(HERE), !_this || !(_this->state & cpu_flag::wait); + ensure(!_this || !(_this->state & cpu_flag::wait)); do { @@ -998,7 +998,7 @@ bool cpu_thread::suspend_work::push(cpu_thread* _this) noexcept } // Finalization (last increment) - verify(HERE), g_suspend_counter++ & 1; + ensure(g_suspend_counter++ & 1); cpu_counter::for_all_cpu(copy2, [&](cpu_thread* cpu) { diff --git a/rpcs3/Emu/CPU/CPUTranslator.cpp b/rpcs3/Emu/CPU/CPUTranslator.cpp index 78eec02488..8bc4c85cde 100644 --- a/rpcs3/Emu/CPU/CPUTranslator.cpp +++ b/rpcs3/Emu/CPU/CPUTranslator.cpp @@ -85,7 +85,7 @@ llvm::Value* cpu_translator::bitcast(llvm::Value* val, llvm::Type* type) if (const auto c1 = llvm::dyn_cast(val)) { - return verify(HERE, llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c1, type, m_module->getDataLayout())); + return ensure(llvm::ConstantFoldCastOperand(llvm::Instruction::BitCast, c1, type, m_module->getDataLayout())); } return m_ir->CreateBitCast(val, type); @@ -203,8 +203,8 @@ llvm::Constant* cpu_translator::make_const_vector(v128 v, llvm::Type* t) return llvm::ConstantInt::get(t, llvm::APInt(128, llvm::makeArrayRef(reinterpret_cast(v._bytes), 2))); } - verify(HERE), t->isVectorTy(); - verify(HERE), 128 == t->getScalarSizeInBits() * llvm::cast(t)->getNumElements(); + ensure(t->isVectorTy()); + ensure(128 == t->getScalarSizeInBits() * llvm::cast(t)->getNumElements()); const auto sct = t->getScalarType(); diff --git a/rpcs3/Emu/Cell/Modules/cellDmux.cpp b/rpcs3/Emu/Cell/Modules/cellDmux.cpp index 106fb6254e..3a39cac69e 100644 --- a/rpcs3/Emu/Cell/Modules/cellDmux.cpp +++ b/rpcs3/Emu/Cell/Modules/cellDmux.cpp @@ -811,7 +811,7 @@ void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool ra u32 addr; { std::lock_guard lock(m_mutex); - verify(HERE), !is_full(size); + ensure(!is_full(size)); if (put + size + 128 > memAddr + memSize) { @@ -852,7 +852,7 @@ void ElementaryStream::push_au(u32 size, u64 dts, u64 pts, u64 userdata, bool ra put_count++; } - verify(HERE), entries.push(addr, &dmux->is_closed); + ensure(entries.push(addr, &dmux->is_closed)); } void ElementaryStream::push(DemuxerStream& stream, u32 size) diff --git a/rpcs3/Emu/Cell/Modules/cellGame.cpp b/rpcs3/Emu/Cell/Modules/cellGame.cpp index 1c548d5207..0d1f0b3293 100644 --- a/rpcs3/Emu/Cell/Modules/cellGame.cpp +++ b/rpcs3/Emu/Cell/Modules/cellGame.cpp @@ -177,7 +177,7 @@ error_code cellHddGameCheck(ppu_thread& ppu, u32 version, vm::cptr dirName std::string game_dir = dirName.get_ptr(); // TODO: Find error code - verify(HERE), game_dir.size() == 9; + ensure(game_dir.size() == 9); const std::string dir = "/dev_hdd0/game/" + game_dir; diff --git a/rpcs3/Emu/Cell/Modules/cellGcmSys.cpp b/rpcs3/Emu/Cell/Modules/cellGcmSys.cpp index e14b59190f..699d2e0c05 100644 --- a/rpcs3/Emu/Cell/Modules/cellGcmSys.cpp +++ b/rpcs3/Emu/Cell/Modules/cellGcmSys.cpp @@ -383,7 +383,7 @@ error_code _cellGcmInitBody(ppu_thread& ppu, vm::pptr contex // Create contexts const auto area = vm::reserve_map(vm::rsx_context, 0, 0x10000000, 0x403); const u32 rsx_ctxaddr = area ? 
area->alloc(0x400000) : 0; - verify(HERE), rsx_ctxaddr != 0; + ensure(rsx_ctxaddr); g_defaultCommandBufferBegin = ioAddress; g_defaultCommandBufferFragmentCount = cmdSize / (32 * 1024); @@ -990,7 +990,7 @@ error_code cellGcmMapEaIoAddressWithFlags(ppu_thread& ppu, u32 ea, u32 io, u32 s { cellGcmSys.warning("cellGcmMapEaIoAddressWithFlags(ea=0x%x, io=0x%x, size=0x%x, flags=0x%x)", ea, io, size, flags); - verify(HERE), flags == 2 /*CELL_GCM_IOMAP_FLAG_STRICT_ORDERING*/; + ensure(flags == 2 /*CELL_GCM_IOMAP_FLAG_STRICT_ORDERING*/); const auto cfg = g_fxo->get(); std::lock_guard lock(cfg->gcmio_mutex); @@ -1374,7 +1374,7 @@ static std::pair getNextCommandBufferBeginEnd(u32 current) static u32 getOffsetFromAddress(u32 address) { const u32 upper = g_fxo->get()->offsetTable.ioAddress[address >> 20]; // 12 bits - verify(HERE), (upper != 0xFFFF); + ensure(upper != 0xFFFF); return (upper << 20) | (address & 0xFFFFF); } diff --git a/rpcs3/Emu/Cell/Modules/cellMic.cpp b/rpcs3/Emu/Cell/Modules/cellMic.cpp index 3f1c6c5f69..bd449464a2 100644 --- a/rpcs3/Emu/Cell/Modules/cellMic.cpp +++ b/rpcs3/Emu/Cell/Modules/cellMic.cpp @@ -363,7 +363,7 @@ bool microphone_device::has_data() const u32 microphone_device::capture_audio() { - verify(HERE), sample_size > 0; + ensure(sample_size > 0); u32 num_samples = inbuf_size / sample_size; @@ -412,7 +412,7 @@ void microphone_device::get_raw(const u32 num_samples) } break; case microphone_handler::singstar: - verify(HERE), sample_size == 4; + ensure(sample_size == 4); // Mixing the 2 mics as if channels if (input_devices.size() == 2) @@ -466,7 +466,7 @@ void microphone_device::get_dsp(const u32 num_samples) } break; case microphone_handler::singstar: - verify(HERE), sample_size == 4; + ensure(sample_size == 4); // Mixing the 2 mics as if channels if (input_devices.size() == 2) diff --git a/rpcs3/Emu/Cell/Modules/cellPamf.cpp b/rpcs3/Emu/Cell/Modules/cellPamf.cpp index 38c6fa264b..33e2f496f6 100644 --- a/rpcs3/Emu/Cell/Modules/cellPamf.cpp +++ b/rpcs3/Emu/Cell/Modules/cellPamf.cpp @@ -37,7 +37,7 @@ void fmt_class_string::format(std::string& out, u64 arg) error_code pamfStreamTypeToEsFilterId(u8 type, u8 ch, CellCodecEsFilterId& pEsFilterId) { // convert type and ch to EsFilterId - verify(HERE), (ch < 16); + ensure(ch < 16); pEsFilterId.supplementalInfo1 = type == CELL_PAMF_STREAM_TYPE_AVC; pEsFilterId.supplementalInfo2 = 0; @@ -137,7 +137,7 @@ error_code pamfStreamTypeToEsFilterId(u8 type, u8 ch, CellCodecEsFilterId& pEsFi u8 pamfGetStreamType(vm::ptr pSelf, u32 stream) { // TODO: get stream type correctly - verify(HERE), (stream < pSelf->pAddr->stream_count); + ensure(stream < pSelf->pAddr->stream_count); auto& header = pSelf->pAddr->stream_headers[stream]; switch (header.type) @@ -158,7 +158,7 @@ u8 pamfGetStreamType(vm::ptr pSelf, u32 stream) u8 pamfGetStreamChannel(vm::ptr pSelf, u32 stream) { // TODO: get stream channel correctly - verify(HERE), (stream < pSelf->pAddr->stream_count); + ensure(stream < pSelf->pAddr->stream_count); auto& header = pSelf->pAddr->stream_headers[stream]; switch (header.type) @@ -166,29 +166,34 @@ u8 pamfGetStreamChannel(vm::ptr pSelf, u32 stream) case 0x1b: // AVC case 0x02: // M2V { - verify(HERE), (header.fid_major & 0xf0) == 0xe0, header.fid_minor == 0; + ensure((header.fid_major & 0xf0) == 0xe0); + ensure(!header.fid_minor); return header.fid_major % 16; } case 0xdc: // ATRAC3PLUS { - verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0; + ensure((header.fid_major == 0xbd)); + ensure((header.fid_minor & 0xf0) == 0); 
return header.fid_minor % 16; } case 0x80: // LPCM { - verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0x40; + ensure((header.fid_major == 0xbd)); + ensure((header.fid_minor & 0xf0) == 0x40); return header.fid_minor % 16; } case 0x81: // AC3 { - verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0x30; + ensure((header.fid_major == 0xbd)); + ensure((header.fid_minor & 0xf0) == 0x30); return header.fid_minor % 16; } case 0xdd: { - verify(HERE), header.fid_major == 0xbd, (header.fid_minor & 0xf0) == 0x20; + ensure((header.fid_major == 0xbd)); + ensure((header.fid_minor & 0xf0) == 0x20); return header.fid_minor % 16; } } @@ -473,7 +478,7 @@ error_code cellPamfReaderGetEsFilterId(vm::ptr pSelf, vm::ptr(pSelf->stream) < pSelf->pAddr->stream_count; + ensure(static_cast(pSelf->stream) < pSelf->pAddr->stream_count); auto& header = pSelf->pAddr->stream_headers[pSelf->stream]; pEsFilterId->filterIdMajor = header.fid_major; pEsFilterId->filterIdMinor = header.fid_minor; @@ -486,7 +491,7 @@ error_code cellPamfReaderGetStreamInfo(vm::ptr pSelf, vm::ptr(pSelf->stream) < pSelf->pAddr->stream_count; + ensure(static_cast(pSelf->stream) < pSelf->pAddr->stream_count); auto& header = pSelf->pAddr->stream_headers[pSelf->stream]; const u8 type = pamfGetStreamType(pSelf, pSelf->stream); const u8 ch = pamfGetStreamChannel(pSelf, pSelf->stream); diff --git a/rpcs3/Emu/Cell/Modules/cellPamf.h b/rpcs3/Emu/Cell/Modules/cellPamf.h index 3e7a10d86a..ffe873da66 100644 --- a/rpcs3/Emu/Cell/Modules/cellPamf.h +++ b/rpcs3/Emu/Cell/Modules/cellPamf.h @@ -462,7 +462,8 @@ public: while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32 { - verify(HERE), sync.count <= sq_size, sync.position < sq_size; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); if (sync.push_lock) { @@ -491,7 +492,9 @@ public: m_sync.atomic_op([](squeue_sync_var_t& sync) { - verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.push_lock; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); + ensure(!!sync.push_lock); sync.push_lock = 0; sync.count++; }); @@ -522,7 +525,8 @@ public: while (u32 res = m_sync.atomic_op([&pos](squeue_sync_var_t& sync) -> u32 { - verify(HERE), sync.count <= sq_size, sync.position < sq_size; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); if (!sync.count) { @@ -551,7 +555,9 @@ public: m_sync.atomic_op([](squeue_sync_var_t& sync) { - verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.pop_lock; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); + ensure(!!sync.pop_lock); sync.pop_lock = 0; sync.position++; sync.count--; @@ -583,12 +589,13 @@ public: bool peek(T& data, u32 start_pos, const std::function& test_exit) { - verify(HERE), start_pos < sq_size; + ensure(start_pos < sq_size); u32 pos = 0; while (u32 res = m_sync.atomic_op([&pos, start_pos](squeue_sync_var_t& sync) -> u32 { - verify(HERE), sync.count <= sq_size, sync.position < sq_size; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); if (sync.count <= start_pos) { @@ -617,7 +624,9 @@ public: m_sync.atomic_op([](squeue_sync_var_t& sync) { - verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.pop_lock; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); + ensure(!!sync.pop_lock); sync.pop_lock = 0; }); @@ -656,7 +665,7 @@ public: public: T& operator [] (u32 index) { - verify(HERE), index < m_count; + ensure(index < m_count); index += m_pos; index = index < sq_size ? 
index : index - sq_size; return m_data[index]; @@ -669,7 +678,8 @@ public: while (m_sync.atomic_op([&pos, &count](squeue_sync_var_t& sync) -> u32 { - verify(HERE), sync.count <= sq_size, sync.position < sq_size; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); if (sync.pop_lock || sync.push_lock) { @@ -691,7 +701,10 @@ public: m_sync.atomic_op([](squeue_sync_var_t& sync) { - verify(HERE), sync.count <= sq_size, sync.position < sq_size, !!sync.pop_lock, !!sync.push_lock; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); + ensure(!!sync.pop_lock); + ensure(!!sync.push_lock); sync.pop_lock = 0; sync.push_lock = 0; }); @@ -704,7 +717,8 @@ public: { while (m_sync.atomic_op([](squeue_sync_var_t& sync) -> u32 { - verify(HERE), sync.count <= sq_size, sync.position < sq_size; + ensure(sync.count <= sq_size); + ensure(sync.position < sq_size); if (sync.pop_lock || sync.push_lock) { diff --git a/rpcs3/Emu/Cell/Modules/cellSail.cpp b/rpcs3/Emu/Cell/Modules/cellSail.cpp index 5f157ef9a7..cb5a943c33 100644 --- a/rpcs3/Emu/Cell/Modules/cellSail.cpp +++ b/rpcs3/Emu/Cell/Modules/cellSail.cpp @@ -836,7 +836,7 @@ error_code cellSailPlayerCreateDescriptor(vm::ptr pSelf, s32 str u32 buffer = vm::alloc(size, vm::main); auto bufPtr = vm::cptr::make(buffer); PamfHeader *buf = const_cast(bufPtr.get_ptr()); - verify(HERE), f.read(buf, size) == size; + ensure(f.read(buf, size) == size); u32 sp_ = vm::alloc(sizeof(CellPamfReader), vm::main); auto sp = vm::ptr::make(sp_); u32 reader = cellPamfReaderInitialize(sp, bufPtr, size, 0); diff --git a/rpcs3/Emu/Cell/Modules/cellSpurs.cpp b/rpcs3/Emu/Cell/Modules/cellSpurs.cpp index 31d5a82172..d2cd86e87b 100644 --- a/rpcs3/Emu/Cell/Modules/cellSpurs.cpp +++ b/rpcs3/Emu/Cell/Modules/cellSpurs.cpp @@ -714,7 +714,7 @@ void _spurs::handler_entry(ppu_thread& ppu, vm::ptr spurs) if ((spurs->flags1 & SF1_EXIT_IF_NO_WORK) == 0) { - verify(HERE), (spurs->handlerExiting == 1); + ensure((spurs->handlerExiting == 1)); return sys_ppu_thread_exit(ppu, 0); } @@ -790,16 +790,16 @@ s32 _spurs::wakeup_shutdown_completion_waiter(ppu_thread& ppu, vm::ptrhook(ppu, spurs, wid, wklF->hookArg); - verify(HERE), (wklEvent->load() & 0x01); - verify(HERE), (wklEvent->load() & 0x02); - verify(HERE), (wklEvent->load() & 0x20) == 0; + ensure((wklEvent->load() & 0x01)); + ensure((wklEvent->load() & 0x02)); + ensure((wklEvent->load() & 0x20) == 0); wklEvent->fetch_or(0x20); } s32 rc = CELL_OK; if (!wklF->hook || wklEvent->load() & 0x10) { - verify(HERE), (wklF->x28 == 2u); + ensure((wklF->x28 == 2u)); rc = sys_semaphore_post(ppu, static_cast(wklF->sem), 1); } @@ -2335,8 +2335,8 @@ s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr spurs, vm::ptr u32 index = wnum & 0xf; if (wnum <= 15) { - verify(HERE), (spurs->wklCurrentContention[wnum] & 0xf) == 0; - verify(HERE), (spurs->wklPendingContention[wnum] & 0xf) == 0; + ensure((spurs->wklCurrentContention[wnum] & 0xf) == 0); + ensure((spurs->wklPendingContention[wnum] & 0xf) == 0); spurs->wklState1[wnum] = SPURS_WKL_STATE_PREPARING; spurs->wklStatus1[wnum] = 0; spurs->wklEvent1[wnum] = 0; @@ -2371,8 +2371,8 @@ s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr spurs, vm::ptr } else { - verify(HERE), (spurs->wklCurrentContention[index] & 0xf0) == 0; - verify(HERE), (spurs->wklPendingContention[index] & 0xf0) == 0; + ensure((spurs->wklCurrentContention[index] & 0xf0) == 0); + ensure((spurs->wklPendingContention[index] & 0xf0) == 0); spurs->wklState2[index] = SPURS_WKL_STATE_PREPARING; spurs->wklStatus2[index] = 0; 
spurs->wklEvent2[index] = 0; @@ -2447,7 +2447,7 @@ s32 _spurs::add_workload(ppu_thread& ppu, vm::ptr spurs, vm::ptr (wnum < CELL_SPURS_MAX_WORKLOAD ? op.wklState1[wnum] : op.wklState2[wnum % 16]) = SPURS_WKL_STATE_RUNNABLE; }); - verify(HERE), (res_wkl <= 31); + ensure((res_wkl <= 31)); vm::light_op(spurs->sysSrvMsgUpdateWorkload, [](atomic_t& v){ v.release(0xff); }); vm::light_op(spurs->sysSrvMessage, [](atomic_t& v){ v.release(0xff); }); return CELL_OK; @@ -2612,7 +2612,7 @@ s32 cellSpursWaitForWorkloadShutdown(ppu_thread& ppu, vm::ptr spurs, if (wait_sema) { - verify(HERE), sys_semaphore_wait(ppu, static_cast(info.sem), 0) == 0; + ensure(sys_semaphore_wait(ppu, static_cast(info.sem), 0) == 0); } // Reverified @@ -2657,7 +2657,7 @@ s32 cellSpursRemoveWorkload(ppu_thread& ppu, vm::ptr spurs, u32 wid) if (spurs->wklFlagReceiver == wid) { - verify(HERE), ppu_execute<&_cellSpursWorkloadFlagReceiver>(ppu, spurs, wid, 0) == 0; + ensure(ppu_execute<&_cellSpursWorkloadFlagReceiver>(ppu, spurs, wid, 0) == 0); } s32 rc; diff --git a/rpcs3/Emu/Cell/Modules/cellSync.cpp b/rpcs3/Emu/Cell/Modules/cellSync.cpp index 3f987770c0..8e53cf0b67 100644 --- a/rpcs3/Emu/Cell/Modules/cellSync.cpp +++ b/rpcs3/Emu/Cell/Modules/cellSync.cpp @@ -959,7 +959,7 @@ error_code _cellSyncLFQueueGetPushPointer(ppu_thread& ppu, vm::ptrm_eq_id, vm::null, 0) == CELL_OK; + ensure(sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK); var1 = 1; } } @@ -1051,7 +1051,7 @@ error_code _cellSyncLFQueueCompletePushPointer(ppu_thread& ppu, vm::ptr 1 && static_cast(var8) > 1) { - verify(HERE), (16 - var2 <= 1); + ensure((16 - var2 <= 1)); } s32 var11 = (pack >> 10) & 0x1f; @@ -1083,11 +1083,11 @@ error_code _cellSyncLFQueueCompletePushPointer(ppu_thread& ppu, vm::ptrpush2.compare_and_swap_test(old, push2)) { - verify(HERE), (var2 + var4 < 16); + ensure((var2 + var4 < 16)); if (var6 != umax) { - verify(HERE), (queue->push3.compare_and_swap_test(old2, push3)); - verify(HERE), (fpSendSignal); + ensure((queue->push3.compare_and_swap_test(old2, push3))); + ensure((fpSendSignal)); return not_an_error(fpSendSignal(ppu, vm::cast(queue->m_eaSignal.addr(), HERE), var6)); } else @@ -1258,7 +1258,7 @@ error_code _cellSyncLFQueueGetPopPointer(ppu_thread& ppu, vm::ptrm_eq_id, vm::null, 0) == CELL_OK); + ensure((sys_event_queue_receive(ppu, queue->m_eq_id, vm::null, 0) == CELL_OK)); var1 = 1; } } @@ -1356,7 +1356,7 @@ error_code _cellSyncLFQueueCompletePopPointer(ppu_thread& ppu, vm::ptr 1 && static_cast(var8) > 1) { - verify(HERE), (16 - var2 <= 1); + ensure((16 - var2 <= 1)); } s32 var11 = (pack >> 10) & 0x1f; @@ -1386,8 +1386,8 @@ error_code _cellSyncLFQueueCompletePopPointer(ppu_thread& ppu, vm::ptrpop3.compare_and_swap_test(old2, pop3)); - verify(HERE), (fpSendSignal); + ensure((queue->pop3.compare_and_swap_test(old2, pop3))); + ensure((fpSendSignal)); return not_an_error(fpSendSignal(ppu, vm::cast(queue->m_eaSignal.addr(), HERE), var6)); } else diff --git a/rpcs3/Emu/Cell/Modules/cellVdec.cpp b/rpcs3/Emu/Cell/Modules/cellVdec.cpp index b76bf4931d..1a441990ed 100644 --- a/rpcs3/Emu/Cell/Modules/cellVdec.cpp +++ b/rpcs3/Emu/Cell/Modules/cellVdec.cpp @@ -576,7 +576,7 @@ static error_code vdecQueryAttr(s32 type, u32 profile, u32 spec_addr /* may be 0 attr->decoderVerLower = decoderVerLower; attr->decoderVerUpper = 0x4840010; - attr->memSize = !spec_addr ? verify(HERE, memSize) : 4 * 1024 * 1024; + attr->memSize = !spec_addr ? 
ensure(memSize) : 4 * 1024 * 1024; attr->cmdDepth = 4; return CELL_OK; } @@ -973,7 +973,7 @@ error_code cellVdecGetPicItem(u32 handle, vm::pptr picItem) info->codecType = vdec->type; info->startAddr = 0x00000123; // invalid value (no address for picture) const int buffer_size = av_image_get_buffer_size(vdec->ctx->pix_fmt, vdec->ctx->width, vdec->ctx->height, 1); - verify(HERE), (buffer_size >= 0); + ensure(buffer_size >= 0); info->size = align(buffer_size, 128); info->auNum = 1; info->auPts[0].lower = static_cast(pts); diff --git a/rpcs3/Emu/Cell/Modules/cellVideoOut.cpp b/rpcs3/Emu/Cell/Modules/cellVideoOut.cpp index 73ee91fe8e..8b9dfa0b87 100644 --- a/rpcs3/Emu/Cell/Modules/cellVideoOut.cpp +++ b/rpcs3/Emu/Cell/Modules/cellVideoOut.cpp @@ -238,7 +238,7 @@ error_code cellVideoOutGetConfiguration(u32 videoOut, vm::ptraspect = g_video_out_aspect_id.at(g_cfg.video.aspect_ratio); CellVideoOutResolution res; - verify("Invalid video configuration" HERE), _IntGetResolutionInfo(config->resolutionId, &res) == CELL_OK; + ensure(_IntGetResolutionInfo(config->resolutionId, &res) == CELL_OK); // "Invalid video configuration" config->pitch = 4 * res.width; } diff --git a/rpcs3/Emu/Cell/Modules/sceNp.cpp b/rpcs3/Emu/Cell/Modules/sceNp.cpp index 419ce49892..f0817e050a 100644 --- a/rpcs3/Emu/Cell/Modules/sceNp.cpp +++ b/rpcs3/Emu/Cell/Modules/sceNp.cpp @@ -1,4 +1,4 @@ -#include "stdafx.h" +#include "stdafx.h" #include "Emu/System.h" #include "Emu/VFS.h" #include "Emu/Cell/PPUModule.h" diff --git a/rpcs3/Emu/Cell/Modules/sysPrxForUser.h b/rpcs3/Emu/Cell/Modules/sysPrxForUser.h index 663d45e152..1bb9b27077 100644 --- a/rpcs3/Emu/Cell/Modules/sysPrxForUser.h +++ b/rpcs3/Emu/Cell/Modules/sysPrxForUser.h @@ -34,12 +34,12 @@ struct sys_lwmutex_locker : ppu(ppu) , mutex(mutex) { - verify(HERE), sys_lwmutex_lock(ppu, mutex, 0) == CELL_OK; + ensure(sys_lwmutex_lock(ppu, mutex, 0) == CELL_OK); } ~sys_lwmutex_locker() noexcept(false) { - verify(HERE), sys_lwmutex_unlock(ppu, mutex) == CELL_OK; + ensure(sys_lwmutex_unlock(ppu, mutex) == CELL_OK); } }; diff --git a/rpcs3/Emu/Cell/Modules/sys_io_.cpp b/rpcs3/Emu/Cell/Modules/sys_io_.cpp index a033f9acaf..d8f37c1b75 100644 --- a/rpcs3/Emu/Cell/Modules/sys_io_.cpp +++ b/rpcs3/Emu/Cell/Modules/sys_io_.cpp @@ -34,7 +34,7 @@ extern void libio_sys_config_init() if (cfg->init_ctr++ == 0) { // Belongs to "_cfg_evt_hndlr" thread (8k stack) - cfg->stack_addr = verify(HERE, vm::alloc(0x2000, vm::stack, 4096)); + cfg->stack_addr = (ensure(vm::alloc(0x2000, vm::stack, 4096))); } } @@ -46,7 +46,7 @@ extern void libio_sys_config_end() if (cfg->init_ctr-- == 1) { - verify(HERE), vm::dealloc(std::exchange(cfg->stack_addr, 0), vm::stack); + ensure(vm::dealloc(std::exchange(cfg->stack_addr, 0), vm::stack)); } } diff --git a/rpcs3/Emu/Cell/Modules/sys_libc_.cpp b/rpcs3/Emu/Cell/Modules/sys_libc_.cpp index 2493cf200f..6111ea55ff 100644 --- a/rpcs3/Emu/Cell/Modules/sys_libc_.cpp +++ b/rpcs3/Emu/Cell/Modules/sys_libc_.cpp @@ -92,7 +92,7 @@ s16 __sys_look_ctype_table(s32 ch) { sysPrxForUser.trace("__sys_look_ctype_table(ch=%d)", ch); - verify("__sys_look_ctype_table" HERE), ch >= -1 && ch <= 127; + ensure(ch >= -1 && ch <= 127); // "__sys_look_ctype_table" return s_ctype_table[ch + 1]; } @@ -101,7 +101,7 @@ s32 _sys_tolower(s32 ch) { sysPrxForUser.trace("_sys_tolower(ch=%d)", ch); - verify("_sys_tolower" HERE), ch >= -1 && ch <= 127; + ensure(ch >= -1 && ch <= 127); // "_sys_tolower" return s_ctype_table[ch + 1] & 1 ? 
ch + 0x20 : ch; } @@ -110,7 +110,7 @@ s32 _sys_toupper(s32 ch) { sysPrxForUser.trace("_sys_toupper(ch=%d)", ch); - verify("_sys_toupper" HERE), ch >= -1 && ch <= 127; + ensure(ch >= -1 && ch <= 127); // "_sys_toupper" return s_ctype_table[ch + 1] & 2 ? ch - 0x20 : ch; } diff --git a/rpcs3/Emu/Cell/Modules/sys_ppu_thread_.cpp b/rpcs3/Emu/Cell/Modules/sys_ppu_thread_.cpp index 8f3dd5c4a4..06e82f803e 100644 --- a/rpcs3/Emu/Cell/Modules/sys_ppu_thread_.cpp +++ b/rpcs3/Emu/Cell/Modules/sys_ppu_thread_.cpp @@ -172,7 +172,7 @@ void sys_ppu_thread_exit(ppu_thread& ppu, u64 val) sysPrxForUser.trace("sys_ppu_thread_exit(val=0x%llx)", val); // Call registered atexit functions - verify(HERE), !sys_lwmutex_lock(ppu, g_ppu_atexit_lwm, 0); + ensure(!sys_lwmutex_lock(ppu, g_ppu_atexit_lwm, 0)); for (auto ptr : *g_ppu_atexit) { @@ -182,7 +182,7 @@ void sys_ppu_thread_exit(ppu_thread& ppu, u64 val) } } - verify(HERE), !sys_lwmutex_unlock(ppu, g_ppu_atexit_lwm); + ensure(!sys_lwmutex_unlock(ppu, g_ppu_atexit_lwm)); // Deallocate TLS ppu_free_tls(vm::cast(ppu.gpr[13], HERE) - 0x7030); @@ -239,7 +239,7 @@ void sys_ppu_thread_once(ppu_thread& ppu, vm::ptr once_ctrl, vm::ptr once_ctrl, vm::ptr= r.imax; + ensure(r.imin >= r.imax); // "Impossible range" } // Fix const values diff --git a/rpcs3/Emu/Cell/PPUInterpreter.cpp b/rpcs3/Emu/Cell/PPUInterpreter.cpp index 6002b49cd9..631e3ab1fd 100644 --- a/rpcs3/Emu/Cell/PPUInterpreter.cpp +++ b/rpcs3/Emu/Cell/PPUInterpreter.cpp @@ -2162,7 +2162,7 @@ bool ppu_interpreter::VSPLTB(ppu_thread& ppu, ppu_opcode_t op) bool ppu_interpreter::VSPLTH(ppu_thread& ppu, ppu_opcode_t op) { auto& d = ppu.vr[op.vd]; - verify(HERE), (op.vuimm < 8); + ensure((op.vuimm < 8)); u16 hword = ppu.vr[op.vb]._u16[7 - op.vuimm]; @@ -2212,7 +2212,7 @@ bool ppu_interpreter::VSPLTISW(ppu_thread& ppu, ppu_opcode_t op) bool ppu_interpreter::VSPLTW(ppu_thread& ppu, ppu_opcode_t op) { auto& d = ppu.vr[op.vd]; - verify(HERE), (op.vuimm < 4); + ensure((op.vuimm < 4)); u32 word = ppu.vr[op.vb]._u32[3 - op.vuimm]; diff --git a/rpcs3/Emu/Cell/PPUModule.cpp b/rpcs3/Emu/Cell/PPUModule.cpp index 9b1b881e43..9b9a989ee5 100644 --- a/rpcs3/Emu/Cell/PPUModule.cpp +++ b/rpcs3/Emu/Cell/PPUModule.cpp @@ -1624,7 +1624,7 @@ void ppu_load_exec(const ppu_exec_object& elf) if (prog.p_type == 0x1u /* LOAD */ && prog.p_memsz && (prog.p_flags & 0x2) == 0u /* W */) { // Set memory protection to read-only when necessary - verify(HERE), vm::page_protect(addr, ::align(size, 0x1000), 0, 0, vm::page_writable); + ensure(vm::page_protect(addr, ::align(size, 0x1000), 0, 0, vm::page_writable)); } } } diff --git a/rpcs3/Emu/Cell/PPUThread.cpp b/rpcs3/Emu/Cell/PPUThread.cpp index 9889ffdff0..7e49a3217a 100644 --- a/rpcs3/Emu/Cell/PPUThread.cpp +++ b/rpcs3/Emu/Cell/PPUThread.cpp @@ -1,4 +1,4 @@ -#include "stdafx.h" +#include "stdafx.h" #include "Utilities/sysinfo.h" #include "Utilities/JIT.h" #include "Crypto/sha1.h" @@ -1166,7 +1166,7 @@ extern void sse_cellbe_stvrx_v0(u64 addr, __m128i a); void ppu_trap(ppu_thread& ppu, u64 addr) { - verify(HERE), (addr & (~u64{UINT32_MAX} | 0x3)) == 0; + ensure((addr & (~u64{UINT32_MAX} | 0x3)) == 0); ppu.cia = static_cast(addr); u32 add = static_cast(g_cfg.core.stub_ppu_traps) * 4; diff --git a/rpcs3/Emu/Cell/PPUTranslator.cpp b/rpcs3/Emu/Cell/PPUTranslator.cpp index f815333f1b..050d46cab7 100644 --- a/rpcs3/Emu/Cell/PPUTranslator.cpp +++ b/rpcs3/Emu/Cell/PPUTranslator.cpp @@ -264,12 +264,11 @@ Value* PPUTranslator::GetAddr(u64 _add) Type* PPUTranslator::ScaleType(Type* type, s32 pow2) { - verify(HERE), 
(type->getScalarType()->isIntegerTy()); - verify(HERE), pow2 > -32, pow2 < 32; + ensure(type->getScalarType()->isIntegerTy()); + ensure(pow2 > -32 && pow2 < 32); uint scaled = type->getScalarSizeInBits(); - - verify(HERE), (scaled & (scaled - 1)) == 0; + ensure((scaled & (scaled - 1)) == 0); if (pow2 > 0) { @@ -280,7 +279,7 @@ Type* PPUTranslator::ScaleType(Type* type, s32 pow2) scaled >>= -pow2; } - verify(HERE), (scaled != 0); + ensure(scaled); const auto new_type = m_ir->getIntNTy(scaled); const auto vec_type = dyn_cast(type); return vec_type ? VectorType::get(new_type, vec_type->getNumElements(), false) : cast(new_type); diff --git a/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp b/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp index f2f81bd79f..5bfee56018 100644 --- a/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp +++ b/rpcs3/Emu/Cell/SPUASMJITRecompiler.cpp @@ -289,7 +289,7 @@ spu_function_t spu_recompiler::compile(spu_program&& _func) const u32 starta = start & -64; const u32 enda = ::align(end, 64); const u32 sizea = (enda - starta) / 64; - verify(HERE), sizea; + ensure(sizea); // Initialize pointers c->lea(x86::rax, x86::qword_ptr(label_code)); @@ -370,7 +370,7 @@ spu_function_t spu_recompiler::compile(spu_program&& _func) const u32 starta = start & -32; const u32 enda = ::align(end, 32); const u32 sizea = (enda - starta) / 32; - verify(HERE), sizea; + ensure(sizea); if (sizea == 1) { @@ -492,7 +492,7 @@ spu_function_t spu_recompiler::compile(spu_program&& _func) const u32 starta = start & -32; const u32 enda = ::align(end, 32); const u32 sizea = (enda - starta) / 32; - verify(HERE), sizea; + ensure(sizea); if (sizea == 1) { @@ -1154,7 +1154,7 @@ void spu_recompiler::branch_indirect(spu_opcode_t op, bool jt, bool ret) const u32 end = instr_labels.rbegin()->first + 4; // Load local indirect jump address, check local bounds - verify(HERE), start == m_base; + ensure(start == m_base); Label fail = c->newLabel(); c->mov(qw1->r32(), *addr); c->sub(qw1->r32(), pc0->r32()); diff --git a/rpcs3/Emu/Cell/SPURecompiler.cpp b/rpcs3/Emu/Cell/SPURecompiler.cpp index d4f469a141..f5344d5234 100644 --- a/rpcs3/Emu/Cell/SPURecompiler.cpp +++ b/rpcs3/Emu/Cell/SPURecompiler.cpp @@ -708,7 +708,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst) // Write jump instruction with rel32 immediate auto make_jump = [&](u8 op, auto target) { - verify("Asm overflow" HERE), raw + 8 <= wxptr + size0 * 22 + 16; + ensure(raw + 8 <= wxptr + size0 * 22 + 16); // Fallback to dispatch if no target const u64 taddr = target ? reinterpret_cast(target) : reinterpret_cast(tr_dispatch); @@ -716,13 +716,13 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst) // Compute the distance const s64 rel = taddr - reinterpret_cast(raw) - (op != 0xe9 ? 
6 : 5); - verify(HERE), rel >= INT32_MIN, rel <= INT32_MAX; + ensure(rel >= INT32_MIN && rel <= INT32_MAX); if (op != 0xe9) { // First jcc byte *raw++ = 0x0f; - verify(HERE), (op >> 4) == 0x8; + ensure((op >> 4) == 0x8); } *raw++ = op; @@ -757,7 +757,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst) u32 size2 = w.size - size1; std::advance(it2, w.size / 2); - while (verify("spu_runtime::work::level overflow" HERE, w.level != 0xffff)) + while (ensure(w.level < UINT16_MAX)) { it = it2; size1 = w.size - size2; @@ -844,7 +844,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst) break; } - verify(HERE), it != w.beg; + ensure(it != w.beg); size1--; size2++; } @@ -857,7 +857,7 @@ spu_function_t spu_runtime::rebuild_ubertrampoline(u32 id_inst) } // Emit 32-bit comparison - verify("Asm overflow" HERE), raw + 12 <= wxptr + size0 * 22 + 16; + ensure(raw + 12 <= wxptr + size0 * 22 + 16); // "Asm overflow" if (w.from != w.level) { @@ -1512,7 +1512,7 @@ spu_program spu_recompiler_base::analyse(const be_t* ls, u32 entry_point) jt_abs.clear(); } - verify(HERE), jt_abs.size() != jt_rel.size(); + ensure(jt_abs.size() != jt_rel.size()); } if (jt_abs.size() >= jt_rel.size()) @@ -1939,7 +1939,7 @@ spu_program spu_recompiler_base::analyse(const be_t* ls, u32 entry_point) } else if (u32& raw_val = result.data[new_size]) { - verify(HERE), raw_val == std::bit_cast>(data); + ensure(raw_val == std::bit_cast>(data)); } else { @@ -3428,7 +3428,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator else if (!callee) { // Create branch patchpoint if chunk == nullptr - verify(HERE), m_finfo, !m_finfo->fn || m_function == m_finfo->chunk; + ensure(m_finfo && (!m_finfo->fn || m_function == m_finfo->chunk)); // Register under a unique linkable name const std::string ppname = fmt::format("%s-pp-%u", m_hash, m_pp_id++); @@ -3448,7 +3448,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator base_pc = m_ir->getInt32(0); } - verify(HERE), callee; + ensure(callee); auto call = m_ir->CreateCall(callee, {m_thread, m_lsptr, base_pc ? base_pc : m_base_pc}); auto func = m_finfo ? 
m_finfo->chunk : llvm::dyn_cast(callee.getCallee()); call->setCallingConv(func->getCallingConv()); @@ -3484,7 +3484,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator r3 = get_reg_fixed(3).value; } - const auto _call = m_ir->CreateCall(verify(HERE, fn), {m_thread, m_lsptr, m_base_pc, sp, r3}); + const auto _call = m_ir->CreateCall(ensure(fn), {m_thread, m_lsptr, m_base_pc, sp, r3}); _call->setCallingConv(fn->getCallingConv()); @@ -3590,7 +3590,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator if (absolute) { - verify(HERE), !m_finfo->fn; + ensure(!m_finfo->fn); const auto next = llvm::BasicBlock::Create(m_context, "", m_function); const auto fail = llvm::BasicBlock::Create(m_context, "", m_function); @@ -3632,7 +3632,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator if (absolute) { - verify(HERE), !m_finfo->fn; + ensure(!m_finfo->fn); m_ir->CreateStore(m_ir->getInt32(target), spu_ptr(&spu_thread::pc), true); } @@ -3646,7 +3646,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator return result; } - verify(HERE), !absolute; + ensure(!absolute); auto& result = m_blocks[target].block; @@ -3790,7 +3790,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator llvm::Value* double_to_xfloat(llvm::Value* val) { - verify("double_to_xfloat" HERE), val, val->getType() == get_type(); + ensure(val && val->getType() == get_type()); const auto d = double_as_uint64(val); const auto s = m_ir->CreateAnd(m_ir->CreateLShr(d, 32), 0x80000000); @@ -3801,7 +3801,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator llvm::Value* xfloat_to_double(llvm::Value* val) { - verify("xfloat_to_double" HERE), val, val->getType() == get_type(); + ensure(val && val->getType() == get_type()); const auto x = m_ir->CreateZExt(val, get_type()); const auto s = m_ir->CreateShl(m_ir->CreateAnd(x, 0x80000000), 32); @@ -3815,7 +3815,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator // Clamp double values to ±Smax, flush values smaller than ±Smin to positive zero llvm::Value* xfloat_in_double(llvm::Value* val) { - verify("xfloat_in_double" HERE), val, val->getType() == get_type(); + ensure(val && val->getType() == get_type()); const auto smax = uint64_as_double(splat(0x47ffffffe0000000).eval(m_ir)); const auto smin = uint64_as_double(splat(0x3810000000000000).eval(m_ir)); @@ -4002,7 +4002,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator llvm::StoreInst* dummy{}; // Check - verify(HERE), !m_block || m_regmod[m_pos / 4] == index; + ensure(!m_block || m_regmod[m_pos / 4] == index); // Test for special case const bool is_xfloat = value->getType() == get_type(); @@ -4562,7 +4562,7 @@ public: m_ir->SetInsertPoint(cblock); - verify(HERE), bfound->second.block_end->getTerminator(); + ensure(bfound->second.block_end->getTerminator()); } _phi->addIncoming(value, bfound->second.block_end); @@ -4668,7 +4668,7 @@ public: m_ir->CreateBr(add_block(target)); } - verify(HERE), m_block->block_end; + ensure(m_block->block_end); } } diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp index fc8a4e712d..cdce8dd2a2 100644 --- a/rpcs3/Emu/Cell/SPUThread.cpp +++ b/rpcs3/Emu/Cell/SPUThread.cpp @@ -335,7 +335,7 @@ namespace spu busy_wait(count); } - verify(HERE), !spu.check_state(); + ensure(!spu.check_state()); } atomic_instruction_table[pc_offset]++; @@ -1540,7 +1540,7 @@ void 
spu_thread::cpu_return() { ch_in_mbox.clear(); - if (verify(HERE, group->running--) == 1) + if (ensure(group->running)-- == 1) { { std::lock_guard lock(group->mutex); @@ -1712,12 +1712,12 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u if (!group) { - verify(HERE), vm::get(vm::spu)->falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, SPU_LS_SIZE, &shm); + ensure(vm::get(vm::spu)->falloc(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index, SPU_LS_SIZE, &shm)); } else { // 0x1000 indicates falloc to allocate page with no access rights in base memory - verify(HERE), vm::get(vm::spu)->falloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (cpu_thread::id & 0xffffff), SPU_LS_SIZE, &shm, 0x1000); + ensure(vm::get(vm::spu)->falloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (cpu_thread::id & 0xffffff), SPU_LS_SIZE, &shm, 0x1000)); } vm::writer_lock(0); @@ -1726,7 +1726,7 @@ spu_thread::spu_thread(lv2_spu_group* group, u32 index, std::string_view name, u { // Map LS mirrors const auto ptr = addr + (i * SPU_LS_SIZE); - verify(HERE), shm->map_critical(ptr) == ptr; + ensure(shm->map_critical(ptr) == ptr); } // Use the middle mirror @@ -3592,7 +3592,7 @@ u32 spu_thread::get_ch_count(u32 ch) default: break; } - verify(HERE), ch < 128u; + ensure(ch < 128u); spu_log.error("Unknown/illegal channel in RCHCNT (ch=%s)", spu_ch_name[ch]); return 0; // Default count } @@ -4310,7 +4310,7 @@ bool spu_thread::stop_and_signal(u32 code) if (is_stopped()) { // The thread group cannot be stopped while waiting for an event - verify(HERE), !(state & cpu_flag::stop); + ensure(!(state & cpu_flag::stop)); return false; } diff --git a/rpcs3/Emu/Cell/lv2/sys_cond.cpp b/rpcs3/Emu/Cell/lv2/sys_cond.cpp index 009083ee37..5641afc3d6 100644 --- a/rpcs3/Emu/Cell/lv2/sys_cond.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_cond.cpp @@ -128,7 +128,7 @@ error_code sys_cond_signal_all(ppu_thread& ppu, u32 cond_id) { if (cond.mutex->try_own(*cpu, cpu->id)) { - verify(HERE), !std::exchange(result, cpu); + ensure(!std::exchange(result, cpu)); } } @@ -169,7 +169,7 @@ error_code sys_cond_signal_to(ppu_thread& ppu, u32 cond_id, u32 thread_id) { if (cpu->id == thread_id) { - verify(HERE), cond.unqueue(cond.sq, cpu); + ensure(cond.unqueue(cond.sq, cpu)); cond.waiters--; @@ -296,7 +296,7 @@ error_code sys_cond_wait(ppu_thread& ppu, u32 cond_id, u64 timeout) } // Verify ownership - verify(HERE), cond->mutex->owner >> 1 == ppu.id; + ensure(cond->mutex->owner >> 1 == ppu.id); // Restore the recursive value cond->mutex->lock_count.release(static_cast(cond.ret)); diff --git a/rpcs3/Emu/Cell/lv2/sys_fs.cpp b/rpcs3/Emu/Cell/lv2/sys_fs.cpp index 8bcf6e0cd8..ba14ae948a 100644 --- a/rpcs3/Emu/Cell/lv2/sys_fs.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_fs.cpp @@ -207,7 +207,7 @@ struct lv2_file::file_view : fs::file_base const u64 old_pos = m_file->file.pos(); const u64 new_pos = m_file->file.seek(m_off + m_pos); const u64 result = m_file->file.read(buffer, size); - verify(HERE), old_pos == m_file->file.seek(old_pos); + ensure(old_pos == m_file->file.seek(old_pos)); m_pos += result; return result; @@ -1306,7 +1306,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr _arg, u32 ? 
file->op_read(arg->buf, arg->size) : file->op_write(arg->buf, arg->size); - verify(HERE), old_pos == file->file.seek(old_pos); + ensure(old_pos == file->file.seek(old_pos)); arg->out_code = CELL_OK; return CELL_OK; diff --git a/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp b/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp index fa537e8b47..1e46fd4b28 100644 --- a/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_lwcond.cpp @@ -129,7 +129,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6 if (mode != 2) { - verify(HERE), !mutex->signaled; + ensure(!mutex->signaled); std::lock_guard lock(mutex->mutex); if (mode == 3 && !mutex->sq.empty()) [[unlikely]] @@ -140,7 +140,7 @@ error_code _sys_lwcond_signal(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id, u6 } else if (mode == 1) { - verify(HERE), mutex->add_waiter(result); + ensure(mutex->add_waiter(result)); result = nullptr; } } @@ -229,9 +229,9 @@ error_code _sys_lwcond_signal_all(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id if (mode == 1) { - verify(HERE), !mutex->signaled; + ensure(!mutex->signaled); std::lock_guard lock(mutex->mutex); - verify(HERE), mutex->add_waiter(cpu); + ensure(mutex->add_waiter(cpu)); } else { diff --git a/rpcs3/Emu/Cell/lv2/sys_memory.cpp b/rpcs3/Emu/Cell/lv2/sys_memory.cpp index 718c3132fb..3340cb8295 100644 --- a/rpcs3/Emu/Cell/lv2/sys_memory.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_memory.cpp @@ -61,7 +61,7 @@ error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptralloc(size, nullptr, align)) { - verify(HERE), !g_fxo->get()->addrs[addr >> 16].exchange(dct); + ensure(!g_fxo->get()->addrs[addr >> 16].exchange(dct)); if (alloc_addr) { @@ -132,7 +132,7 @@ error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid { if (u32 addr = area->alloc(size)) { - verify(HERE), !g_fxo->get()->addrs[addr >> 16].exchange(ct.ptr.get()); + ensure(!g_fxo->get()->addrs[addr >> 16].exchange(ct.ptr.get())); if (alloc_addr) { @@ -164,7 +164,7 @@ error_code sys_memory_free(cpu_thread& cpu, u32 addr) return {CELL_EINVAL, addr}; } - const auto size = verify(HERE, vm::dealloc(addr)); + const auto size = (ensure(vm::dealloc(addr))); reader_lock{id_manager::g_mutex}, ct->used -= size; return CELL_OK; } diff --git a/rpcs3/Emu/Cell/lv2/sys_net.cpp b/rpcs3/Emu/Cell/lv2/sys_net.cpp index 4011eec9ef..1ca1b5ef16 100644 --- a/rpcs3/Emu/Cell/lv2/sys_net.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_net.cpp @@ -1392,7 +1392,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr if (addr) { - verify(HERE), native_addr.ss_family == AF_INET; + ensure(native_addr.ss_family == AF_INET); vm::ptr paddr = vm::cast(addr.addr()); @@ -1825,7 +1825,7 @@ error_code sys_net_bnet_getpeername(ppu_thread& ppu, s32 s, vm::ptr(&native_addr), &native_addrlen) == 0) { - verify(HERE), native_addr.ss_family == AF_INET; + ensure(native_addr.ss_family == AF_INET); return {}; } @@ -1883,7 +1883,7 @@ error_code sys_net_bnet_getsockname(ppu_thread& ppu, s32 s, vm::ptr(&native_addr), &native_addrlen) == 0) { - verify(HERE), native_addr.ss_family == AF_INET; + ensure(native_addr.ss_family == AF_INET); return {}; } @@ -2494,7 +2494,7 @@ error_code sys_net_bnet_recvfrom(ppu_thread& ppu, s32 s, vm::ptr buf, u32 // addr is set earlier for P2P socket if (addr && type != SYS_NET_SOCK_DGRAM_P2P && type != SYS_NET_SOCK_STREAM_P2P) { - verify(HERE), native_addr.ss_family == AF_INET; + ensure(native_addr.ss_family == AF_INET); vm::ptr paddr = vm::cast(addr.addr()); @@ -2661,7 +2661,7 @@ error_code sys_net_bnet_sendto(ppu_thread& ppu, 
s32 s, vm::cptr buf, u32 l if (nph->is_dns(s)) { const s32 ret_analyzer = nph->analyze_dns_packet(s, reinterpret_cast(_buf.data()), len); - + // If we're not connected just never send the packet and pretend we did if (!nph->get_net_status()) { @@ -2816,7 +2816,7 @@ error_code sys_net_bnet_setsockopt(ppu_thread& ppu, s32 s, s32 level, s32 optnam } return {}; - } + } if (level == SYS_NET_SOL_SOCKET) { diff --git a/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp b/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp index a619c80a47..65cb1ba2e3 100644 --- a/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_ppu_thread.cpp @@ -1,4 +1,4 @@ -#include "stdafx.h" +#include "stdafx.h" #include "sys_ppu_thread.h" #include "Emu/IdManager.h" @@ -159,7 +159,7 @@ error_code sys_ppu_thread_join(ppu_thread& ppu, u32 thread_id, vm::ptr vptr const u64 vret = thread->gpr[3]; // Cleanup - verify(HERE), idm::remove_verify>(thread_id, std::move(thread.ptr)); + ensure(idm::remove_verify>(thread_id, std::move(thread.ptr))); if (!vptr) { @@ -221,7 +221,7 @@ error_code sys_ppu_thread_detach(ppu_thread& ppu, u32 thread_id) if (thread.ret == CELL_EAGAIN) { - verify(HERE), idm::remove>(thread_id); + ensure(idm::remove>(thread_id)); } return CELL_OK; diff --git a/rpcs3/Emu/Cell/lv2/sys_prx.cpp b/rpcs3/Emu/Cell/lv2/sys_prx.cpp index ddc5486104..d0ffbbc519 100644 --- a/rpcs3/Emu/Cell/lv2/sys_prx.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_prx.cpp @@ -428,7 +428,7 @@ error_code _sys_prx_start_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptrstate.compare_and_swap_test(PRX_STATE_STARTING, PRX_STATE_STARTED); + ensure(prx->state.compare_and_swap_test(PRX_STATE_STARTING, PRX_STATE_STARTED)); return CELL_OK; } default: @@ -506,7 +506,7 @@ error_code _sys_prx_stop_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptrstate.compare_and_swap_test(PRX_STATE_STOPPING, PRX_STATE_STOPPED); + ensure(prx->state.compare_and_swap_test(PRX_STATE_STOPPING, PRX_STATE_STOPPED)); return CELL_OK; } case 1: diff --git a/rpcs3/Emu/Cell/lv2/sys_rsx.cpp b/rpcs3/Emu/Cell/lv2/sys_rsx.cpp index f5823bfa1d..9fd8f4954b 100644 --- a/rpcs3/Emu/Cell/lv2/sys_rsx.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_rsx.cpp @@ -466,7 +466,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 if ((a4 & 0x80000000) != 0) { // NOTE: There currently seem to only be 2 active heads on PS3 - verify(HERE), a3 < 2; + ensure(a3 < 2); // last half byte gives buffer, 0xf seems to trigger just last queued u8 idx_check = a4 & 0xf; @@ -506,7 +506,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 case 0x103: // Display Queue { // NOTE: There currently seem to only be 2 active heads on PS3 - verify(HERE), a3 < 2; + ensure(a3 < 2); driverInfo.head[a3].lastQueuedBufferId = static_cast(a4); driverInfo.head[a3].flipFlags |= 0x40000000 | (1 << a4); @@ -565,7 +565,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 } // NOTE: There currently seem to only be 2 active heads on PS3 - verify(HERE), a3 < 2; + ensure(a3 < 2); driverInfo.head[a3].flipFlags.atomic_op([&](be_t& flipStatus) { @@ -584,7 +584,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 //a5 high bits = ret.pitch = (pitch / 0x100) << 8; //a5 low bits = ret.format = base | ((base + ((size - 1) / 0x10000)) << 13) | (comp << 26) | (1 << 30); - verify(HERE), a3 < std::size(render->tiles); + ensure(a3 < std::size(render->tiles)); if (!render->is_fifo_idle()) { @@ -626,7 +626,7 @@ error_code sys_rsx_context_attribute(u32 context_id, 
u32 package_id, u64 a3, u64 } // Hardcoded value in gcm - verify(HERE), !!(a5 & (1 << 30)); + ensure(a5 & (1 << 30)); } std::lock_guard lock(rsx_cfg->mutex); @@ -669,7 +669,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 //a6 high = status0 = (zcullDir << 1) | (zcullFormat << 2) | ((sFunc & 0xF) << 12) | (sRef << 16) | (sMask << 24); //a6 low = status1 = (0x2000 << 0) | (0x20 << 16); - verify(HERE), a3 < std::size(render->zculls); + ensure(a3 < std::size(render->zculls)); if (!render->is_fifo_idle()) { @@ -699,7 +699,8 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 } // Hardcoded values in gcm - verify(HERE), !!(a4 & (1ull << 32)), (a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16)); + ensure(a4 & (1ull << 32)); + ensure((a6 & 0xFFFFFFFF) == 0u + ((0x2000 << 0) | (0x20 << 16))); } std::lock_guard lock(rsx_cfg->mutex); @@ -752,7 +753,7 @@ error_code sys_rsx_context_attribute(u32 context_id, u32 package_id, u64 a3, u64 case 0xFED: // hack: vblank command { // NOTE: There currently seem to only be 2 active heads on PS3 - verify(HERE), a3 < 2; + ensure(a3 < 2); // todo: this is wrong and should be 'second' vblank handler and freq, but since currently everything is reported as being 59.94, this should be fine vm::_ref(render->device_addr + 0x30) = 1; diff --git a/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp b/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp index 4124ecedd1..b4b687e650 100644 --- a/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_rwlock.cpp @@ -259,7 +259,7 @@ error_code sys_rwlock_runlock(ppu_thread& ppu, u32 rw_lock_id) { rwlock->owner = 0; - verify(HERE), rwlock->rq.empty(); + ensure(rwlock->rq.empty()); } } } diff --git a/rpcs3/Emu/Cell/lv2/sys_semaphore.cpp b/rpcs3/Emu/Cell/lv2/sys_semaphore.cpp index 4de5297a89..267e9c8dfe 100644 --- a/rpcs3/Emu/Cell/lv2/sys_semaphore.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_semaphore.cpp @@ -150,13 +150,13 @@ error_code sys_semaphore_wait(ppu_thread& ppu, u32 sem_id, u64 timeout) break; } - verify(HERE), 0 > sem->val.fetch_op([](s32& val) + ensure(0 > sem->val.fetch_op([](s32& val) { if (val < 0) { val++; } - }); + })); ppu.gpr[3] = CELL_ETIMEDOUT; break; @@ -255,7 +255,7 @@ error_code sys_semaphore_post(ppu_thread& ppu, u32 sem_id, s32 count) for (s32 i = 0; i < to_awake; i++) { - sem->append(verify(HERE, sem->schedule(sem->sq, sem->protocol))); + sem->append((ensure(sem->schedule(sem->sq, sem->protocol)))); } if (to_awake > 0) diff --git a/rpcs3/Emu/Cell/lv2/sys_spu.cpp b/rpcs3/Emu/Cell/lv2/sys_spu.cpp index 3ee8cb6c3a..6ded237af8 100644 --- a/rpcs3/Emu/Cell/lv2/sys_spu.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_spu.cpp @@ -289,7 +289,7 @@ error_code _sys_spu_image_close(ppu_thread& ppu, vm::ptr img) return CELL_ESRCH; } - verify(HERE), vm::dealloc(handle->segs.addr(), vm::main); + ensure(vm::dealloc(handle->segs.addr(), vm::main)); return CELL_OK; } @@ -403,7 +403,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr thread, u32 g const u32 tid = (inited << 24) | (group_id & 0xffffff); - verify(HERE), idm::import>([&]() + ensure(idm::import>([&]() { std::string full_name = fmt::format("SPU[0x%07x] ", tid); @@ -416,7 +416,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr thread, u32 g group->threads[inited] = spu; group->threads_map[spu_num] = static_cast(inited); return spu; - }); + })); *thread = tid; @@ -682,7 +682,7 @@ error_code sys_spu_thread_group_destroy(ppu_thread& ppu, u32 id) if (auto thread = t.get()) { // Deallocate LS - verify(HERE), 
vm::get(vm::spu)->dealloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (thread->id & 0xffffff), &thread->shm); + ensure(vm::get(vm::spu)->dealloc(SPU_FAKE_BASE_ADDR + SPU_LS_SIZE * (thread->id & 0xffffff), &thread->shm)); // Remove ID from IDM (destruction will occur in group destructor) idm::remove>(thread->id); @@ -1848,7 +1848,7 @@ error_code sys_raw_spu_create(ppu_thread& ppu, vm::ptr id, vm::ptr at const u32 tid = idm::make>(fmt::format("RawSPU[0x%x] ", index), nullptr, index, "", index); - spu_thread::g_raw_spu_id[index] = verify("RawSPU ID" HERE, tid); + spu_thread::g_raw_spu_id[index] = (ensure(tid)); *id = index; @@ -1901,7 +1901,7 @@ error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr id, vm::ptrgpr[5] = v128::from64(0, arg3); thread->gpr[6] = v128::from64(0, arg4); - spu_thread::g_raw_spu_id[index] = verify("IsoSPU ID" HERE, thread->id); + spu_thread::g_raw_spu_id[index] = (ensure(thread->id)); sys_spu_image img; img.load(obj); @@ -1910,7 +1910,7 @@ error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr id, vm::ptrls, image_info->segs.get_ptr(), image_info->nsegs); thread->write_reg(ls_addr + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, image_info->e_entry); - verify(HERE), idm::remove_verify(img.entry_point, std::move(image_info)); + ensure(idm::remove_verify(img.entry_point, std::move(image_info))); *id = index; return CELL_OK; diff --git a/rpcs3/Emu/Cell/lv2/sys_ss.cpp b/rpcs3/Emu/Cell/lv2/sys_ss.cpp index 7372e29086..6a2b794250 100644 --- a/rpcs3/Emu/Cell/lv2/sys_ss.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_ss.cpp @@ -97,7 +97,7 @@ error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3) return CELL_ESRCH; } - verify(HERE), a2 == static_cast(process_getpid()); + ensure(a2 == static_cast(process_getpid())); vm::write64(vm::cast(a3), authid); break; } diff --git a/rpcs3/Emu/Cell/lv2/sys_sync.h b/rpcs3/Emu/Cell/lv2/sys_sync.h index 4b4f05de66..0c48779327 100644 --- a/rpcs3/Emu/Cell/lv2/sys_sync.h +++ b/rpcs3/Emu/Cell/lv2/sys_sync.h @@ -165,7 +165,7 @@ public: static void set_priority(cpu_thread& thread, s32 prio) { - verify(HERE), prio + 512u < 3712; + ensure(prio + 512u < 3712); awake(&thread, prio); } diff --git a/rpcs3/Emu/Cell/lv2/sys_time.cpp b/rpcs3/Emu/Cell/lv2/sys_time.cpp index 926f439e74..3a18649e48 100644 --- a/rpcs3/Emu/Cell/lv2/sys_time.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_time.cpp @@ -130,7 +130,7 @@ u64 get_timebased_time() { #ifdef _WIN32 LARGE_INTEGER count; - verify(HERE), QueryPerformanceCounter(&count); + ensure(QueryPerformanceCounter(&count)); const u64 time = count.QuadPart; const u64 freq = s_time_aux_info.perf_freq; @@ -138,7 +138,7 @@ u64 get_timebased_time() return (time / freq * g_timebase_freq + time % freq * g_timebase_freq / freq) * g_cfg.core.clocks_scale / 100u; #else struct timespec ts; - verify(HERE), ::clock_gettime(CLOCK_MONOTONIC, &ts) == 0; + ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0); return (static_cast(ts.tv_sec) * g_timebase_freq + static_cast(ts.tv_nsec) * g_timebase_freq / 1000000000ull) * g_cfg.core.clocks_scale / 100u; #endif @@ -151,7 +151,7 @@ u64 get_system_time() { #ifdef _WIN32 LARGE_INTEGER count; - verify(HERE), QueryPerformanceCounter(&count); + ensure(QueryPerformanceCounter(&count)); const u64 time = count.QuadPart; const u64 freq = s_time_aux_info.perf_freq; @@ -159,7 +159,7 @@ u64 get_system_time() const u64 result = time / freq * 1000000ull + (time % freq) * 1000000ull / freq; #else struct timespec ts; - verify(HERE), ::clock_gettime(CLOCK_MONOTONIC, &ts) == 0; + ensure(::clock_gettime(CLOCK_MONOTONIC, &ts) == 0); 
const u64 result = static_cast(ts.tv_sec) * 1000000ull + static_cast(ts.tv_nsec) / 1000u; #endif @@ -196,7 +196,7 @@ error_code sys_time_get_current_time(vm::ptr sec, vm::ptr nsec) #ifdef _WIN32 LARGE_INTEGER count; - verify(HERE), QueryPerformanceCounter(&count); + ensure(QueryPerformanceCounter(&count)); const u64 diff_base = count.QuadPart - s_time_aux_info.start_time; @@ -219,7 +219,7 @@ error_code sys_time_get_current_time(vm::ptr sec, vm::ptr nsec) *nsec = time % 1000000000ull; #else struct timespec ts; - verify(HERE), ::clock_gettime(CLOCK_REALTIME, &ts) == 0; + ensure(::clock_gettime(CLOCK_REALTIME, &ts) == 0); if (g_cfg.core.clocks_scale == 100) { diff --git a/rpcs3/Emu/Cell/lv2/sys_usbd.cpp b/rpcs3/Emu/Cell/lv2/sys_usbd.cpp index d4715b5dbc..0f16a56f9f 100644 --- a/rpcs3/Emu/Cell/lv2/sys_usbd.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_usbd.cpp @@ -472,7 +472,7 @@ error_code sys_usbd_initialize(ppu_thread& ppu, vm::ptr handle) std::lock_guard lock(usbh->mutex); // Must not occur (lv2 allows multiple handles, cellUsbd does not) - verify("sys_usbd Initialized twice" HERE), !usbh->is_init.exchange(true); + ensure(!usbh->is_init.exchange(true)); *handle = 0x115B; diff --git a/rpcs3/Emu/Cell/lv2/sys_vm.cpp b/rpcs3/Emu/Cell/lv2/sys_vm.cpp index 54fd763f42..06a231eef3 100644 --- a/rpcs3/Emu/Cell/lv2/sys_vm.cpp +++ b/rpcs3/Emu/Cell/lv2/sys_vm.cpp @@ -76,7 +76,7 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64 if (const auto area = vm::find_map(0x10000000, 0x10000000, 2 | (flag & SYS_MEMORY_PAGE_SIZE_MASK))) { // Alloc all memory (shall not fail) - verify(HERE), area->alloc(vsize); + ensure(area->alloc(vsize)); vm::lock_sudo(area->addr, vsize); idm::make(area->addr, vsize, ct, psize); @@ -117,7 +117,7 @@ error_code sys_vm_unmap(ppu_thread& ppu, u32 addr) const auto vmo = idm::withdraw(sys_vm_t::find_id(addr), [&](sys_vm_t& vmo) { // Free block - verify(HERE), vm::unmap(addr); + ensure(vm::unmap(addr)); // Return memory vmo.ct->used -= vmo.psize; diff --git a/rpcs3/Emu/Io/Skylander.cpp b/rpcs3/Emu/Io/Skylander.cpp index ba5b6f3206..02c73c0df7 100644 --- a/rpcs3/Emu/Io/Skylander.cpp +++ b/rpcs3/Emu/Io/Skylander.cpp @@ -75,7 +75,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w { case 'A': // Activate command - verify(HERE), buf_size == 2; + ensure(buf_size == 2); q_result = {0x41, buf[1], 0xFF, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; @@ -83,7 +83,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w break; case 'C': // Set LEDs colour - verify(HERE), buf_size == 4; + ensure(buf_size == 4); break; case 'M': q_result[0] = 0x4D; @@ -92,7 +92,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w break; case 'Q': // Queries a block - verify(HERE), buf_size == 3; + ensure(buf_size == 3); q_result[0] = 'Q'; q_result[1] = 0x10; @@ -107,18 +107,18 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w break; case 'R': // Reset - verify(HERE), buf_size == 2; + ensure(buf_size == 2); q_result = { 0x52, 0x02, 0x0A, 0x03, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; q_queries.push(q_result); break; case 'S': // ? 
- verify(HERE), buf_size == 1; + ensure(buf_size == 1); break; case 'W': // Write a block - verify(HERE), buf_size == 19; + ensure(buf_size == 19); q_result[0] = 'W'; q_result[1] = 0x10; q_result[2] = buf[2]; @@ -147,7 +147,7 @@ void usb_device_skylander::control_transfer(u8 bmRequestType, u8 bRequest, u16 w void usb_device_skylander::interrupt_transfer(u32 buf_size, u8* buf, u32 endpoint, UsbTransfer* transfer) { - verify(HERE), buf_size == 0x20; + ensure(buf_size == 0x20); transfer->fake = true; transfer->expected_count = buf_size; diff --git a/rpcs3/Emu/Memory/vm.cpp b/rpcs3/Emu/Memory/vm.cpp index 7d513acc01..97f335f3ad 100644 --- a/rpcs3/Emu/Memory/vm.cpp +++ b/rpcs3/Emu/Memory/vm.cpp @@ -1,4 +1,4 @@ -#include "stdafx.h" +#include "stdafx.h" #include "vm_locking.h" #include "vm_ptr.h" #include "vm_ref.h" @@ -686,7 +686,7 @@ namespace vm // 1. To simplify range_lock logic // 2. To make sure it never overlaps with 32-bit addresses // Also check that it's aligned (lowest 16 bits) - verify(HERE), (shm_self & 0xffff'8000'0000'ffff) == range_locked; + ensure((shm_self & 0xffff'8000'0000'ffff) == range_locked); // Find another mirror and map it as shareable too for (auto& ploc : g_locations) @@ -716,7 +716,7 @@ namespace vm u64 shm_self = reinterpret_cast(shm->get()) ^ range_locked; // Check (see above) - verify(HERE), (shm_self & 0xffff'8000'0000'ffff) == range_locked; + ensure((shm_self & 0xffff'8000'0000'ffff) == range_locked); // Map range as shareable for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++) @@ -884,7 +884,7 @@ namespace vm else { // Must be consistent - verify(HERE), is_exec == !!(g_pages[i] & page_executable); + ensure(is_exec == !!(g_pages[i] & page_executable)); } size += 4096; @@ -1049,8 +1049,8 @@ namespace vm { perf_meter<"PAGE_LCK"_u64> perf; - verify("lock_sudo" HERE), addr % 4096 == 0; - verify("lock_sudo" HERE), size % 4096 == 0; + ensure(addr % 4096 == 0); + ensure(size % 4096 == 0); if (!utils::memory_lock(g_sudo_addr + addr, size)) { @@ -1075,8 +1075,8 @@ namespace vm if (this->flags & 0x10) { // Mark overflow/underflow guard pages as allocated - verify(HERE), !g_pages[addr / 4096].exchange(page_allocated); - verify(HERE), !g_pages[addr / 4096 + size / 4096 - 1].exchange(page_allocated); + ensure(!g_pages[addr / 4096].exchange(page_allocated)); + ensure(!g_pages[addr / 4096 + size / 4096 - 1].exchange(page_allocated)); } // Map "real" memory pages; provide a function to search for mirrors with private member access @@ -1208,7 +1208,7 @@ namespace vm std::shared_ptr shm; if (m_common) - verify(HERE), !src; + ensure(!src); else if (src) shm = *src; else @@ -1265,7 +1265,7 @@ namespace vm std::shared_ptr shm; if (m_common) - verify(HERE), !src; + ensure(!src); else if (src) shm = *src; else @@ -1306,12 +1306,12 @@ namespace vm if (flags & 0x10) { // Clear guard pages - verify(HERE), g_pages[addr / 4096 - 1].exchange(0) == page_allocated; - verify(HERE), g_pages[addr / 4096 + size / 4096].exchange(0) == page_allocated; + ensure(g_pages[addr / 4096 - 1].exchange(0) == page_allocated); + ensure(g_pages[addr / 4096 + size / 4096].exchange(0) == page_allocated); } // Unmap "real" memory pages - verify(HERE), size == _page_unmap(addr, size, found->second.second.get()); + ensure(size == _page_unmap(addr, size, found->second.second.get())); // Clear stack guards if (flags & 0x10) diff --git a/rpcs3/Emu/RSX/Capture/rsx_replay.cpp b/rpcs3/Emu/RSX/Capture/rsx_replay.cpp index dd8c77a86b..d923209e94 100644 --- a/rpcs3/Emu/RSX/Capture/rsx_replay.cpp +++ 
b/rpcs3/Emu/RSX/Capture/rsx_replay.cpp @@ -34,7 +34,7 @@ namespace rsx // 'fake' initialize usermemory sys_memory_allocate(*this, buffer_size, SYS_MEMORY_PAGE_SIZE_1M, contextInfo.ptr(&rsx_context::user_addr)); - verify(HERE), (user_mem_addr = contextInfo->user_addr) != 0; + ensure((user_mem_addr = contextInfo->user_addr) != 0); if (sys_rsx_device_map(*this, contextInfo.ptr(&rsx_context::dev_addr), vm::null, 0x8) != CELL_OK) fmt::throw_exception("Capture Replay: sys_rsx_device_map failed!"); diff --git a/rpcs3/Emu/RSX/CgBinaryFragmentProgram.cpp b/rpcs3/Emu/RSX/CgBinaryFragmentProgram.cpp index b6c02093e9..e3d6ceb94d 100644 --- a/rpcs3/Emu/RSX/CgBinaryFragmentProgram.cpp +++ b/rpcs3/Emu/RSX/CgBinaryFragmentProgram.cpp @@ -7,7 +7,7 @@ void CgBinaryDisasm::AddCodeAsm(const std::string& code) { - verify(HERE), (m_opcode < 70); + ensure((m_opcode < 70)); std::string op_name; if (dst.dest_reg == 63) @@ -221,7 +221,7 @@ template std::string CgBinaryDisasm::GetSrcDisAsm(T src) { ret += swizzle; } - + if (src.neg) ret = "-" + ret; if (src.abs) ret = "|" + ret + "|"; @@ -232,7 +232,7 @@ void CgBinaryDisasm::TaskFP() { m_size = 0; u32* data = reinterpret_cast(&m_buffer[m_offset]); - verify(HERE), ((m_buffer_size - m_offset) % sizeof(u32) == 0); + ensure(((m_buffer_size - m_offset) % sizeof(u32) == 0)); for (u32 i = 0; i < (m_buffer_size - m_offset) / sizeof(u32); i++) { // Get BE data @@ -481,7 +481,7 @@ void CgBinaryDisasm::TaskFP() break; } - verify(HERE), m_step % sizeof(u32) == 0; + ensure(m_step % sizeof(u32) == 0); data += m_step / sizeof(u32); } } diff --git a/rpcs3/Emu/RSX/CgBinaryProgram.h b/rpcs3/Emu/RSX/CgBinaryProgram.h index 33637e6c27..e61172e7bd 100644 --- a/rpcs3/Emu/RSX/CgBinaryProgram.h +++ b/rpcs3/Emu/RSX/CgBinaryProgram.h @@ -348,7 +348,7 @@ public: m_offset = prog.ucode; u32* vdata = reinterpret_cast(&m_buffer[m_offset]); - verify(HERE), (m_buffer_size - m_offset) % sizeof(u32) == 0; + ensure((m_buffer_size - m_offset) % sizeof(u32) == 0); for (u32 i = 0; i < (m_buffer_size - m_offset) / sizeof(u32); i++) { vdata[i] = std::bit_cast>(vdata[i]); diff --git a/rpcs3/Emu/RSX/CgBinaryVertexProgram.cpp b/rpcs3/Emu/RSX/CgBinaryVertexProgram.cpp index 3e7dd6c8d6..85328bfeec 100644 --- a/rpcs3/Emu/RSX/CgBinaryVertexProgram.cpp +++ b/rpcs3/Emu/RSX/CgBinaryVertexProgram.cpp @@ -6,13 +6,13 @@ void CgBinaryDisasm::AddScaCodeDisasm(const std::string& code) { - verify(HERE), (m_sca_opcode < 21); + ensure((m_sca_opcode < 21)); m_arb_shader += rsx_vp_sca_op_names[m_sca_opcode] + code + " "; } void CgBinaryDisasm::AddVecCodeDisasm(const std::string& code) { - verify(HERE), (m_vec_opcode < 26); + ensure((m_vec_opcode < 26)); m_arb_shader += rsx_vp_vec_op_names[m_vec_opcode] + code + " "; } @@ -298,7 +298,7 @@ void CgBinaryDisasm::AddCodeCondDisasm(const std::string& dst, const std::string { swizzle.clear(); } - + std::string cond = fmt::format("%s%s", cond_string_table[d0.cond], swizzle.c_str()); AddCodeDisasm(dst + "(" + cond + ") " + ", " + src + ";"); } diff --git a/rpcs3/Emu/RSX/Common/BufferUtils.cpp b/rpcs3/Emu/RSX/Common/BufferUtils.cpp index 8f182877cb..a30fb5fcd7 100644 --- a/rpcs3/Emu/RSX/Common/BufferUtils.cpp +++ b/rpcs3/Emu/RSX/Common/BufferUtils.cpp @@ -581,7 +581,7 @@ namespace void write_vertex_array_data_to_buffer(gsl::span raw_dst_span, gsl::span src_ptr, u32 count, rsx::vertex_base_type type, u32 vector_element_count, u32 attribute_src_stride, u8 dst_stride, bool swap_endianness) { - verify(HERE), (vector_element_count > 0); + ensure((vector_element_count > 0)); const u32 
src_read_stride = rsx::get_vertex_type_size_on_host(type, vector_element_count); bool use_stream_no_stride = false; @@ -1042,7 +1042,7 @@ namespace T min_index = invalid_index; T max_index = 0; - verify(HERE), (dst.size() >= 3 * (src.size() - 2)); + ensure((dst.size() >= 3 * (src.size() - 2))); u32 dst_idx = 0; u32 src_idx = 0; @@ -1093,7 +1093,7 @@ namespace T min_index = index_limit(); T max_index = 0; - verify(HERE), (4 * dst.size_bytes() >= 6 * src.size_bytes()); + ensure((4 * dst.size_bytes() >= 6 * src.size_bytes())); u32 dst_idx = 0; u8 set_size = 0; diff --git a/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.cpp b/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.cpp index 62a622b6cf..39d32487bd 100644 --- a/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.cpp +++ b/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.cpp @@ -126,7 +126,7 @@ void FragmentProgramDecompiler::SetDst(std::string code, u32 flags) u32 reg_index = dst.fp16 ? dst.dest_reg >> 1 : dst.dest_reg; - verify(HERE), reg_index < temp_registers.size(); + ensure(reg_index < temp_registers.size()); if (dst.opcode == RSX_FP_OPCODE_MOV && src0.reg_type == RSX_FP_REGISTER_TYPE_TEMP && @@ -174,7 +174,7 @@ std::string FragmentProgramDecompiler::GetMask() { std::string ret; ret.reserve(5); - + static constexpr std::string_view dst_mask = "xyzw"; ret += '.'; @@ -1266,7 +1266,7 @@ std::string FragmentProgramDecompiler::Decompile() if (dst.end) break; - verify(HERE), m_offset % sizeof(u32) == 0; + ensure(m_offset % sizeof(u32) == 0); data += m_offset / sizeof(u32); } diff --git a/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.h b/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.h index a88ec48852..a5e9c20091 100644 --- a/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.h +++ b/rpcs3/Emu/RSX/Common/FragmentProgramDecompiler.h @@ -66,7 +66,7 @@ struct temp_register bool requires_gather(u8 channel) const { //Data fetched from the single precision register requires merging of the two half registers - verify(HERE), channel < 4; + ensure(channel < 4); if (aliased_h0 && channel < 2) { return last_write_half[channel]; diff --git a/rpcs3/Emu/RSX/Common/ProgramStateCache.cpp b/rpcs3/Emu/RSX/Common/ProgramStateCache.cpp index b4cf28946c..e60a1f2de1 100644 --- a/rpcs3/Emu/RSX/Common/ProgramStateCache.cpp +++ b/rpcs3/Emu/RSX/Common/ProgramStateCache.cpp @@ -53,13 +53,13 @@ vertex_program_utils::vertex_program_metadata vertex_program_utils::analyse_vert while (true) { - verify(HERE), current_instruction < 512; + ensure(current_instruction < 512); if (result.instruction_mask[current_instruction]) { if (!fast_exit) { - if (!has_printed_error) + if (!has_printed_error) { // This can be harmless if a dangling RET was encountered before rsx_log.error("vp_analyser: Possible infinite loop detected"); @@ -198,7 +198,7 @@ vertex_program_utils::vertex_program_metadata vertex_program_utils::analyse_vert if (!has_branch_instruction) { - verify(HERE), instruction_range.first == entry; + ensure(instruction_range.first == entry); std::memcpy(dst_prog.data.data(), data + (instruction_range.first * 4), result.ucode_length); } else diff --git a/rpcs3/Emu/RSX/Common/ProgramStateCache.h b/rpcs3/Emu/RSX/Common/ProgramStateCache.h index 532d938362..69d866081d 100644 --- a/rpcs3/Emu/RSX/Common/ProgramStateCache.h +++ b/rpcs3/Emu/RSX/Common/ProgramStateCache.h @@ -403,7 +403,7 @@ public: if (I == m_fragment_shader_cache.end()) return; - verify(HERE), (dst_buffer.size_bytes() >= ::narrow(I->second.FragmentConstantOffsetCache.size()) * 16u); + ensure((dst_buffer.size_bytes() >= 
::narrow(I->second.FragmentConstantOffsetCache.size()) * 16u)); f32* dst = dst_buffer.data(); alignas(16) f32 tmp[4]; diff --git a/rpcs3/Emu/RSX/Common/ShaderParam.h b/rpcs3/Emu/RSX/Common/ShaderParam.h index 637226f065..1f96398d6e 100644 --- a/rpcs3/Emu/RSX/Common/ShaderParam.h +++ b/rpcs3/Emu/RSX/Common/ShaderParam.h @@ -204,7 +204,7 @@ public: auto var_blocks = fmt::split(simple_var, { "." }); - verify(HERE), (!var_blocks.empty()); + ensure((!var_blocks.empty())); name = prefix + var_blocks[0]; diff --git a/rpcs3/Emu/RSX/Common/TextureUtils.cpp b/rpcs3/Emu/RSX/Common/TextureUtils.cpp index c072fd3e09..f14c68b3b1 100644 --- a/rpcs3/Emu/RSX/Common/TextureUtils.cpp +++ b/rpcs3/Emu/RSX/Common/TextureUtils.cpp @@ -930,7 +930,7 @@ namespace rsx } // Mipmap, height and width aren't allowed to be zero - return verify("Texture params" HERE, result) * (cubemap ? 6 : 1); + return (ensure(result) * (cubemap ? 6 : 1)); } size_t get_placed_texture_storage_size(const rsx::fragment_texture& texture, size_t row_pitch_alignment, size_t mipmap_alignment) diff --git a/rpcs3/Emu/RSX/Common/VertexProgramDecompiler.cpp b/rpcs3/Emu/RSX/Common/VertexProgramDecompiler.cpp index d89c462a93..9626a9a640 100644 --- a/rpcs3/Emu/RSX/Common/VertexProgramDecompiler.cpp +++ b/rpcs3/Emu/RSX/Common/VertexProgramDecompiler.cpp @@ -76,7 +76,7 @@ std::string VertexProgramDecompiler::GetDST(bool is_sca) if (!ret.empty()) { // Double assignment. Only possible for vector ops - verify(HERE), !is_sca; + ensure(!is_sca); ret += " = "; } @@ -507,7 +507,7 @@ std::string VertexProgramDecompiler::Decompile() if (m_prog.entry != m_prog.base_address) { jump_position = find_jump_lvl(m_prog.entry - m_prog.base_address); - verify(HERE), jump_position != UINT32_MAX; + ensure(jump_position != UINT32_MAX); } AddCode(fmt::format("int jump_position = %u;", jump_position)); diff --git a/rpcs3/Emu/RSX/Common/surface_store.h b/rpcs3/Emu/RSX/Common/surface_store.h index 2cbb00a7c0..1e9a2c0296 100644 --- a/rpcs3/Emu/RSX/Common/surface_store.h +++ b/rpcs3/Emu/RSX/Common/surface_store.h @@ -146,7 +146,7 @@ namespace rsx } } - verify(HERE), region.target == Traits::get(sink); + ensure(region.target == Traits::get(sink)); orphaned_surfaces.push_back(region.target); data[new_address] = std::move(sink); }; @@ -169,7 +169,7 @@ namespace rsx } // One-time data validity test - verify(HERE), prev_surface; + ensure(prev_surface); if (prev_surface->read_barrier(cmd); !prev_surface->test()) { return; @@ -360,7 +360,7 @@ namespace rsx if (ignore) continue; this_address = surface->base_addr; - verify(HERE), this_address; + ensure(this_address); } const auto parent_region = surface->get_normalized_memory_area(); @@ -405,7 +405,9 @@ namespace rsx auto &storage = surface->is_depth_surface() ? 
m_depth_stencil_storage : m_render_targets_storage; auto &object = storage[e.first]; - verify(HERE), !src_offset.x, !src_offset.y, object; + ensure(!src_offset.x); + ensure(!src_offset.y); + ensure(object); if (!surface->old_contents.empty()) [[unlikely]] { surface->read_barrier(cmd); @@ -531,7 +533,7 @@ namespace rsx if (!new_surface) { - verify(HERE), store; + ensure(store); new_surface_storage = Traits::create_new_surface(address, format, width, height, pitch, antialias, std::forward(extra_params)...); new_surface = Traits::get(new_surface_storage); allocate_rsx_memory(new_surface); @@ -590,7 +592,8 @@ namespace rsx (*primary_storage)[address] = std::move(new_surface_storage); } - verify(HERE), !old_surface_storage, new_surface->get_spp() == get_format_sample_count(antialias); + ensure(!old_surface_storage); + ensure(new_surface->get_spp() == get_format_sample_count(antialias)); return new_surface; } @@ -602,7 +605,7 @@ namespace rsx void free_rsx_memory(surface_type surface) { - verify("Surface memory double free" HERE), surface->has_refs(); + ensure(surface->has_refs()); // "Surface memory double free" if (const auto memory_size = surface->get_memory_range().length(); m_active_memory_used >= memory_size) [[likely]] @@ -976,7 +979,7 @@ namespace rsx if (write_tag == cache_tag && m_skip_write_updates) { // Nothing to do - verify(HERE), !m_invalidate_on_write; + ensure(!m_invalidate_on_write); return; } @@ -1051,7 +1054,7 @@ namespace rsx free_resource_list(m_render_targets_storage); free_resource_list(m_depth_stencil_storage); - verify(HERE), m_active_memory_used == 0; + ensure(m_active_memory_used == 0); m_bound_depth_stencil = std::make_pair(0, nullptr); m_bound_render_targets_config = { 0, 0 }; diff --git a/rpcs3/Emu/RSX/Common/surface_utils.h b/rpcs3/Emu/RSX/Common/surface_utils.h index 5d449d9a82..79e2dbc2b8 100644 --- a/rpcs3/Emu/RSX/Common/surface_utils.h +++ b/rpcs3/Emu/RSX/Common/surface_utils.h @@ -106,13 +106,13 @@ namespace rsx areai src_rect() const { - verify(HERE), width; + ensure(width); return { src_x, src_y, src_x + width, src_y + height }; } areai dst_rect() const { - verify(HERE), width; + ensure(width); return { dst_x, dst_y, dst_x + u16(width * transfer_scale_x + 0.5f), dst_y + u16(height * transfer_scale_y + 0.5f) }; } }; @@ -349,7 +349,8 @@ namespace rsx #else void queue_tag(u32 address) { - verify(HERE), native_pitch, rsx_pitch; + ensure(native_pitch); + ensure(rsx_pitch); base_addr = address; @@ -444,7 +445,7 @@ namespace rsx template void set_old_contents(T* other) { - verify(HERE), old_contents.empty(); + ensure(old_contents.empty()); if (!other || other->get_rsx_pitch() != this->get_rsx_pitch()) { @@ -460,7 +461,8 @@ namespace rsx void set_old_contents_region(const T& region, bool normalized) { // NOTE: This method will not perform pitch verification! 
- verify(HERE), region.source, region.source != static_cast(this); + ensure(region.source); + ensure(region.source != static_cast(this)); old_contents.push_back(region.template cast()); auto &slice = old_contents.back(); @@ -621,7 +623,7 @@ namespace rsx if (spp == 1 || sample_layout == rsx::surface_sample_layout::ps3) return; - verify(HERE), access_type != rsx::surface_access::write; + ensure(access_type != rsx::surface_access::write); transform_samples_to_pixels(region); } }; diff --git a/rpcs3/Emu/RSX/Common/texture_cache.h b/rpcs3/Emu/RSX/Common/texture_cache.h index 043baa3b15..589f03f0e4 100644 --- a/rpcs3/Emu/RSX/Common/texture_cache.h +++ b/rpcs3/Emu/RSX/Common/texture_cache.h @@ -519,7 +519,7 @@ namespace rsx { for (auto* section : _set) { - verify(HERE), section->is_flushed() || section->is_dirty(); + ensure(section->is_flushed() || section->is_dirty()); section->discard(/*set_dirty*/ false); } @@ -708,7 +708,7 @@ namespace rsx { if (section1 == section2) count++; } - verify(HERE), count == 1; + ensure(count == 1); } #endif //TEXTURE_CACHE_DEBUG @@ -739,7 +739,7 @@ namespace rsx // Fast code-path for keeping the fault range protection when not flushing anything if (cause.keep_fault_range_protection() && cause.skip_flush() && !trampled_set.sections.empty()) { - verify(HERE), cause != invalidation_cause::committed_as_fbo; + ensure(cause != invalidation_cause::committed_as_fbo); // We discard all sections fully inside fault_range for (auto &obj : trampled_set.sections) @@ -1172,7 +1172,7 @@ namespace rsx auto* region_ptr = find_cached_texture(rsx_range, RSX_GCM_FORMAT_IGNORED, false, false); if (region_ptr && region_ptr->is_locked() && region_ptr->get_context() == texture_upload_context::framebuffer_storage) { - verify(HERE), region_ptr->get_protection() == utils::protection::no; + ensure(region_ptr->get_protection() == utils::protection::no); region_ptr->discard(false); } } @@ -1198,9 +1198,9 @@ namespace rsx if (!region.is_dirty()) { if (flags == memory_read_flags::flush_once) - verify(HERE), m_flush_always_cache.find(memory_range) == m_flush_always_cache.end(); + ensure(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end()); else - verify(HERE), m_flush_always_cache[memory_range] == &region; + ensure(m_flush_always_cache[memory_range] == &region); } #endif // TEXTURE_CACHE_DEBUG return; @@ -1215,9 +1215,9 @@ namespace rsx #ifdef TEXTURE_CACHE_DEBUG const auto &memory_range = section.get_section_range(); if (flags == memory_read_flags::flush_once) - verify(HERE), m_flush_always_cache[memory_range] == &section; + ensure(m_flush_always_cache[memory_range] == &section); else - verify(HERE), m_flush_always_cache.find(memory_range) == m_flush_always_cache.end(); + ensure(m_flush_always_cache.find(memory_range) == m_flush_always_cache.end()); #endif update_flush_always_cache(section, flags == memory_read_flags::flush_always); } @@ -2169,7 +2169,7 @@ namespace rsx surf->get_surface_height(rsx::surface_metrics::pixels) != surf->height()) { // Must go through a scaling operation due to resolution scaling being present - verify(HERE), g_cfg.video.resolution_scale_percent != 100; + ensure(g_cfg.video.resolution_scale_percent != 100); use_null_region = false; } } @@ -2410,7 +2410,7 @@ namespace rsx } else { - verify(HERE), src_is_render_target; + ensure(src_is_render_target); src_is_depth = (typeless_info.src_is_typeless) ?
false : src_subres.is_depth; } } @@ -2611,7 +2611,7 @@ namespace rsx if (!cached_dest && !dst_is_render_target) { - verify(HERE), !dest_texture; + ensure(!dest_texture); // Need to calculate the minimum required size that will fit the data, anchored on the rsx_address // If the application starts off with an 'inseted' section, the guessed dimensions may not fit! @@ -2698,7 +2698,7 @@ namespace rsx } } - verify(HERE), cached_dest || dst_is_render_target; + ensure(cached_dest || dst_is_render_target); // Invalidate any cached subresources in modified range notify_surface_changed(dst_range); @@ -2710,7 +2710,7 @@ namespace rsx { // Validate modified range u32 mem_offset = dst_address - cached_dest->get_section_base(); - verify(HERE), (mem_offset + dst_payload_length) <= cached_dest->get_section_size(); + ensure((mem_offset + dst_payload_length) <= cached_dest->get_section_size()); lock.upgrade(); @@ -2749,7 +2749,7 @@ namespace rsx else { // Unlikely situation, but the only one which would allow re-upload from CPU to overlap this section. - verify(HERE), !found->is_flushable(); + ensure(!found->is_flushable()); found->discard(true); } } @@ -2844,7 +2844,7 @@ namespace rsx auto& section = *(It.second); if (section.get_protection() != utils::protection::no) { - verify(HERE), section.exists(); + ensure(section.exists()); AUDIT(section.get_context() == texture_upload_context::framebuffer_storage); AUDIT(section.get_memory_read_flags() == memory_read_flags::flush_always); diff --git a/rpcs3/Emu/RSX/Common/texture_cache_checker.h b/rpcs3/Emu/RSX/Common/texture_cache_checker.h index f4ffd13f15..0f75666648 100644 --- a/rpcs3/Emu/RSX/Common/texture_cache_checker.h +++ b/rpcs3/Emu/RSX/Common/texture_cache_checker.h @@ -71,7 +71,7 @@ namespace rsx { // Initialized to utils::protection::rw static constexpr size_t num_pages = 0x1'0000'0000 / 4096; per_page_info_t _info[num_pages]{0}; - + static_assert(static_cast(utils::protection::rw) == 0, "utils::protection::rw must have value 0 for the above constructor to work"); static constexpr size_t rsx_address_to_index(u32 address) diff --git a/rpcs3/Emu/RSX/Common/texture_cache_helpers.h b/rpcs3/Emu/RSX/Common/texture_cache_helpers.h index 9b8d3cb69e..b7144d55fb 100644 --- a/rpcs3/Emu/RSX/Common/texture_cache_helpers.h +++ b/rpcs3/Emu/RSX/Common/texture_cache_helpers.h @@ -298,10 +298,10 @@ namespace rsx src_y += delta; dst_y += delta; - verify(HERE), dst_y == slice_begin; + ensure(dst_y == slice_begin); } - verify(HERE), dst_y >= slice_begin; + ensure(dst_y >= slice_begin); const auto h = std::min(section_end, slice_end) - dst_y; dst_y = (dst_y - slice_begin); @@ -538,7 +538,7 @@ namespace rsx } // Always make sure the conflict is resolved! 
- verify(HERE), is_gcm_depth_format(attr2.gcm_format) == is_depth; + ensure(is_gcm_depth_format(attr2.gcm_format) == is_depth); } if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_2d || @@ -546,7 +546,7 @@ namespace rsx { if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d) { - verify(HERE), attr.height == 1; + ensure(attr.height == 1); } if ((surface_is_rop_target && g_cfg.video.strict_rendering_mode) || @@ -574,7 +574,7 @@ namespace rsx rsx::texture_dimension_extended::texture_dimension_3d, decoded_remap }; } - verify(HERE), extended_dimension == rsx::texture_dimension_extended::texture_dimension_cubemap; + ensure(extended_dimension == rsx::texture_dimension_extended::texture_dimension_cubemap); return{ texptr->get_surface(rsx::surface_access::read), deferred_request_command::cubemap_unwrap, attr2, {}, @@ -591,7 +591,7 @@ namespace rsx u32 encoded_remap, const texture_channel_remap_t& decoded_remap, int select_hint = -1) { - verify(HERE), (select_hint & 0x1) == select_hint; + ensure((select_hint & 0x1) == select_hint); bool is_depth = (select_hint == 0) ? fbos.back().is_depth : local.back()->is_depth_texture(); bool aspect_mismatch = false; @@ -679,7 +679,7 @@ namespace rsx if (extended_dimension == rsx::texture_dimension_extended::texture_dimension_1d) { - verify(HERE), attr.height == 1; + ensure(attr.height == 1); } if (!fbos.empty()) diff --git a/rpcs3/Emu/RSX/Common/texture_cache_utils.h b/rpcs3/Emu/RSX/Common/texture_cache_utils.h index 40be9c6006..7ec918f557 100644 --- a/rpcs3/Emu/RSX/Common/texture_cache_utils.h +++ b/rpcs3/Emu/RSX/Common/texture_cache_utils.h @@ -351,7 +351,7 @@ namespace rsx void initialize(u32 _index, ranged_storage_type *storage) { - verify(HERE), m_storage == nullptr && storage != nullptr; + ensure(m_storage == nullptr && storage != nullptr); AUDIT(index < num_blocks); m_storage = storage; @@ -1052,7 +1052,7 @@ namespace rsx void initialize(ranged_storage_block_type *block) { - verify(HERE), m_block == nullptr && m_tex_cache == nullptr && m_storage == nullptr; + ensure(m_block == nullptr && m_tex_cache == nullptr && m_storage == nullptr); m_block = block; m_storage = &block->get_storage(); m_tex_cache = &block->get_texture_cache(); diff --git a/rpcs3/Emu/RSX/GL/GLGSRender.cpp b/rpcs3/Emu/RSX/GL/GLGSRender.cpp index f7b53c8320..1cc9a305b1 100644 --- a/rpcs3/Emu/RSX/GL/GLGSRender.cpp +++ b/rpcs3/Emu/RSX/GL/GLGSRender.cpp @@ -52,7 +52,7 @@ void GLGSRender::set_scissor(bool clip_viewport) void GLGSRender::on_init_thread() { - verify(HERE), m_frame; + ensure(m_frame); // NOTES: All contexts have to be created before any is bound to a thread // This allows context sharing to work (both GLRCs passed to wglShareLists have to be idle or you get ERROR_BUSY) @@ -552,7 +552,7 @@ void GLGSRender::clear_surface(u32 arg) if ((arg & 0x3) != 0x3 && !require_mem_load && ds->dirty()) { - verify(HERE), mask; + ensure(mask); // Only one aspect was cleared. 
Make sure to memory initialize the other before removing dirty flag if (arg == 1) @@ -651,7 +651,7 @@ bool GLGSRender::load_program() if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits) { get_current_fragment_program(fs_sampler_state); - verify(HERE), current_fragment_program.valid; + ensure(current_fragment_program.valid); get_current_vertex_program(vs_sampler_state); @@ -701,7 +701,7 @@ bool GLGSRender::load_program() } else { - verify(HERE), m_program; + ensure(m_program); m_program->sync(); } } @@ -1061,7 +1061,7 @@ void GLGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query void GLGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query) { - verify(HERE), query->active; + ensure(query->active); glEndQuery(GL_ANY_SAMPLES_PASSED); } diff --git a/rpcs3/Emu/RSX/GL/GLHelpers.cpp b/rpcs3/Emu/RSX/GL/GLHelpers.cpp index 1700ff7785..46eb8c8892 100644 --- a/rpcs3/Emu/RSX/GL/GLHelpers.cpp +++ b/rpcs3/Emu/RSX/GL/GLHelpers.cpp @@ -553,7 +553,7 @@ namespace gl } } - verify("Incompatible source and destination format!" HERE), real_src->aspect() == real_dst->aspect(); + ensure(real_src->aspect() == real_dst->aspect()); const bool is_depth_copy = (real_src->aspect() != image_aspect::color); const filter interp = (linear_interpolation && !is_depth_copy) ? filter::linear : filter::nearest; diff --git a/rpcs3/Emu/RSX/GL/GLHelpers.h b/rpcs3/Emu/RSX/GL/GLHelpers.h index 0d1a36eee0..cb5e1098bf 100644 --- a/rpcs3/Emu/RSX/GL/GLHelpers.h +++ b/rpcs3/Emu/RSX/GL/GLHelpers.h @@ -115,7 +115,7 @@ namespace gl bool check_signaled() const { - verify(HERE), m_value != nullptr; + ensure(m_value); if (signaled) return true; @@ -145,7 +145,7 @@ namespace gl bool wait_for_signal() { - verify(HERE), m_value != nullptr; + ensure(m_value); if (signaled == GL_FALSE) { @@ -195,7 +195,7 @@ namespace gl void server_wait_sync() const { - verify(HERE), m_value != nullptr; + ensure(m_value != nullptr); glWaitSync(m_value, 0, GL_TIMEOUT_IGNORED); } }; @@ -721,7 +721,7 @@ namespace gl void data(GLsizeiptr size, const void* data_ = nullptr, GLenum usage = GL_STREAM_DRAW) { - verify(HERE), m_memory_type != memory_type::local; + ensure(m_memory_type != memory_type::local); target target_ = current_target(); save_binding_state save(target_, *this); @@ -731,7 +731,7 @@ namespace gl GLubyte* map(access access_) { - verify(HERE), m_memory_type == memory_type::host_visible; + ensure(m_memory_type == memory_type::host_visible); bind(current_target()); return reinterpret_cast(glMapBuffer(static_cast(current_target()), static_cast(access_))); @@ -739,7 +739,7 @@ namespace gl void unmap() { - verify(HERE), m_memory_type == memory_type::host_visible; + ensure(m_memory_type == memory_type::host_visible); glUnmapBuffer(static_cast(current_target())); } @@ -794,7 +794,7 @@ namespace gl glBufferStorage(static_cast(m_target), size, data, buffer_storage_flags); m_memory_mapping = glMapBufferRange(static_cast(m_target), 0, size, GL_MAP_WRITE_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); - verify(HERE), m_memory_mapping != nullptr; + ensure(m_memory_mapping != nullptr); m_data_loc = 0; m_size = ::narrow(size); } @@ -894,7 +894,7 @@ namespace gl void reserve_storage_on_heap(u32 alloc_size) override { - verify (HERE), m_memory_mapping == nullptr; + ensure(m_memory_mapping == nullptr); u32 offset = m_data_loc; if (m_data_loc) offset = align(offset, 256); @@ -927,7 +927,7 @@ namespace gl m_alignment_offset = ::narrow(diff_bytes); } - verify(HERE), m_mapped_bytes >= alloc_size; + 
ensure(m_mapped_bytes >= alloc_size); } std::pair alloc_from_heap(u32 alloc_size, u16 alignment) override @@ -994,7 +994,7 @@ namespace gl void update(buffer *_buffer, u32 offset, u32 range, GLenum format = GL_R8UI) { - verify(HERE), _buffer->size() >= (offset + range); + ensure(_buffer->size() >= (offset + range)); m_buffer = _buffer; m_offset = offset; m_range = range; @@ -1777,7 +1777,7 @@ namespace gl if (aspect_flags & image_aspect::stencil) { constexpr u32 depth_stencil_mask = (image_aspect::depth | image_aspect::stencil); - verify("Invalid aspect mask combination" HERE), (aspect_flags & depth_stencil_mask) != depth_stencil_mask; + ensure((aspect_flags & depth_stencil_mask) != depth_stencil_mask); // "Invalid aspect mask combination" glBindTexture(m_target, m_id); glTexParameteri(m_target, GL_DEPTH_STENCIL_TEXTURE_MODE, GL_STENCIL_INDEX); @@ -1880,7 +1880,7 @@ public: } } - verify(HERE), aspect() & aspect_flags; + ensure(aspect() & aspect_flags); auto mapping = apply_swizzle_remap(get_native_component_layout(), remap); auto view = std::make_unique(this, mapping.data(), aspect_flags); auto result = view.get(); @@ -2110,7 +2110,7 @@ public: { save_binding_state save(m_parent); - verify(HERE), rhs.get_target() == texture::target::texture2D; + ensure(rhs.get_target() == texture::target::texture2D); m_parent.m_resource_bindings[m_id] = rhs.id(); glFramebufferTexture2D(GL_FRAMEBUFFER, m_id, GL_TEXTURE_2D, rhs.id(), 0); } @@ -2315,7 +2315,7 @@ public: return *this; } - verify(HERE), !m_init_fence.is_empty(); // Do not attempt to compile a shader_view!! + ensure(!m_init_fence.is_empty()); // Do not attempt to compile a shader_view!! m_init_fence.server_wait_sync(); glCompileShader(m_id); diff --git a/rpcs3/Emu/RSX/GL/GLPipelineCompiler.cpp b/rpcs3/Emu/RSX/GL/GLPipelineCompiler.cpp index b70803dbe3..c4b21b3254 100644 --- a/rpcs3/Emu/RSX/GL/GLPipelineCompiler.cpp +++ b/rpcs3/Emu/RSX/GL/GLPipelineCompiler.cpp @@ -116,7 +116,7 @@ namespace gl } } - verify(HERE), num_worker_threads >= 1; + ensure(num_worker_threads >= 1); // Create the thread pool g_pipe_compilers = std::make_unique>("RSX.W", num_worker_threads); @@ -136,7 +136,7 @@ namespace gl pipe_compiler* get_pipe_compiler() { - verify(HERE), g_pipe_compilers; + ensure(g_pipe_compilers); int thread_index = g_compiler_index++; return g_pipe_compilers.get()->begin() + (thread_index % g_num_pipe_compilers); diff --git a/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp b/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp index 23052ed1b9..2516115dde 100644 --- a/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp +++ b/rpcs3/Emu/RSX/GL/GLRenderTargets.cpp @@ -191,7 +191,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool sk auto rtt = std::get<1>(m_rtts.m_bound_render_targets[i]); color_targets[i] = rtt->id(); - verify("Pitch mismatch!" HERE), rtt->get_rsx_pitch() == m_framebuffer_layout.actual_color_pitch[i]; + ensure(rtt->get_rsx_pitch() == m_framebuffer_layout.actual_color_pitch[i]); // "Pitch mismatch!" m_surface_info[i].address = m_framebuffer_layout.color_addresses[i]; m_surface_info[i].pitch = m_framebuffer_layout.actual_color_pitch[i]; m_surface_info[i].width = m_framebuffer_layout.width; @@ -220,7 +220,7 @@ void GLGSRender::init_buffers(rsx::framebuffer_creation_context context, bool sk auto ds = std::get<1>(m_rtts.m_bound_depth_stencil); depth_stencil_target = ds->id(); - verify("Pitch mismatch!" 
HERE), std::get<1>(m_rtts.m_bound_depth_stencil)->get_rsx_pitch() == m_framebuffer_layout.actual_zeta_pitch; + ensure(std::get<1>(m_rtts.m_bound_depth_stencil)->get_rsx_pitch() == m_framebuffer_layout.actual_zeta_pitch); // "Pitch mismatch!" m_depth_surface_info.address = m_framebuffer_layout.zeta_address; m_depth_surface_info.pitch = m_framebuffer_layout.actual_zeta_pitch; @@ -528,7 +528,7 @@ void gl::render_target::memory_barrier(gl::command_context& cmd, rsx::surface_ac if (get_internal_format() == src_texture->get_internal_format()) { // Copy data from old contents onto this one - verify(HERE), src_bpp == dst_bpp; + ensure(src_bpp == dst_bpp); } else { diff --git a/rpcs3/Emu/RSX/GL/GLRenderTargets.h b/rpcs3/Emu/RSX/GL/GLRenderTargets.h index 96b3875c4a..86dd44d360 100644 --- a/rpcs3/Emu/RSX/GL/GLRenderTargets.h +++ b/rpcs3/Emu/RSX/GL/GLRenderTargets.h @@ -124,7 +124,7 @@ namespace gl static inline gl::render_target* as_rtt(gl::texture* t) { - return verify(HERE, dynamic_cast(t)); + return ensure(dynamic_cast(t)); } } diff --git a/rpcs3/Emu/RSX/GL/GLShaderInterpreter.cpp b/rpcs3/Emu/RSX/GL/GLShaderInterpreter.cpp index 946627bdaa..a0faa4e203 100644 --- a/rpcs3/Emu/RSX/GL/GLShaderInterpreter.cpp +++ b/rpcs3/Emu/RSX/GL/GLShaderInterpreter.cpp @@ -375,7 +375,7 @@ namespace gl if (reference_mask & (1 << i)) { auto sampler_state = static_cast(descriptors[i].get()); - verify(HERE), sampler_state; + ensure(sampler_state); int pool_id = static_cast(sampler_state->image_type); auto& pool = allocator.pools[pool_id]; diff --git a/rpcs3/Emu/RSX/GL/GLTextOut.h b/rpcs3/Emu/RSX/GL/GLTextOut.h index 77a848677b..d98cbe3e2e 100644 --- a/rpcs3/Emu/RSX/GL/GLTextOut.h +++ b/rpcs3/Emu/RSX/GL/GLTextOut.h @@ -122,7 +122,7 @@ namespace gl { if (!enabled) return; - verify(HERE), initialized; + ensure(initialized); std::vector offsets; std::vector counts; diff --git a/rpcs3/Emu/RSX/GL/GLTexture.cpp b/rpcs3/Emu/RSX/GL/GLTexture.cpp index 2135282085..2812c94237 100644 --- a/rpcs3/Emu/RSX/GL/GLTexture.cpp +++ b/rpcs3/Emu/RSX/GL/GLTexture.cpp @@ -503,7 +503,7 @@ namespace gl } else if (pack_info.type == GL_FLOAT) { - verify(HERE), mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 4); + ensure(mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 4)); mem_info->memory_required = (mem_info->image_size_in_texels * 6); initialize_scratch_mem(); @@ -513,7 +513,7 @@ namespace gl } else if (pack_info.type == GL_FLOAT_32_UNSIGNED_INT_24_8_REV) { - verify(HERE), mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 8); + ensure(mem_info->image_size_in_bytes == (mem_info->image_size_in_texels * 8)); mem_info->memory_required = (mem_info->image_size_in_texels * 12); initialize_scratch_mem(); diff --git a/rpcs3/Emu/RSX/GL/GLTextureCache.h b/rpcs3/Emu/RSX/GL/GLTextureCache.h index 812d387dbb..e3e1a09b62 100644 --- a/rpcs3/Emu/RSX/GL/GLTextureCache.h +++ b/rpcs3/Emu/RSX/GL/GLTextureCache.h @@ -104,7 +104,7 @@ namespace gl synchronized = false; sync_timestamp = 0ull; - verify(HERE), rsx_pitch; + ensure(rsx_pitch); this->rsx_pitch = rsx_pitch; this->width = w; @@ -327,7 +327,7 @@ namespace gl m_fence.wait_for_signal(); - verify(HERE), (offset + size) <= pbo.size(); + ensure(offset + GLsizeiptr{size} <= pbo.size()); pbo.bind(buffer::target::pixel_pack); return glMapBufferRange(GL_PIXEL_PACK_BUFFER, offset, size, GL_MAP_READ_BIT); @@ -352,15 +352,15 @@ namespace gl case gl::texture::type::ubyte: { // byte swapping does not work on byte types, use uint_8_8_8_8 for rgba8 instead to avoid 
penalty - verify(HERE), !pack_unpack_swap_bytes; + ensure(!pack_unpack_swap_bytes); break; } case gl::texture::type::uint_24_8: { // Swap bytes on D24S8 does not swap the whole dword, just shuffles the 3 bytes for D24 // In this regard, D24S8 is the same structure on both PC and PS3, but the endianness of the whole block is reversed on PS3 - verify(HERE), pack_unpack_swap_bytes == false; - verify(HERE), real_pitch == (width * 4); + ensure(pack_unpack_swap_bytes == false); + ensure(real_pitch == (width * 4)); if (rsx_pitch == real_pitch) [[likely]] { stream_data_to_memory_swapped_u32(dst, dst, valid_length / 4, 4); @@ -708,7 +708,7 @@ namespace gl } else { - verify(HERE), dst_image->get_target() == gl::texture::target::texture2D; + ensure(dst_image->get_target() == gl::texture::target::texture2D); auto _blitter = gl::g_hw_blitter; const areai src_rect = { src_x, src_y, src_x + src_w, src_y + src_h }; @@ -958,7 +958,7 @@ namespace gl const auto swizzle = get_component_mapping(gcm_format, flags); auto image = static_cast(section.get_raw_texture()); - verify(HERE), image != nullptr; + ensure(image); image->set_native_component_layout(swizzle); section.set_view_flags(flags); diff --git a/rpcs3/Emu/RSX/GL/GLVertexBuffers.cpp b/rpcs3/Emu/RSX/GL/GLVertexBuffers.cpp index 371e7f1bfd..c12f7bc6dc 100644 --- a/rpcs3/Emu/RSX/GL/GLVertexBuffers.cpp +++ b/rpcs3/Emu/RSX/GL/GLVertexBuffers.cpp @@ -23,7 +23,7 @@ namespace { // This is an emulated buffer, so our indices only range from 0->original_vertex_array_length const auto element_count = get_index_count(primitive_mode, vertex_count); - verify(HERE), !gl::is_primitive_native(primitive_mode); + ensure(!gl::is_primitive_native(primitive_mode)); auto mapping = dst.alloc_from_heap(element_count * sizeof(u16), 256); auto mapped_buffer = static_cast(mapping.first); @@ -199,7 +199,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer() if (auto cached = m_vertex_cache->find_vertex_range(storage_address, GL_R8UI, required.first)) { - verify(HERE), cached->local_address == storage_address; + ensure(cached->local_address == storage_address); in_cache = true; upload_info.persistent_mapping_offset = cached->offset_in_heap; @@ -224,7 +224,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer() if (!m_persistent_stream_view.in_range(upload_info.persistent_mapping_offset, required.first, upload_info.persistent_mapping_offset)) { - verify(HERE), m_max_texbuffer_size < m_attrib_ring_buffer->size(); + ensure(m_max_texbuffer_size < m_attrib_ring_buffer->size()); const size_t view_size = ((upload_info.persistent_mapping_offset + m_max_texbuffer_size) > m_attrib_ring_buffer->size()) ? (m_attrib_ring_buffer->size() - upload_info.persistent_mapping_offset) : m_max_texbuffer_size; @@ -241,7 +241,7 @@ gl::vertex_upload_info GLGSRender::set_vertex_buffer() if (!m_volatile_stream_view.in_range(upload_info.volatile_mapping_offset, required.second, upload_info.volatile_mapping_offset)) { - verify(HERE), m_max_texbuffer_size < m_attrib_ring_buffer->size(); + ensure(m_max_texbuffer_size < m_attrib_ring_buffer->size()); const size_t view_size = ((upload_info.volatile_mapping_offset + m_max_texbuffer_size) > m_attrib_ring_buffer->size()) ? 
(m_attrib_ring_buffer->size() - upload_info.volatile_mapping_offset) : m_max_texbuffer_size; diff --git a/rpcs3/Emu/RSX/Overlays/overlay_osk.cpp b/rpcs3/Emu/RSX/Overlays/overlay_osk.cpp index 7bcea2b12a..6b941d06fb 100644 --- a/rpcs3/Emu/RSX/Overlays/overlay_osk.cpp +++ b/rpcs3/Emu/RSX/Overlays/overlay_osk.cpp @@ -103,7 +103,7 @@ namespace rsx { const auto row = (index / num_columns); const auto col = (index % num_columns); - verify(HERE), row < num_rows && col < num_columns; + ensure(row < num_rows && col < num_columns); auto& _cell = m_grid[index++]; _cell.button_flag = props.type_flags; @@ -185,11 +185,11 @@ namespace rsx } } - verify(HERE), num_shift_layers_by_charset.size(); + ensure(num_shift_layers_by_charset.size()); for (u32 layer = 0; layer < num_shift_layers_by_charset.size(); ++layer) { - verify(HERE), num_shift_layers_by_charset[layer]; + ensure(num_shift_layers_by_charset[layer]); } // Reset to first shift layer in the first charset, because the panel changed and we don't know if the layers are similar between panels. @@ -346,7 +346,7 @@ namespace rsx while (true) { const auto current_index = (start_index + count); - verify(HERE), current_index <= index_limit; + ensure(current_index <= index_limit); if (m_grid[current_index].flags & border_flags::right) { diff --git a/rpcs3/Emu/RSX/RSXFIFO.cpp b/rpcs3/Emu/RSX/RSXFIFO.cpp index 8a627d53bc..aae1a614b6 100644 --- a/rpcs3/Emu/RSX/RSXFIFO.cpp +++ b/rpcs3/Emu/RSX/RSXFIFO.cpp @@ -183,7 +183,7 @@ namespace rsx return; } - verify(HERE), !m_remaining_commands; + ensure(!m_remaining_commands); const u32 count = (m_cmd >> 18) & 0x7ff; if (!count) @@ -281,12 +281,13 @@ namespace rsx else { // Not enabled, check if we should try enabling - verify(HERE), total_draw_count > 2000; + ensure(total_draw_count > 2000); if (fifo_hint != load_unoptimizable) { // If its set to unoptimizable, we already tried and it did not work // If it resets to load low (usually after some kind of loading screen) we can try again - verify("Incorrect initial state" HERE), begin_end_ctr == 0, num_collapsed == 0; + ensure(begin_end_ctr == 0); // "Incorrect initial state" + ensure(num_collapsed == 0); enabled = true; } } @@ -589,13 +590,13 @@ namespace rsx case FIFO::EMIT_END: { // Emit end command to close existing scope - //verify(HERE), in_begin_end; + //ensure(in_begin_end); methods[NV4097_SET_BEGIN_END](this, NV4097_SET_BEGIN_END, 0); break; } case FIFO::EMIT_BARRIER: { - //verify(HERE), in_begin_end; + //ensure(in_begin_end); methods[NV4097_SET_BEGIN_END](this, NV4097_SET_BEGIN_END, 0); methods[NV4097_SET_BEGIN_END](this, NV4097_SET_BEGIN_END, m_flattener.get_primitive()); break; diff --git a/rpcs3/Emu/RSX/RSXFragmentProgram.h b/rpcs3/Emu/RSX/RSXFragmentProgram.h index 6197731585..9292c0cc84 100644 --- a/rpcs3/Emu/RSX/RSXFragmentProgram.h +++ b/rpcs3/Emu/RSX/RSXFragmentProgram.h @@ -317,7 +317,7 @@ struct RSXFragmentProgram void clone_data() const { - verify(HERE), ucode_length; + ensure(ucode_length); data.deep_copy(ucode_length); } }; diff --git a/rpcs3/Emu/RSX/RSXOffload.cpp b/rpcs3/Emu/RSX/RSXOffload.cpp index f9b7ede133..02688a75fe 100644 --- a/rpcs3/Emu/RSX/RSXOffload.cpp +++ b/rpcs3/Emu/RSX/RSXOffload.cpp @@ -6,7 +6,6 @@ #include "rsx_utils.h" #include -#include namespace rsx { @@ -139,7 +138,7 @@ namespace rsx // Backend callback void dma_manager::backend_ctrl(u32 request_code, void* args) { - verify(HERE), g_cfg.video.multithreaded_rsx; + ensure(g_cfg.video.multithreaded_rsx); g_fxo->get()->m_enqueued_count++; 
g_fxo->get()->m_work_queue.push(request_code, args); @@ -192,20 +191,20 @@ namespace rsx void dma_manager::set_mem_fault_flag() { - verify("Access denied" HERE), is_current_thread(); + ensure(is_current_thread()); // "Access denied" m_mem_fault_flag.release(true); } void dma_manager::clear_mem_fault_flag() { - verify("Access denied" HERE), is_current_thread(); + ensure(is_current_thread()); // "Access denied" m_mem_fault_flag.release(false); } // Fault recovery utils::address_range dma_manager::get_fault_range(bool writing) const { - const auto m_current_job = verify(HERE, g_fxo->get()->m_current_job); + const auto m_current_job = (ensure(g_fxo->get()->m_current_job)); void *address = nullptr; u32 range = m_current_job->length; @@ -216,11 +215,11 @@ namespace rsx address = (writing) ? m_current_job->dst : m_current_job->src; break; case vector_copy: - verify(HERE), writing; + ensure(writing); address = m_current_job->dst; break; case index_emulate: - verify(HERE), writing; + ensure(writing); address = m_current_job->dst; range = get_index_count(static_cast(m_current_job->aux_param0), m_current_job->length); break; @@ -232,7 +231,7 @@ namespace rsx const uintptr_t addr = uintptr_t(address); const uintptr_t base = uintptr_t(vm::g_base_addr); - verify(HERE), addr > base; + ensure(addr > base); return utils::address_range::start_length(u32(addr - base), range); } } diff --git a/rpcs3/Emu/RSX/RSXTexture.cpp b/rpcs3/Emu/RSX/RSXTexture.cpp index 0c49dfd1ad..81e66caeca 100644 --- a/rpcs3/Emu/RSX/RSXTexture.cpp +++ b/rpcs3/Emu/RSX/RSXTexture.cpp @@ -75,7 +75,7 @@ namespace rsx else max_mipmap_count = floor_log2(static_cast(std::max(width(), height()))) + 1; - return std::min(verify(HERE, mipmap()), max_mipmap_count); + return std::min(ensure(mipmap()), max_mipmap_count); } rsx::texture_wrap_mode fragment_texture::wrap_s() const @@ -368,7 +368,7 @@ namespace rsx u16 vertex_texture::get_exact_mipmap_count() const { const u16 max_mipmap_count = floor_log2(static_cast(std::max(width(), height()))) + 1; - return std::min(verify(HERE, mipmap()), max_mipmap_count); + return std::min(ensure(mipmap()), max_mipmap_count); } std::pair, std::array> vertex_texture::decoded_remap() const diff --git a/rpcs3/Emu/RSX/RSXThread.cpp b/rpcs3/Emu/RSX/RSXThread.cpp index 11e8bfdd42..42387ad4d6 100644 --- a/rpcs3/Emu/RSX/RSXThread.cpp +++ b/rpcs3/Emu/RSX/RSXThread.cpp @@ -185,7 +185,7 @@ namespace rsx } fmt::throw_exception("Wrong vector size" HERE); case vertex_base_type::cmp: return 4; - case vertex_base_type::ub256: verify(HERE), (size == 4); return sizeof(u8) * 4; + case vertex_base_type::ub256: ensure(size == 4); return sizeof(u8) * 4; default: break; } @@ -348,7 +348,7 @@ namespace rsx { // In this mode, it is possible to skip the cond render while the backend is still processing data. 
// The backend guarantees that any draw calls emitted during this time will NOT generate any ROP writes - verify(HERE), !cond_render_ctrl.hw_cond_active; + ensure(!cond_render_ctrl.hw_cond_active); // Pending evaluation, use hardware test begin_conditional_rendering(cond_render_ctrl.eval_sources); @@ -357,7 +357,7 @@ namespace rsx { // NOTE: eval_sources list is reversed with newest query first zcull_ctrl->read_barrier(this, cond_render_ctrl.eval_address, cond_render_ctrl.eval_sources.front()); - verify(HERE), !cond_render_ctrl.eval_pending(); + ensure(!cond_render_ctrl.eval_pending()); } } @@ -1184,7 +1184,7 @@ namespace rsx } } - verify(HERE), layout.color_addresses[index]; + ensure(layout.color_addresses[index]); const auto packed_pitch = (layout.width * color_texel_size); if (packed_render) @@ -1581,7 +1581,7 @@ namespace rsx if (!(m_graphics_state & rsx::pipeline_state::vertex_program_dirty)) return; - verify(HERE), !(m_graphics_state & rsx::pipeline_state::vertex_program_ucode_dirty); + ensure(!(m_graphics_state & rsx::pipeline_state::vertex_program_ucode_dirty)); current_vertex_program.output_mask = rsx::method_registers.vertex_attrib_output_mask(); for (u32 textures_ref = current_vp_metadata.referenced_textures_mask, i = 0; textures_ref; textures_ref >>= 1, ++i) @@ -1767,7 +1767,7 @@ namespace rsx if (!(m_graphics_state & rsx::pipeline_state::fragment_program_dirty)) return; - verify(HERE), !(m_graphics_state & rsx::pipeline_state::fragment_program_ucode_dirty); + ensure(!(m_graphics_state & rsx::pipeline_state::fragment_program_ucode_dirty)); m_graphics_state &= ~(rsx::pipeline_state::fragment_program_dirty); @@ -2457,7 +2457,7 @@ namespace rsx //TODO: On sync every sub-unit should finish any pending tasks //Might cause zcull lockup due to zombie 'unclaimed reports' which are not forcefully removed currently - //verify (HERE), async_tasks_pending.load() == 0; + //ensure(async_tasks_pending.load() == 0); } void thread::sync_hint(FIFO_hint /*hint*/, void* args) @@ -2875,7 +2875,7 @@ namespace rsx { // Frame was not queued before flipping on_frame_end(buffer, true); - verify(HERE), m_queued_flip.pop(buffer); + ensure(m_queued_flip.pop(buffer)); } double limit = 0.; @@ -2976,13 +2976,13 @@ namespace rsx if (state) { - verify(HERE), unit_enabled && m_current_task == nullptr; + ensure(unit_enabled && m_current_task == nullptr); allocate_new_query(ptimer); begin_occlusion_query(m_current_task); } else { - verify(HERE), m_current_task; + ensure(m_current_task); if (m_current_task->num_draws) { end_occlusion_query(m_current_task); @@ -3106,7 +3106,7 @@ namespace rsx { // Not the last one in the chain, forward the writing operation to the last writer // Usually comes from truncated queries caused by disabling the testing - verify(HERE), It->query; + ensure(It->query); It->forwarder = forwarder; It->query->owned = true; @@ -3228,7 +3228,7 @@ namespace rsx void ZCULL_control::write(vm::addr_t sink, u64 timestamp, u32 type, u32 value) { - verify(HERE), sink; + ensure(sink); switch (type) { @@ -3323,7 +3323,7 @@ namespace rsx if (query) { - verify(HERE), query->pending; + ensure(query->pending); const bool implemented = (writer.type == CELL_GCM_ZPASS_PIXEL_CNT || writer.type == CELL_GCM_ZCULL_STATS3); if (implemented && !result && query->num_draws) @@ -3354,13 +3354,13 @@ namespace rsx if (!has_unclaimed) { - verify(HERE), processed == m_pending_writes.size(); + ensure(processed == m_pending_writes.size()); m_pending_writes.clear(); } else { auto remaining = m_pending_writes.size() - processed; 
- verify(HERE), remaining > 0; + ensure(remaining > 0); if (remaining == 1) { @@ -3414,7 +3414,7 @@ namespace rsx if (It->query->num_draws && It->query->sync_tag > m_sync_tag) { ptimer->sync_hint(FIFO_hint::hint_zcull_sync, It->query); - verify(HERE), It->query->sync_tag <= m_sync_tag; + ensure(It->query->sync_tag <= m_sync_tag); } break; @@ -3439,7 +3439,7 @@ namespace rsx if (elapsed > max_zcull_delay_us) { ptimer->sync_hint(FIFO_hint::hint_zcull_sync, front.query); - verify(HERE), front.query->sync_tag <= m_sync_tag; + ensure(front.query->sync_tag <= m_sync_tag); } return; @@ -3475,7 +3475,7 @@ namespace rsx if (query) { - verify(HERE), query->pending; + ensure(query->pending); const bool implemented = (writer.type == CELL_GCM_ZPASS_PIXEL_CNT || writer.type == CELL_GCM_ZCULL_STATS3); if (force_read) @@ -3612,7 +3612,7 @@ namespace rsx if (query->sync_tag > m_sync_tag) [[unlikely]] { ptimer->sync_hint(FIFO_hint::hint_zcull_sync, query); - verify(HERE), m_sync_tag >= query->sync_tag; + ensure(m_sync_tag >= query->sync_tag); } } @@ -3733,7 +3733,7 @@ namespace rsx { if (hw_cond_active) { - verify(HERE), enabled; + ensure(enabled); pthr->end_conditional_rendering(); } @@ -3747,7 +3747,7 @@ namespace rsx { if (hw_cond_active) { - verify(HERE), enabled; + ensure(enabled); pthr->end_conditional_rendering(); } @@ -3765,7 +3765,7 @@ namespace rsx { if (hw_cond_active) { - verify(HERE), enabled; + ensure(enabled); pthr->end_conditional_rendering(); } diff --git a/rpcs3/Emu/RSX/RSXThread.h b/rpcs3/Emu/RSX/RSXThread.h index 2dee9c0abf..29e7bca472 100644 --- a/rpcs3/Emu/RSX/RSXThread.h +++ b/rpcs3/Emu/RSX/RSXThread.h @@ -286,7 +286,7 @@ namespace rsx } } - verify(HERE), _max_index >= _min_index; + ensure(_max_index >= _min_index); return { _min_index, (_max_index - _min_index) + 1 }; } }; diff --git a/rpcs3/Emu/RSX/VK/VKCompute.h b/rpcs3/Emu/RSX/VK/VKCompute.h index 04e3144d0c..318950a8a9 100644 --- a/rpcs3/Emu/RSX/VK/VKCompute.h +++ b/rpcs3/Emu/RSX/VK/VKCompute.h @@ -183,7 +183,7 @@ namespace vk declare_inputs(); } - verify(HERE), m_used_descriptors < VK_MAX_COMPUTE_TASKS; + ensure(m_used_descriptors < VK_MAX_COMPUTE_TASKS); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.descriptorPool = m_descriptor_pool; @@ -351,7 +351,7 @@ namespace vk void set_parameters(VkCommandBuffer cmd, const u32* params, u8 count) { - verify(HERE), use_push_constants; + ensure(use_push_constants); vkCmdPushConstants(cmd, m_pipeline_layout, VK_SHADER_STAGE_COMPUTE_BIT, 0, count * 4, params); } @@ -460,7 +460,7 @@ namespace vk u32 parameters[4] = { data_length, zeta_offset - data_offset, stencil_offset - data_offset, 0 }; set_parameters(cmd, parameters, 4); - verify(HERE), stencil_offset > data_offset; + ensure(stencil_offset > data_offset); m_ssbo_length = stencil_offset + (data_length / 4) - data_offset; cs_shuffle_base::run(cmd, data, data_length, data_offset); } @@ -751,7 +751,7 @@ namespace vk cs_deswizzle_3d() { - verify("Unsupported block type" HERE), (sizeof(_BlockType) & 3) == 0; + ensure((sizeof(_BlockType) & 3) == 0); // "Unsupported block type" ssbo_count = 2; use_push_constants = true; diff --git a/rpcs3/Emu/RSX/VK/VKDMA.cpp b/rpcs3/Emu/RSX/VK/VKDMA.cpp index 7119925e0c..b56c6064f7 100644 --- a/rpcs3/Emu/RSX/VK/VKDMA.cpp +++ b/rpcs3/Emu/RSX/VK/VKDMA.cpp @@ -24,7 +24,7 @@ namespace vk return inheritance_info.parent->map_range(range); } - verify(HERE), range.start >= base_address; + ensure(range.start >= base_address); u32 start = range.start; start -= base_address; return allocated_memory->map(start, 
range.length()); @@ -44,7 +44,8 @@ namespace vk void dma_block::init(const render_device& dev, u32 addr, size_t size) { - verify(HERE), size, !(size % s_dma_block_length); + ensure(size); + ensure(!(size % s_dma_block_length)); base_address = addr; allocated_memory = std::make_unique(dev, size, @@ -113,7 +114,7 @@ namespace vk if (!inheritance_info.parent) { auto bit_offset = page_offset / s_bytes_per_entry; - verify(HERE), (bit_offset + bits.size()) <= page_info.size(); + ensure(bit_offset + bits.size() <= page_info.size()); std::memcpy(page_info.data() + bit_offset, bits.data(), bits.size()); } else @@ -149,7 +150,8 @@ namespace vk return inheritance_info.parent->get(range); } - verify(HERE), range.start >= base_address, range.end <= end(); + ensure(range.start >= base_address); + ensure(range.end <= end()); // mark_dirty(range); return { (range.start - base_address), allocated_memory.get() }; @@ -173,7 +175,7 @@ namespace vk void dma_block::set_parent(command_buffer& cmd, dma_block* parent) { - verify(HERE), parent; + ensure(parent); if (inheritance_info.parent == parent) { // Nothing to do @@ -201,7 +203,7 @@ namespace vk void dma_block::extend(command_buffer& cmd, const render_device &dev, size_t new_size) { - verify(HERE), allocated_memory; + ensure(allocated_memory); if (new_size <= allocated_memory->size()) return; @@ -308,7 +310,7 @@ namespace vk } } - verify(HERE), block_head; + ensure(block_head); return block_head->get(map_range); } diff --git a/rpcs3/Emu/RSX/VK/VKDraw.cpp b/rpcs3/Emu/RSX/VK/VKDraw.cpp index dc9b00d0f5..b21fdd9ba6 100644 --- a/rpcs3/Emu/RSX/VK/VKDraw.cpp +++ b/rpcs3/Emu/RSX/VK/VKDraw.cpp @@ -387,15 +387,15 @@ void VKGSRender::bind_texture_env() //case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst; + ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src; + ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; case VK_IMAGE_LAYOUT_GENERAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage; + ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage); if (!sampler_state->is_cyclic_reference) { // This was used in a cyclic ref before, but is missing a barrier @@ -426,7 +426,7 @@ void VKGSRender::bind_texture_env() break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage; + ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; } @@ -527,15 +527,15 @@ void VKGSRender::bind_texture_env() //case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst; + ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst); 
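// The hunk above turns the comma-chained form
//     verify(HERE), size, !(size % s_dma_block_length);
// into one ensure() call per condition instead of a single combined check.
// Keeping the conditions separate preserves one assertion site per condition,
// so a failure names the exact expression that did not hold. A minimal,
// self-contained sketch of the same split using only the standard assert()
// macro (illustrative only; this is not the codebase's ensure() implementation):
#include <cassert>
#include <cstddef>

static void init_block_sketch(std::size_t size, std::size_t block_length)
{
    // One check per condition: the diagnostic names the failing expression.
    assert(size != 0);
    assert(block_length != 0 && "hypothetical precondition for the modulo below");
    assert(size % block_length == 0);

    // The combined alternative compiles just as well, but a failure report can
    // no longer say which half of the conjunction was violated:
    // assert(size != 0 && size % block_length == 0);
    (void)size;
    (void)block_length;
}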
raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src; + ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; case VK_IMAGE_LAYOUT_GENERAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage; + ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage); if (!sampler_state->is_cyclic_reference) { // Custom barrier, see similar block in FS stage @@ -565,7 +565,7 @@ void VKGSRender::bind_texture_env() break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage; + ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; } @@ -635,15 +635,15 @@ void VKGSRender::bind_interpreter_texture_env() //case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL: break; case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst; + ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_dst); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src; + ensure(sampler_state->upload_context == rsx::texture_upload_context::blit_engine_src); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; case VK_IMAGE_LAYOUT_GENERAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage; + ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage); if (!sampler_state->is_cyclic_reference) { // This was used in a cyclic ref before, but is missing a barrier @@ -674,7 +674,8 @@ void VKGSRender::bind_interpreter_texture_env() break; case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL: case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL: - verify(HERE), sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage, !sampler_state->is_cyclic_reference; + ensure(sampler_state->upload_context == rsx::texture_upload_context::framebuffer_storage); + ensure(!sampler_state->is_cyclic_reference); raw->change_layout(*m_current_command_buffer, VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL); break; } @@ -794,7 +795,7 @@ void VKGSRender::emit_geometry(u32 sub_index) // Update vertex fetch parameters update_vertex_env(sub_index, upload_info); - verify(HERE), m_vertex_layout_storage; + ensure(m_vertex_layout_storage); if (update_descriptors) { m_program->bind_uniform(persistent_buffer, binding_table.vertex_buffers_first_bind_slot, m_current_frame->descriptor_set); @@ -910,7 +911,7 @@ void VKGSRender::end() m_current_frame->used_descriptors = 0; } - verify(HERE), !m_current_frame->swap_command_buffer; + ensure(!m_current_frame->swap_command_buffer); m_current_frame->flags &= ~frame_context_state::dirty; } diff --git a/rpcs3/Emu/RSX/VK/VKFragmentProgram.cpp b/rpcs3/Emu/RSX/VK/VKFragmentProgram.cpp index 
9662cd23fa..ecafe18ba2 100644 --- a/rpcs3/Emu/RSX/VK/VKFragmentProgram.cpp +++ b/rpcs3/Emu/RSX/VK/VKFragmentProgram.cpp @@ -164,7 +164,7 @@ void VKFragmentDecompilerThread::insertConstants(std::stringstream & OS) } } - verify("Too many sampler descriptors!" HERE), location <= m_binding_table.vertex_textures_first_bind_slot; + ensure(location <= m_binding_table.vertex_textures_first_bind_slot); // "Too many sampler descriptors!" std::string constants_block; for (const ParamType& PT : m_parr.params[PF_PARAM_UNIFORM]) diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.cpp b/rpcs3/Emu/RSX/VK/VKGSRender.cpp index 547d48e846..a2313e5e38 100644 --- a/rpcs3/Emu/RSX/VK/VKGSRender.cpp +++ b/rpcs3/Emu/RSX/VK/VKGSRender.cpp @@ -275,7 +275,7 @@ namespace idx++; } - verify(HERE), idx == binding_table.total_descriptor_bindings; + ensure(idx == binding_table.total_descriptor_bindings); std::array push_constants; push_constants[0].offset = 0; @@ -671,7 +671,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing) if (g_fxo->get()->is_current_thread()) { // The offloader thread cannot handle flush requests - verify(HERE), !(m_queue_status & flush_queue_state::deadlock); + ensure(!(m_queue_status & flush_queue_state::deadlock)); m_offloader_fault_range = g_fxo->get()->get_fault_range(is_writing); m_offloader_fault_cause = (is_writing) ? rsx::invalidation_cause::write : rsx::invalidation_cause::read; @@ -794,7 +794,7 @@ void VKGSRender::notify_tile_unbound(u32 tile) void VKGSRender::check_heap_status(u32 flags) { - verify(HERE), flags; + ensure(flags); bool heap_critical; if (flags == VK_HEAP_CHECK_ALL) @@ -917,7 +917,7 @@ void VKGSRender::check_descriptors() { // Ease resource pressure if the number of draw calls becomes too high or we are running low on memory resources const auto required_descriptors = rsx::method_registers.current_draw_clause.pass_count(); - verify(HERE), required_descriptors < DESCRIPTOR_MAX_DRAW_CALLS; + ensure(required_descriptors < DESCRIPTOR_MAX_DRAW_CALLS); if ((required_descriptors + m_current_frame->used_descriptors) > DESCRIPTOR_MAX_DRAW_CALLS) { // Should hard sync before resetting descriptors for spec compliance @@ -932,7 +932,7 @@ VkDescriptorSet VKGSRender::allocate_descriptor_set() { if (!m_shader_interpreter.is_interpreter(m_program)) [[likely]] { - verify(HERE), m_current_frame->used_descriptors < DESCRIPTOR_MAX_DRAW_CALLS; + ensure(m_current_frame->used_descriptors < DESCRIPTOR_MAX_DRAW_CALLS); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.descriptorPool = m_current_frame->descriptor_pool; @@ -1113,7 +1113,7 @@ void VKGSRender::clear_surface(u32 mask) if ((mask & 0x3) != 0x3 && !require_mem_load && ds->state_flags & rsx::surface_state_flags::erase_bkgnd) { - verify(HERE), depth_stencil_mask; + ensure(depth_stencil_mask); if (!g_cfg.video.read_depth_buffer) { @@ -1349,7 +1349,7 @@ void VKGSRender::flush_command_queue(bool hard_sync) void VKGSRender::sync_hint(rsx::FIFO_hint hint, void* args) { - verify(HERE), args; + ensure(args); rsx::thread::sync_hint(hint, args); // Occlusion queries not enabled, do nothing @@ -1470,7 +1470,7 @@ bool VKGSRender::load_program() if (m_graphics_state & rsx::pipeline_state::invalidate_pipeline_bits) { get_current_fragment_program(fs_sampler_state); - verify(HERE), current_fragment_program.valid; + ensure(current_fragment_program.valid); get_current_vertex_program(vs_sampler_state); @@ -1871,7 +1871,7 @@ void VKGSRender::update_vertex_env(u32 id, const vk::vertex_upload_info& vertex_ if (!m_vertex_layout_storage || 
!m_vertex_layout_storage->in_range(offset32, range32, base_offset)) { - verify("Incompatible driver (MacOS?)" HERE), m_texbuffer_view_size >= m_vertex_layout_stream_info.range; + ensure(m_texbuffer_view_size >= m_vertex_layout_stream_info.range); if (m_vertex_layout_storage) m_current_frame->buffer_views_to_clean.push_back(std::move(m_vertex_layout_storage)); @@ -1914,7 +1914,7 @@ void VKGSRender::init_buffers(rsx::framebuffer_creation_context context, bool) void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore wait_semaphore, VkSemaphore signal_semaphore, VkPipelineStageFlags pipeline_stage_flags) { - verify("Recursive calls to submit the current commandbuffer will cause a deadlock" HERE), !m_queue_status.test_and_set(flush_queue_state::flushing); + ensure(!m_queue_status.test_and_set(flush_queue_state::flushing)); // Workaround for deadlock occuring during RSX offloader fault // TODO: Restructure command submission infrastructure to avoid this condition @@ -1960,7 +1960,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore #if 0 // Currently unreachable if (m_current_command_buffer->flags & vk::command_buffer::cb_has_conditional_render) { - verify(HERE), m_render_pass_open; + ensure(m_render_pass_open); m_device->cmdEndConditionalRenderingEXT(*m_current_command_buffer); } #endif @@ -1987,7 +1987,7 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore if (force_flush) { - verify(HERE), m_current_command_buffer->submit_fence->flushed; + ensure(m_current_command_buffer->submit_fence->flushed); } m_queue_status.clear(flush_queue_state::flushing); @@ -2087,7 +2087,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context) m_surface_info[index].address = m_framebuffer_layout.color_addresses[index]; m_surface_info[index].pitch = m_framebuffer_layout.actual_color_pitch[index]; - verify("Pitch mismatch!" HERE), surface->rsx_pitch == m_framebuffer_layout.actual_color_pitch[index]; + ensure(surface->rsx_pitch == m_framebuffer_layout.actual_color_pitch[index]); m_texture_cache.notify_surface_changed(m_surface_info[index].get_memory_range(m_framebuffer_layout.aa_factors)); m_draw_buffers.push_back(index); @@ -2101,7 +2101,7 @@ void VKGSRender::prepare_rtts(rsx::framebuffer_creation_context context) m_depth_surface_info.address = m_framebuffer_layout.zeta_address; m_depth_surface_info.pitch = m_framebuffer_layout.actual_zeta_pitch; - verify("Pitch mismatch!" 
HERE), ds->rsx_pitch == m_framebuffer_layout.actual_zeta_pitch; + ensure(ds->rsx_pitch == m_framebuffer_layout.actual_zeta_pitch); m_texture_cache.notify_surface_changed(m_depth_surface_info.get_memory_range(m_framebuffer_layout.aa_factors)); } @@ -2258,7 +2258,7 @@ bool VKGSRender::scaled_image_from_memory(rsx::blit_src_info& src, rsx::blit_dst void VKGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query) { - verify(HERE), !m_occlusion_query_active; + ensure(!m_occlusion_query_active); query->result = 0; //query->sync_timestamp = get_system_time(); @@ -2269,7 +2269,7 @@ void VKGSRender::begin_occlusion_query(rsx::reports::occlusion_query_info* query void VKGSRender::end_occlusion_query(rsx::reports::occlusion_query_info* query) { - verify(HERE), query == m_active_query_info; + ensure(query == m_active_query_info); // NOTE: flushing the queue is very expensive, do not flush just because query stopped if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query) @@ -2360,7 +2360,7 @@ void VKGSRender::discard_occlusion_query(rsx::reports::occlusion_query_info* que void VKGSRender::emergency_query_cleanup(vk::command_buffer* commands) { - verify("Command list mismatch" HERE), commands == static_cast(m_current_command_buffer); + ensure(commands == static_cast(m_current_command_buffer)); if (m_current_command_buffer->flags & vk::command_buffer::cb_has_open_query) { @@ -2372,7 +2372,7 @@ void VKGSRender::emergency_query_cleanup(vk::command_buffer* commands) void VKGSRender::begin_conditional_rendering(const std::vector& sources) { - verify(HERE), !sources.empty(); + ensure(!sources.empty()); // Flag check whether to calculate all entries or only one bool partial_eval; @@ -2474,7 +2474,7 @@ void VKGSRender::begin_conditional_rendering(const std::vector 4; + ensure(dst_offset > 4); if (!partial_eval) { diff --git a/rpcs3/Emu/RSX/VK/VKHelpers.cpp b/rpcs3/Emu/RSX/VK/VKHelpers.cpp index 516e4075cd..391f0812aa 100644 --- a/rpcs3/Emu/RSX/VK/VKHelpers.cpp +++ b/rpcs3/Emu/RSX/VK/VKHelpers.cpp @@ -468,7 +468,7 @@ namespace vk vk::mem_allocator_base* get_current_mem_allocator() { - verify (HERE, g_current_renderer); + ensure(g_current_renderer); return g_current_renderer->get_allocator(); } @@ -919,7 +919,7 @@ namespace vk void advance_frame_counter() { - verify(HERE), g_num_processed_frames <= g_num_total_frames; + ensure(g_num_processed_frames <= g_num_total_frames); g_num_total_frames++; } @@ -1011,7 +1011,7 @@ namespace vk void do_query_cleanup(vk::command_buffer& cmd) { auto renderer = dynamic_cast(rsx::get_current_renderer()); - verify(HERE), renderer; + ensure(renderer); renderer->emergency_query_cleanup(&cmd); } diff --git a/rpcs3/Emu/RSX/VK/VKHelpers.h b/rpcs3/Emu/RSX/VK/VKHelpers.h index 1895a36134..309560861c 100644 --- a/rpcs3/Emu/RSX/VK/VKHelpers.h +++ b/rpcs3/Emu/RSX/VK/VKHelpers.h @@ -601,7 +601,7 @@ namespace vk } else { - verify(HERE), pdev; + ensure(pdev); if (vkEnumerateDeviceExtensionProperties(pdev, layer_name, &count, nullptr) != VK_SUCCESS) return; } @@ -680,7 +680,7 @@ private: } auto getPhysicalDeviceFeatures2KHR = reinterpret_cast(vkGetInstanceProcAddr(parent, "vkGetPhysicalDeviceFeatures2KHR")); - verify("vkGetInstanceProcAddress failed to find entry point!" HERE), getPhysicalDeviceFeatures2KHR; + ensure(getPhysicalDeviceFeatures2KHR); // "vkGetInstanceProcAddress failed to find entry point!" 
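// In hunks like the one above, the free-form message from
//     verify("vkGetInstanceProcAddress failed to find entry point!" HERE), ...;
// survives only as a trailing comment, and the runtime check keeps just the
// condition. One common way such a check stays self-describing is for the
// macro to stringize its argument, so the expression text itself is reported
// on failure. A self-contained sketch of that general mechanism (hypothetical
// names; this is not the codebase's actual ensure() definition):
#include <cstdio>
#include <cstdlib>

static void check_sketch(bool ok, const char* expr, const char* file, int line)
{
    if (ok)
    {
        return;
    }
    // The stringized condition stands in for a hand-written message.
    std::fprintf(stderr, "Verification failed: %s (%s:%d)\n", expr, file, line);
    std::abort();
}

#define CHECK_SKETCH(x) check_sketch(static_cast<bool>(x), #x, __FILE__, __LINE__)

// Usage in the style of the call sites above: the old message becomes a
// comment and the condition is the diagnostic.
static void load_entry_point_sketch(void* (*lookup)(const char*))
{
    void* entry = lookup("vkGetPhysicalDeviceFeatures2KHR");
    CHECK_SKETCH(entry != nullptr); // "vkGetInstanceProcAddress failed to find entry point!"
}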
getPhysicalDeviceFeatures2KHR(dev, &features2); shader_types_support.allow_float64 = !!features2.features.shaderFloat64; @@ -1372,7 +1372,7 @@ private: } // Check for hanging queries to avoid driver hang - verify("close and submit of commandbuffer with a hanging query!" HERE), (flags & cb_has_open_query) == 0; + ensure((flags & cb_has_open_query) == 0); // "close and submit of commandbuffer with a hanging query!" if (!pfence) { @@ -1547,7 +1547,7 @@ private: void pop_layout(VkCommandBuffer cmd) { - verify(HERE), !m_layout_stack.empty(); + ensure(!m_layout_stack.empty()); auto layout = m_layout_stack.top(); m_layout_stack.pop(); @@ -1559,7 +1559,7 @@ private: if (current_layout == new_layout) return; - verify(HERE), m_layout_stack.empty(); + ensure(m_layout_stack.empty()); change_image_layout(cmd, this, new_layout); } @@ -1736,7 +1736,7 @@ private: const auto range = vk::get_image_subresource_range(0, 0, info.arrayLayers, info.mipLevels, aspect() & mask); - verify(HERE), range.aspectMask; + ensure(range.aspectMask); auto view = std::make_unique(*get_current_renderer(), this, VK_IMAGE_VIEW_TYPE_MAX_ENUM, real_mapping, range); auto result = view.get(); @@ -3190,7 +3190,7 @@ public: void create(const vk::render_device &dev, VkDescriptorPoolSize *sizes, u32 size_descriptors_count, u32 max_sets, u8 subpool_count) { - verify(HERE), subpool_count; + ensure(subpool_count); VkDescriptorPoolCreateInfo infos = {}; infos.flags = 0; @@ -3542,7 +3542,7 @@ public: VkShaderModule compile() { - verify(HERE), m_handle == VK_NULL_HANDLE; + ensure(m_handle == VK_NULL_HANDLE); if (!vk::compile_glsl_to_spv(m_source, type, m_compiled)) { @@ -3737,7 +3737,8 @@ public: { if (!dirty_ranges.empty()) { - verify (HERE), shadow, heap; + ensure(shadow); + ensure(heap); vkCmdCopyBuffer(cmd, shadow->value, heap->value, ::size32(dirty_ranges), dirty_ranges.data()); dirty_ranges.clear(); diff --git a/rpcs3/Emu/RSX/VK/VKOverlays.h b/rpcs3/Emu/RSX/VK/VKOverlays.h index 7d6abdba4c..cc7eac4a50 100644 --- a/rpcs3/Emu/RSX/VK/VKOverlays.h +++ b/rpcs3/Emu/RSX/VK/VKOverlays.h @@ -270,7 +270,7 @@ namespace vk else program = build_pipeline(key, pass); - verify(HERE), m_used_descriptors < VK_OVERLAY_MAX_DRAW_CALLS; + ensure(m_used_descriptors < VK_OVERLAY_MAX_DRAW_CALLS); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.descriptorPool = m_descriptor_pool; diff --git a/rpcs3/Emu/RSX/VK/VKPipelineCompiler.cpp b/rpcs3/Emu/RSX/VK/VKPipelineCompiler.cpp index bd9416aac4..d1b8f5664f 100644 --- a/rpcs3/Emu/RSX/VK/VKPipelineCompiler.cpp +++ b/rpcs3/Emu/RSX/VK/VKPipelineCompiler.cpp @@ -108,7 +108,7 @@ namespace vk vp.scissorCount = 1; VkPipelineMultisampleStateCreateInfo ms = create_info.state.ms; - verify("Multisample state mismatch!" HERE), ms.rasterizationSamples == VkSampleCountFlagBits((create_info.renderpass_key >> 16) & 0xF); + ensure(ms.rasterizationSamples == VkSampleCountFlagBits((create_info.renderpass_key >> 16) & 0xF)); // "Multisample state mismatch!" 
if (ms.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT) { // Update the sample mask pointer @@ -160,7 +160,7 @@ namespace vk const std::vector& vs_inputs, const std::vector& fs_inputs) { // It is very inefficient to defer this as all pointers need to be saved - verify(HERE), flags == COMPILE_INLINE; + ensure(flags == COMPILE_INLINE); return int_compile_graphics_pipe(create_info, pipe_layout, vs_inputs, fs_inputs); } @@ -204,10 +204,10 @@ namespace vk } } - verify(HERE), num_worker_threads >= 1; + ensure(num_worker_threads >= 1); const vk::render_device* dev = vk::get_current_renderer(); - verify("Cannot initialize pipe compiler before creating a logical device" HERE), dev; + ensure(dev); // "Cannot initialize pipe compiler before creating a logical device" // Create the thread pool g_pipe_compilers = std::make_unique>("RSX.W", num_worker_threads); @@ -227,7 +227,7 @@ namespace vk pipe_compiler* get_pipe_compiler() { - verify(HERE), g_pipe_compilers; + ensure(g_pipe_compilers); int thread_index = g_compiler_index++; return g_pipe_compilers.get()->begin() + (thread_index % g_num_pipe_compilers); diff --git a/rpcs3/Emu/RSX/VK/VKPresent.cpp b/rpcs3/Emu/RSX/VK/VKPresent.cpp index 4e455bb53e..910b9df5db 100644 --- a/rpcs3/Emu/RSX/VK/VKPresent.cpp +++ b/rpcs3/Emu/RSX/VK/VKPresent.cpp @@ -73,7 +73,7 @@ void VKGSRender::reinitialize_swapchain() void VKGSRender::present(vk::frame_context_t *ctx) { - verify(HERE), ctx->present_image != UINT32_MAX; + ensure(ctx->present_image != UINT32_MAX); // Partial CS flush ctx->swap_command_buffer->flush(); @@ -129,7 +129,7 @@ void VKGSRender::advance_queued_frames() m_raster_env_ring_info.get_current_put_pos_minus_one()); m_queued_frames.push_back(m_current_frame); - verify(HERE), m_queued_frames.size() <= VK_MAX_ASYNC_FRAMES; + ensure(m_queued_frames.size() <= VK_MAX_ASYNC_FRAMES); m_current_queue_index = (m_current_queue_index + 1) % VK_MAX_ASYNC_FRAMES; m_current_frame = &frame_context_storage[m_current_queue_index]; @@ -140,7 +140,7 @@ void VKGSRender::advance_queued_frames() void VKGSRender::queue_swap_request() { - verify(HERE), !m_current_frame->swap_command_buffer; + ensure(!m_current_frame->swap_command_buffer); m_current_frame->swap_command_buffer = m_current_command_buffer; if (m_swapchain->is_headless()) @@ -174,7 +174,7 @@ void VKGSRender::queue_swap_request() void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx, bool free_resources) { - verify(HERE), ctx->swap_command_buffer; + ensure(ctx->swap_command_buffer); if (ctx->swap_command_buffer->pending) { @@ -417,7 +417,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info) { if (!info.skip_frame) { - verify(HERE), swapchain_unavailable; + ensure(swapchain_unavailable); // Perform a mini-flip here without invoking present code m_current_frame->swap_command_buffer = m_current_command_buffer; @@ -494,8 +494,8 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info) } // Prepare surface for new frame. Set no timeout here so that we wait for the next image if need be - verify(HERE), m_current_frame->present_image == UINT32_MAX; - verify(HERE), m_current_frame->swap_command_buffer == nullptr; + ensure(m_current_frame->present_image == UINT32_MAX); + ensure(m_current_frame->swap_command_buffer == nullptr); u64 timeout = m_swapchain->get_swap_image_count() <= VK_MAX_ASYNC_FRAMES? 
0ull: 100000000ull; while (VkResult status = m_swapchain->acquire_next_swapchain_image(m_current_frame->acquire_signal_semaphore, timeout, &m_current_frame->present_image)) @@ -537,7 +537,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info) } // Confirm that the driver did not silently fail - verify(HERE), m_current_frame->present_image != UINT32_MAX; + ensure(m_current_frame->present_image != UINT32_MAX); // Calculate output dimensions. Done after swapchain acquisition in case it was recreated. coordi aspect_ratio; @@ -592,12 +592,12 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info) if (!use_full_rgb_range_output || !rsx::fcmp(avconfig->gamma, 1.f) || avconfig->_3d) [[unlikely]] { calibration_src.push_back(dynamic_cast(image_to_flip)); - verify("Image not viewable" HERE), calibration_src.front(); + ensure(calibration_src.front()); if (image_to_flip2) { calibration_src.push_back(dynamic_cast(image_to_flip2)); - verify("Image not viewable" HERE), calibration_src.back(); + ensure(calibration_src.back()); } } @@ -631,7 +631,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info) const auto key = vk::get_renderpass_key(m_swapchain->get_surface_format()); single_target_pass = vk::get_renderpass(*m_device, key); - verify("Usupported renderpass configuration" HERE), single_target_pass != VK_NULL_HANDLE; + ensure(single_target_pass != VK_NULL_HANDLE); direct_fbo = vk::get_framebuffer(*m_device, m_swapchain_dims.width, m_swapchain_dims.height, single_target_pass, m_swapchain->get_surface_format(), target_image); direct_fbo->add_ref(); @@ -709,7 +709,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info) { const auto key = vk::get_renderpass_key(m_swapchain->get_surface_format()); single_target_pass = vk::get_renderpass(*m_device, key); - verify("Usupported renderpass configuration" HERE), single_target_pass != VK_NULL_HANDLE; + ensure(single_target_pass != VK_NULL_HANDLE); direct_fbo = vk::get_framebuffer(*m_device, m_swapchain_dims.width, m_swapchain_dims.height, single_target_pass, m_swapchain->get_surface_format(), target_image); } diff --git a/rpcs3/Emu/RSX/VK/VKProgramPipeline.cpp b/rpcs3/Emu/RSX/VK/VKProgramPipeline.cpp index 23c910ad0b..dc55f63e82 100644 --- a/rpcs3/Emu/RSX/VK/VKProgramPipeline.cpp +++ b/rpcs3/Emu/RSX/VK/VKProgramPipeline.cpp @@ -41,7 +41,7 @@ namespace vk program& program::load_uniforms(const std::vector& inputs) { - verify("Cannot change uniforms in already linked program!" HERE), !linked; + ensure(!linked); // "Cannot change uniforms in already linked program!" 
for (auto &item : inputs) { @@ -133,7 +133,7 @@ namespace vk void program::bind_uniform(const VkDescriptorImageInfo & image_descriptor, int texture_unit, ::glsl::program_domain domain, VkDescriptorSet &descriptor_set, bool is_stencil_mirror) { - verify("Unsupported program domain" HERE, domain != ::glsl::program_domain::glsl_compute_program); + ensure(domain != ::glsl::program_domain::glsl_compute_program); u32 binding; if (domain == ::glsl::program_domain::glsl_fragment_program) diff --git a/rpcs3/Emu/RSX/VK/VKQueryPool.cpp b/rpcs3/Emu/RSX/VK/VKQueryPool.cpp index 258080b822..710bcc0b6e 100644 --- a/rpcs3/Emu/RSX/VK/VKQueryPool.cpp +++ b/rpcs3/Emu/RSX/VK/VKQueryPool.cpp @@ -49,7 +49,7 @@ namespace vk query_pool_manager::query_pool_manager(vk::render_device& dev, VkQueryType type, u32 num_entries) { - verify(HERE), num_entries > 0; + ensure(num_entries > 0); owner = &dev; query_type = type; @@ -72,7 +72,7 @@ namespace vk void query_pool_manager::allocate_new_pool(vk::command_buffer& cmd) { - verify(HERE), !m_current_query_pool; + ensure(!m_current_query_pool); const u32 count = ::size32(query_slot_status); m_current_query_pool = std::make_unique(*owner, query_type, count); @@ -124,7 +124,7 @@ namespace vk void query_pool_manager::begin_query(vk::command_buffer& cmd, u32 index) { - verify(HERE), query_slot_status[index].active == false; + ensure(query_slot_status[index].active == false); auto& query_info = query_slot_status[index]; query_info.pool = m_current_query_pool.get(); @@ -166,7 +166,7 @@ namespace vk // Release reference and discard auto& query = query_slot_status[index]; - verify(HERE), query.active; + ensure(query.active); query.pool->release(); if (!query.pool->has_refs()) @@ -206,4 +206,3 @@ namespace vk return ~0u; } } - diff --git a/rpcs3/Emu/RSX/VK/VKRenderTargets.h b/rpcs3/Emu/RSX/VK/VKRenderTargets.h index 01d19fd629..fd5fb50680 100644 --- a/rpcs3/Emu/RSX/VK/VKRenderTargets.h +++ b/rpcs3/Emu/RSX/VK/VKRenderTargets.h @@ -59,7 +59,7 @@ namespace vk if (!is_depth_surface()) [[likely]] { - verify(HERE), current_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + ensure(current_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); // This is the source; finish writing before reading vk::insert_image_memory_barrier( @@ -127,12 +127,12 @@ namespace vk // Unresolve the linear data into planar MSAA data void unresolve(vk::command_buffer& cmd) { - verify(HERE), !(msaa_flags & rsx::surface_state_flags::require_resolve); + ensure(!(msaa_flags & rsx::surface_state_flags::require_resolve)); VkImageSubresourceRange range = { aspect(), 0, 1, 0, 1 }; if (!is_depth_surface()) [[likely]] { - verify(HERE), current_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL; + ensure(current_layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL); // This is the dest; finish reading before writing vk::insert_image_memory_barrier( @@ -329,7 +329,8 @@ namespace vk } // A read barrier should have been called before this! - verify("Read access without explicit barrier" HERE), resolve_surface, !(msaa_flags & rsx::surface_state_flags::require_resolve); + ensure(resolve_surface); // "Read access without explicit barrier" + ensure(!(msaa_flags & rsx::surface_state_flags::require_resolve)); return resolve_surface.get(); } @@ -454,7 +455,7 @@ namespace vk // NOTE: This step CAN introduce MSAA flags! 
initialize_memory(cmd, read_access); - verify(HERE), state_flags == rsx::surface_state_flags::ready; + ensure(state_flags == rsx::surface_state_flags::ready); on_write(rsx::get_shared_tag(), static_cast(msaa_flags)); } @@ -472,7 +473,7 @@ namespace vk if (!read_access) { // Only do this step when it is needed to start rendering - verify(HERE), resolve_surface; + ensure(resolve_surface); unresolve(cmd); } } @@ -543,7 +544,7 @@ namespace vk { // Might introduce MSAA flags initialize_memory(cmd, false); - verify(HERE), state_flags == rsx::surface_state_flags::ready; + ensure(state_flags == rsx::surface_state_flags::ready); } if (msaa_flags & rsx::surface_state_flags::require_resolve) @@ -597,7 +598,7 @@ namespace vk static inline vk::render_target* as_rtt(vk::image* t) { - return verify(HERE, dynamic_cast(t)); + return ensure(dynamic_cast(t)); } } @@ -936,7 +937,7 @@ namespace rsx const u64 last_finished_frame = vk::get_last_completed_frame_id(); invalidated_resources.remove_if([&](std::unique_ptr &rtt) { - verify(HERE), rtt->frame_tag != 0; + ensure(rtt->frame_tag != 0); if (rtt->unused_check_count() >= 2 && rtt->frame_tag < last_finished_frame) return true; diff --git a/rpcs3/Emu/RSX/VK/VKResolveHelper.h b/rpcs3/Emu/RSX/VK/VKResolveHelper.h index 38e26b8dcb..501aa07b82 100644 --- a/rpcs3/Emu/RSX/VK/VKResolveHelper.h +++ b/rpcs3/Emu/RSX/VK/VKResolveHelper.h @@ -125,7 +125,8 @@ namespace vk void run(VkCommandBuffer cmd, vk::viewable_image* msaa_image, vk::viewable_image* resolve_image) { - verify(HERE), msaa_image->samples() > 1, resolve_image->samples() == 1; + ensure(msaa_image->samples() > 1); + ensure(resolve_image->samples() == 1); multisampled = msaa_image; resolve = resolve_image; diff --git a/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp b/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp index be9479d88e..28c71fcfac 100644 --- a/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp +++ b/rpcs3/Emu/RSX/VK/VKShaderInterpreter.cpp @@ -473,7 +473,7 @@ namespace vk vp.scissorCount = 1; VkPipelineMultisampleStateCreateInfo ms = properties.state.ms; - verify("Multisample state mismatch!" HERE), ms.rasterizationSamples == VkSampleCountFlagBits((properties.renderpass_key >> 16) & 0xF); + ensure(ms.rasterizationSamples == VkSampleCountFlagBits((properties.renderpass_key >> 16) & 0xF)); // "Multisample state mismatch!" if (ms.rasterizationSamples != VK_SAMPLE_COUNT_1_BIT) { // Update the sample mask pointer diff --git a/rpcs3/Emu/RSX/VK/VKTextOut.h b/rpcs3/Emu/RSX/VK/VKTextOut.h index a0754600ee..503d544e1e 100644 --- a/rpcs3/Emu/RSX/VK/VKTextOut.h +++ b/rpcs3/Emu/RSX/VK/VKTextOut.h @@ -200,7 +200,7 @@ namespace vk void load_program(vk::command_buffer &cmd, float scale_x, float scale_y, const float *offsets, size_t nb_offsets, std::array color) { - verify(HERE), m_used_descriptors < 120; + ensure(m_used_descriptors < 120); VkDescriptorSetAllocateInfo alloc_info = {}; alloc_info.descriptorPool = m_descriptor_pool; @@ -255,7 +255,7 @@ namespace vk void init(vk::render_device &dev, VkRenderPass render_pass) { - verify(HERE), render_pass != VK_NULL_HANDLE; + ensure(render_pass != VK_NULL_HANDLE); //At worst case, 1 char = 16*16*8 bytes (average about 24*8), so ~256K for 128 chars. Allocating 512k for verts //uniform params are 8k in size, allocating for 120 lines (max lines at 4k, one column per row. 
Can be expanded diff --git a/rpcs3/Emu/RSX/VK/VKTexture.cpp b/rpcs3/Emu/RSX/VK/VKTexture.cpp index 0d32d372d6..941b809f28 100644 --- a/rpcs3/Emu/RSX/VK/VKTexture.cpp +++ b/rpcs3/Emu/RSX/VK/VKTexture.cpp @@ -60,8 +60,7 @@ namespace vk void copy_image_to_buffer(VkCommandBuffer cmd, const vk::image* src, const vk::buffer* dst, const VkBufferImageCopy& region, bool swap_bytes) { // Always validate - verify("Invalid image layout!" HERE), - src->current_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || src->current_layout == VK_IMAGE_LAYOUT_GENERAL; + ensure(src->current_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || src->current_layout == VK_IMAGE_LAYOUT_GENERAL); if (vk::is_renderpass_open(cmd)) { @@ -72,14 +71,14 @@ namespace vk { default: { - verify("Implicit byteswap option not supported for speficied format" HERE), !swap_bytes; + ensure(!swap_bytes); // "Implicit byteswap option not supported for speficied format" vkCmdCopyImageToBuffer(cmd, src->value, src->current_layout, dst->value, 1, ®ion); break; } case VK_FORMAT_D32_SFLOAT: { rsx_log.error("Unsupported transfer (D16_FLOAT)"); // Need real games to test this. - verify(HERE), region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT; + ensure(region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT); const u32 out_w = region.bufferRowLength ? region.bufferRowLength : region.imageExtent.width; const u32 out_h = region.bufferImageHeight ? region.bufferImageHeight : region.imageExtent.height; @@ -87,7 +86,7 @@ namespace vk const u32 packed16_length = out_w * out_h * 2; const auto allocation_end = region.bufferOffset + packed32_length + packed16_length; - verify(HERE), dst->size() >= allocation_end; + ensure(dst->size() >= allocation_end); const auto data_offset = u32(region.bufferOffset); const auto z32_offset = align(data_offset + packed16_length, 256); @@ -124,7 +123,7 @@ namespace vk case VK_FORMAT_D24_UNORM_S8_UINT: case VK_FORMAT_D32_SFLOAT_S8_UINT: { - verify(HERE), region.imageSubresource.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT); + ensure(region.imageSubresource.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)); const u32 out_w = region.bufferRowLength? region.bufferRowLength : region.imageExtent.width; const u32 out_h = region.bufferImageHeight? region.bufferImageHeight : region.imageExtent.height; @@ -133,7 +132,7 @@ namespace vk const u32 in_stencil_size = out_w * out_h; const auto allocation_end = region.bufferOffset + packed_length + in_depth_size + in_stencil_size; - verify(HERE), dst->size() >= allocation_end; + ensure(dst->size() >= allocation_end); const auto data_offset = u32(region.bufferOffset); const auto z_offset = align(data_offset + packed_length, 256); @@ -198,8 +197,7 @@ namespace vk void copy_buffer_to_image(VkCommandBuffer cmd, const vk::buffer* src, const vk::image* dst, const VkBufferImageCopy& region) { // Always validate - verify("Invalid image layout!" HERE), - dst->current_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL || dst->current_layout == VK_IMAGE_LAYOUT_GENERAL; + ensure(dst->current_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL || dst->current_layout == VK_IMAGE_LAYOUT_GENERAL); if (vk::is_renderpass_open(cmd)) { @@ -216,7 +214,7 @@ namespace vk case VK_FORMAT_D32_SFLOAT: { rsx_log.error("Unsupported transfer (D16_FLOAT)"); - verify(HERE), region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT; + ensure(region.imageSubresource.aspectMask == VK_IMAGE_ASPECT_DEPTH_BIT); const u32 out_w = region.bufferRowLength ? 
region.bufferRowLength : region.imageExtent.width; const u32 out_h = region.bufferImageHeight ? region.bufferImageHeight : region.imageExtent.height; @@ -224,7 +222,7 @@ namespace vk const u32 packed16_length = out_w * out_h * 2; const auto allocation_end = region.bufferOffset + packed32_length + packed16_length; - verify(HERE), src->size() >= allocation_end; + ensure(src->size() >= allocation_end); const auto data_offset = u32(region.bufferOffset); const auto z32_offset = align(data_offset + packed16_length, 256); @@ -259,7 +257,7 @@ namespace vk const u32 in_stencil_size = out_w * out_h; const auto allocation_end = region.bufferOffset + packed_length + in_depth_size + in_stencil_size; - verify("Out of memory (compute heap). Lower your resolution scale setting." HERE), src->size() >= allocation_end; + ensure(src->size() >= allocation_end); // "Out of memory (compute heap). Lower your resolution scale setting." const auto data_offset = u32(region.bufferOffset); const auto z_offset = align(data_offset + packed_length, 256); @@ -538,7 +536,7 @@ namespace vk } else { - verify(HERE), !dst_rect.is_flipped(); + ensure(!dst_rect.is_flipped()); auto stretch_image_typeless_unsafe = [&cmd, filter](vk::image* src, vk::image* dst, vk::image* typeless, const areai& src_rect, const areai& dst_rect, VkImageAspectFlags aspect, VkImageAspectFlags transfer_flags = 0xFF) @@ -709,7 +707,7 @@ namespace vk vk::cs_deswizzle_base* job = nullptr; const auto block_size = (word_size * word_count); - verify(HERE), word_size == 4 || word_size == 2; + ensure(word_size == 4 || word_size == 2); if (!swap_bytes) { @@ -772,7 +770,7 @@ namespace vk } } - verify(HERE), job; + ensure(job); auto next_layer = sections.front().imageSubresource.baseArrayLayer; auto next_level = sections.front().imageSubresource.mipLevel; @@ -782,7 +780,7 @@ namespace vk std::vector> packets; for (unsigned i = 0; i < sections.size(); ++i) { - verify(HERE), sections[i].bufferRowLength; + ensure(sections[i].bufferRowLength); const auto layer = sections[i].imageSubresource.baseArrayLayer; const auto level = sections[i].imageSubresource.mipLevel; @@ -832,7 +830,7 @@ namespace vk section.imageExtent.width, section.imageExtent.height, section.imageExtent.depth, packet.second); } - verify(HERE), dst_offset <= scratch_buf->size(); + ensure(dst_offset <= scratch_buf->size()); } void copy_mipmaped_image_using_buffer(VkCommandBuffer cmd, vk::image* dst_image, @@ -879,7 +877,7 @@ namespace vk else { row_pitch = rsx::align2(layout.width_in_block * block_size_in_bytes, heap_align); - verify(HERE), row_pitch == heap_align; + ensure(row_pitch == heap_align); } image_linear_size = row_pitch * layout.height_in_block * layout.depth; @@ -937,13 +935,13 @@ namespace vk copy_info.bufferOffset = scratch_offset; scratch_offset += image_linear_size; - verify("Out of scratch memory" HERE), (scratch_offset + image_linear_size) <= scratch_buf->size(); + ensure((scratch_offset + image_linear_size) <= scratch_buf->size()); // "Out of scratch memory" } } if (opt.require_swap || opt.require_deswizzle || requires_depth_processing) { - verify(HERE), scratch_buf; + ensure(scratch_buf); vkCmdCopyBuffer(cmd, upload_heap.heap->value, scratch_buf->value, static_cast(buffer_copies.size()), buffer_copies.data()); insert_buffer_memory_barrier(cmd, scratch_buf->value, 0, scratch_offset, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, @@ -982,7 +980,7 @@ namespace vk } else if (scratch_buf) { - verify(HERE), opt.require_deswizzle || opt.require_swap; + 
ensure(opt.require_deswizzle || opt.require_swap); const auto block_start = copy_regions.front().bufferOffset; insert_buffer_memory_barrier(cmd, scratch_buf->value, block_start, scratch_offset, VK_PIPELINE_STAGE_COMPUTE_SHADER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, @@ -1146,7 +1144,7 @@ namespace vk src_area.flip_vertical(); } - verify("Incompatible source and destination format!" HERE), real_src->aspect() == real_dst->aspect(); + ensure(real_src->aspect() == real_dst->aspect()); // "Incompatible source and destination format!" copy_scaled_image(cmd, real_src, real_dst, src_area, dst_area, 1, formats_are_bitcast_compatible(real_src, real_dst), diff --git a/rpcs3/Emu/RSX/VK/VKTextureCache.h b/rpcs3/Emu/RSX/VK/VKTextureCache.h index efbb4f6932..4d88762cf5 100644 --- a/rpcs3/Emu/RSX/VK/VKTextureCache.h +++ b/rpcs3/Emu/RSX/VK/VKTextureCache.h @@ -50,7 +50,7 @@ namespace vk ASSERT(!exists() || !is_managed() || vram_texture == new_texture); vram_texture = new_texture; - verify(HERE), rsx_pitch; + ensure(rsx_pitch); width = w; height = h; @@ -98,7 +98,9 @@ namespace vk { // Called if a reset occurs, usually via reprotect path after a bad prediction. // Discard the sync event, the next sync, if any, will properly recreate this. - verify(HERE), synchronized, !flushed, dma_fence; + ensure(synchronized); + ensure(!flushed); + ensure(dma_fence); vk::get_resource_manager()->dispose(dma_fence); } @@ -167,7 +169,7 @@ namespace vk void dma_transfer(vk::command_buffer& cmd, vk::image* src, const areai& src_area, const utils::address_range& valid_range, u32 pitch) { - verify(HERE), src->samples() == 1; + ensure(src->samples() == 1); if (!m_device) { @@ -228,7 +230,7 @@ namespace vk } else { - verify(HERE), get_context() == rsx::texture_upload_context::dma; + ensure(get_context() == rsx::texture_upload_context::dma); shuffle_kernel = nullptr; } @@ -326,7 +328,7 @@ namespace vk if (!miss) [[likely]] { - verify(HERE), !synchronized; + ensure(!synchronized); baseclass::on_speculative_flush(); } else @@ -381,7 +383,8 @@ namespace vk transfer_y = offset / rsx_pitch; transfer_x = (offset % rsx_pitch) / internal_bpp; - verify(HERE), transfer_width >= transfer_x, transfer_height >= transfer_y; + ensure(transfer_width >= transfer_x); + ensure(transfer_height >= transfer_y); transfer_width -= transfer_x; transfer_height -= transfer_y; } @@ -390,7 +393,7 @@ namespace vk { const auto row_count = tail / rsx_pitch; - verify(HERE), transfer_height >= row_count; + ensure(transfer_height >= row_count); transfer_height -= row_count; } } @@ -679,7 +682,7 @@ namespace vk src_w = convert_w; } - verify(HERE), src_image->current_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || src_image->current_layout == VK_IMAGE_LAYOUT_GENERAL; + ensure(src_image->current_layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL || src_image->current_layout == VK_IMAGE_LAYOUT_GENERAL); // Final aspect mask of the 'final' transfer source const auto new_src_aspect = src_image->aspect(); @@ -707,7 +710,7 @@ namespace vk } else { - verify(HERE), section.dst_z == 0; + ensure(section.dst_z == 0); u16 dst_x = section.dst_x, dst_y = section.dst_y; vk::image* _dst; @@ -1272,7 +1275,7 @@ namespace vk else { // Insert ordering barrier - verify(HERE), preferred_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL; + ensure(preferred_layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL); insert_image_memory_barrier(cmd, image->value, image->current_layout, preferred_layout, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_PIPELINE_STAGE_TRANSFER_BIT, VK_ACCESS_TRANSFER_WRITE_BIT, 
VK_ACCESS_TRANSFER_WRITE_BIT, @@ -1291,7 +1294,7 @@ namespace vk const VkComponentMapping mapping = apply_component_mapping_flags(gcm_format, expected_flags, rsx::default_remap_vector); auto image = static_cast(section.get_raw_texture()); - verify(HERE), image != nullptr; + ensure(image); image->set_native_component_layout(mapping); section.set_view_flags(expected_flags); @@ -1373,11 +1376,11 @@ namespace vk cmd.submit(m_submit_queue, VK_NULL_HANDLE, VK_NULL_HANDLE, VK_NULL_HANDLE, VK_PIPELINE_STAGE_ALL_COMMANDS_BIT, VK_TRUE); } - verify(HERE), cmd.flags == 0; + ensure(cmd.flags == 0); if (occlusion_query_active) { - verify(HERE), cmd.is_recording(); + ensure(cmd.is_recording()); cmd.flags |= vk::command_buffer::cb_load_occluson_task; } } diff --git a/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp b/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp index 1d3efe1fb0..a9c7b0f228 100644 --- a/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp +++ b/rpcs3/Emu/RSX/VK/VKVertexBuffers.cpp @@ -257,7 +257,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data() if (auto cached = m_vertex_cache->find_vertex_range(storage_address, VK_FORMAT_R8_UINT, required.first)) { - verify(HERE), cached->local_address == storage_address; + ensure(cached->local_address == storage_address); in_cache = true; persistent_range_base = cached->offset_in_heap; @@ -338,7 +338,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data() { if (!m_persistent_attribute_storage || !m_persistent_attribute_storage->in_range(persistent_range_base, required.first, persistent_range_base)) { - verify("Incompatible driver (MacOS?)" HERE), m_texbuffer_view_size >= required.first; + ensure(m_texbuffer_view_size >= required.first); // "Incompatible driver (MacOS?)" if (m_persistent_attribute_storage) m_current_frame->buffer_views_to_clean.push_back(std::move(m_persistent_attribute_storage)); @@ -354,7 +354,7 @@ vk::vertex_upload_info VKGSRender::upload_vertex_data() { if (!m_volatile_attribute_storage || !m_volatile_attribute_storage->in_range(volatile_range_base, required.second, volatile_range_base)) { - verify("Incompatible driver (MacOS?)" HERE), m_texbuffer_view_size >= required.second; + ensure(m_texbuffer_view_size >= required.second); // "Incompatible driver (MacOS?)" if (m_volatile_attribute_storage) m_current_frame->buffer_views_to_clean.push_back(std::move(m_volatile_attribute_storage)); diff --git a/rpcs3/Emu/RSX/rsx_cache.h b/rpcs3/Emu/RSX/rsx_cache.h index 54f0bab7d2..5842a41159 100644 --- a/rpcs3/Emu/RSX/rsx_cache.h +++ b/rpcs3/Emu/RSX/rsx_cache.h @@ -35,7 +35,7 @@ namespace rsx static inline void memory_protect(const address_range& range, utils::protection prot) { - verify(HERE), range.is_page_range(); + ensure(range.is_page_range()); //rsx_log.error("memory_protect(0x%x, 0x%x, %x)", static_cast(range.start), static_cast(range.length()), static_cast(prot)); utils::memory_protect(vm::base(range.start), range.length(), prot); @@ -84,7 +84,7 @@ namespace rsx AUDIT( (locked_range.start == page_start(range.start)) || (locked_range.start == next_page(range.start)) ); AUDIT( locked_range.end <= page_end(range.end) ); - verify(HERE), locked_range.is_page_range(); + ensure(locked_range.is_page_range()); } public: @@ -94,7 +94,7 @@ namespace rsx void reset(const address_range &memory_range) { - verify(HERE), memory_range.valid() && locked == false; + ensure(memory_range.valid() && locked == false); cpu_range = address_range(memory_range); confirmed_range.invalidate(); @@ -121,7 +121,7 @@ namespace rsx { if (new_prot == protection && !force) return; - 
verify(HERE), locked_range.is_page_range(); + ensure(locked_range.is_page_range()); AUDIT( !confirmed_range.valid() || confirmed_range.inside(cpu_range) ); #ifdef TEXTURE_CACHE_DEBUG @@ -177,7 +177,7 @@ namespace rsx ASSERT(!locked || locked_range.inside(confirmed_range.to_page_range())); } - verify(HERE), confirmed_range.inside(cpu_range); + ensure(confirmed_range.inside(cpu_range)); init_lockable_range(confirmed_range); } @@ -544,7 +544,7 @@ namespace rsx } } - verify(HERE), processed == entry_count; + ensure(processed == entry_count); } public: diff --git a/rpcs3/Emu/RSX/rsx_methods.cpp b/rpcs3/Emu/RSX/rsx_methods.cpp index 6420fefa64..acd151d445 100644 --- a/rpcs3/Emu/RSX/rsx_methods.cpp +++ b/rpcs3/Emu/RSX/rsx_methods.cpp @@ -201,7 +201,7 @@ namespace rsx const u32 addr = rsx->iomap_table.get_addr(0xf100000 + (index * 0x40)); - verify(HERE), addr != umax; + ensure(addr != umax); vm::_ref>(addr).store( { @@ -267,7 +267,7 @@ namespace rsx static const size_t vertex_subreg = index % increment_per_array_index; const auto vtype = vertex_data_type_from_element_type::type; - verify(HERE), vtype != rsx::vertex_base_type::cmp; + ensure(vtype != rsx::vertex_base_type::cmp); switch (vtype) { @@ -1592,7 +1592,7 @@ namespace rsx void flip_command(thread* rsx, u32, u32 arg) { - verify(HERE), rsx->isHLE; + ensure(rsx->isHLE); rsx->reset(); rsx->request_emu_flip(arg); } diff --git a/rpcs3/Emu/RSX/rsx_methods.h b/rpcs3/Emu/RSX/rsx_methods.h index f935980f50..593485e5dd 100644 --- a/rpcs3/Emu/RSX/rsx_methods.h +++ b/rpcs3/Emu/RSX/rsx_methods.h @@ -123,7 +123,7 @@ namespace rsx void insert_command_barrier(command_barrier_type type, u32 arg) { - verify(HERE), !draw_command_ranges.empty(); + ensure(!draw_command_ranges.empty()); auto _do_barrier_insert = [this](barrier_t&& val) { @@ -234,7 +234,7 @@ namespace rsx { if (draw_command_ranges.empty()) { - verify(HERE), command == rsx::draw_command::inlined_array; + ensure(command == rsx::draw_command::inlined_array); return 0; } @@ -245,7 +245,7 @@ namespace rsx { if (draw_command_ranges.empty()) { - verify(HERE), command == rsx::draw_command::inlined_array; + ensure(command == rsx::draw_command::inlined_array); return 0; } @@ -259,11 +259,11 @@ namespace rsx if (draw_command_ranges.empty()) { - verify(HERE), !inline_vertex_array.empty(); + ensure(!inline_vertex_array.empty()); return true; } - verify(HERE), current_range_index != ~0u; + ensure(current_range_index != ~0u); for (const auto &barrier : draw_command_barriers) { if (barrier.draw_id != current_range_index) @@ -285,7 +285,7 @@ namespace rsx { if (draw_command_ranges.empty()) { - verify(HERE), !inline_vertex_array.empty(); + ensure(!inline_vertex_array.empty()); return 1u; } @@ -293,7 +293,7 @@ namespace rsx if (draw_command_ranges.back().count == 0) { // Dangling barrier - verify(HERE), count > 1; + ensure(count > 1); count--; } @@ -338,7 +338,7 @@ namespace rsx if (draw_command_ranges[current_range_index].count == 0) { // Dangling execution barrier - verify(HERE), current_range_index > 0 && (current_range_index + 1) == draw_command_ranges.size(); + ensure(current_range_index > 0 && (current_range_index + 1) == draw_command_ranges.size()); current_range_index = 0; return false; } @@ -351,7 +351,7 @@ namespace rsx */ void post_execute_cleanup() { - verify(HERE), current_range_index == 0; + ensure(current_range_index == 0); if (draw_command_ranges.size() > 1) { @@ -372,13 +372,13 @@ namespace rsx const draw_range_t& get_range() const { - verify(HERE), current_range_index < draw_command_ranges.size(); 
+ ensure(current_range_index < draw_command_ranges.size()); return draw_command_ranges[current_range_index]; } simple_array get_subranges() const { - verify(HERE), !is_single_draw(); + ensure(!is_single_draw()); const auto range = get_range(); const auto limit = range.first + range.count; @@ -407,7 +407,8 @@ namespace rsx vertex_counter += count; } - verify(HERE), !ret.empty(), previous_barrier < limit; + ensure(!ret.empty()); + ensure(previous_barrier < limit); ret.push_back({ 0, vertex_counter, limit - previous_barrier }); return ret; diff --git a/rpcs3/Emu/RSX/rsx_utils.h b/rpcs3/Emu/RSX/rsx_utils.h index 58d2b21fb9..1c200168ba 100644 --- a/rpcs3/Emu/RSX/rsx_utils.h +++ b/rpcs3/Emu/RSX/rsx_utils.h @@ -144,7 +144,7 @@ namespace rsx address_range get_memory_range() const { - verify(HERE), range.start == address; + ensure(range.start == address); return range; } }; @@ -1025,7 +1025,7 @@ namespace rsx if (_capacity >= size) return; - verify("realloc() failed!" HERE), _data = static_cast(std::realloc(_data, sizeof(Ty) * size)); + ensure(_data = static_cast(std::realloc(_data, sizeof(Ty) * size))); // "realloc() failed!" _capacity = size; } @@ -1057,7 +1057,7 @@ namespace rsx iterator insert(iterator pos, const Ty& val) { - verify(HERE), pos >= _data; + ensure(pos >= _data); const auto _loc = offset(pos); if (_size >= _capacity) @@ -1072,7 +1072,7 @@ namespace rsx return pos; } - verify(HERE), _loc < _size; + ensure(_loc < _size); const auto remaining = (_size - _loc); memmove(pos + 1, pos, remaining * sizeof(Ty)); @@ -1085,7 +1085,7 @@ namespace rsx iterator insert(iterator pos, Ty&& val) { - verify(HERE), pos >= _data; + ensure(pos >= _data); const auto _loc = offset(pos); if (_size >= _capacity) @@ -1100,7 +1100,7 @@ namespace rsx return pos; } - verify(HERE), _loc < _size; + ensure(_loc < _size); const u32 remaining = (_size - _loc); memmove(pos + 1, pos, remaining * sizeof(Ty)); diff --git a/rpcs3/Emu/System.cpp b/rpcs3/Emu/System.cpp index 459a7b3558..b29a8a304a 100644 --- a/rpcs3/Emu/System.cpp +++ b/rpcs3/Emu/System.cpp @@ -1074,7 +1074,7 @@ game_boot_result Emulator::Load(const std::string& title_id, bool add_only, bool // Force lib loading mode g_cfg.core.lib_loading.from_string("Manually load selected libraries"); - verify(HERE), g_cfg.core.lib_loading == lib_loading_type::manual; + ensure(g_cfg.core.lib_loading == lib_loading_type::manual); g_cfg.core.load_libraries.from_default(); // Fake arg (workaround) @@ -1180,7 +1180,7 @@ game_boot_result Emulator::Load(const std::string& title_id, bool add_only, bool else { // Workaround for analyser glitches - verify(HERE), vm::falloc(0x10000, 0xf0000, vm::main); + ensure(vm::falloc(0x10000, 0xf0000, vm::main)); } atomic_t fnext = 0; diff --git a/rpcs3/Emu/VFS.cpp b/rpcs3/Emu/VFS.cpp index 4898b91c5d..d6a1de92eb 100644 --- a/rpcs3/Emu/VFS.cpp +++ b/rpcs3/Emu/VFS.cpp @@ -727,7 +727,7 @@ bool vfs::host::rename(const std::string& from, const std::string& to, const lv2 { if (check_path(fs::escape_path(file.real_path))) { - verify(HERE), file.mp == mp; + ensure(file.mp == mp); file.restore_data.seek_pos = file.file.pos(); file.file.close(); // Actually close it! 
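// Several call sites above consume the checked value directly, e.g.
//     return ensure(g_current.load());
//     ensure(_data = static_cast<...>(std::realloc(_data, sizeof(Ty) * size)));
// which implies the helper evaluates its argument, fails hard on a falsy
// result, and otherwise hands the value back to the caller. A simplified,
// by-value stand-in with that shape (names and signature are assumptions for
// illustration; not the codebase's actual definition):
#include <cstddef>
#include <cstdio>
#include <cstdlib>

template <typename T>
static T ensure_like(T value, const char* what = "check failed")
{
    if (value)
    {
        return value; // pass the verified value straight through
    }
    std::fprintf(stderr, "Verification failed: %s\n", what);
    std::abort();
}

// Usage mirroring the pattern: validate and use an allocation in one step.
static int* checked_alloc_sketch(std::size_t n)
{
    return ensure_like(static_cast<int*>(std::malloc(n * sizeof(int))), "malloc() failed");
}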
} @@ -767,7 +767,7 @@ bool vfs::host::rename(const std::string& from, const std::string& to, const lv2 // Reopen with ignored TRUNC, APPEND, CREATE and EXCL flags auto res0 = lv2_file::open_raw(file.real_path, file.flags & CELL_FS_O_ACCMODE, file.mode, file.type, file.mp); file.file = std::move(res0.file); - verify(HERE), file.file.operator bool(); + ensure(file.file.operator bool()); file.file.seek(file.restore_data.seek_pos); } }); diff --git a/rpcs3/Input/pad_thread.h b/rpcs3/Input/pad_thread.h index c4c1c5b5a0..43b197abe4 100644 --- a/rpcs3/Input/pad_thread.h +++ b/rpcs3/Input/pad_thread.h @@ -57,10 +57,10 @@ namespace pad { if (relaxed) { - return g_current.load(); + return g_current.observe(); } - return verify(HERE, g_current.load()); + return ensure(g_current.load()); } static inline void set_enabled(bool enabled) diff --git a/rpcs3/Loader/PSF.cpp b/rpcs3/Loader/PSF.cpp index 690b6b9792..349b68c65b 100644 --- a/rpcs3/Loader/PSF.cpp +++ b/rpcs3/Loader/PSF.cpp @@ -45,7 +45,8 @@ namespace psf , m_max_size(max_size) , m_value_string(value) { - verify(HERE), type == format::string || type == format::array, max_size; + ensure(type == format::string || type == format::array); + ensure(max_size); } entry::entry(u32 value) @@ -61,26 +62,26 @@ namespace psf const std::string& entry::as_string() const { - verify(HERE), m_type == format::string || m_type == format::array; + ensure(m_type == format::string || m_type == format::array); return m_value_string; } u32 entry::as_integer() const { - verify(HERE), m_type == format::integer; + ensure(m_type == format::integer); return m_value_integer; } entry& entry::operator =(const std::string& value) { - verify(HERE), m_type == format::string || m_type == format::array; + ensure(m_type == format::string || m_type == format::array); m_value_string = value; return *this; } entry& entry::operator =(u32 value) { - verify(HERE), m_type == format::integer; + ensure(m_type == format::integer); m_value_integer = value; return *this; } @@ -112,39 +113,37 @@ namespace psf // Get header header_t header; - verify(HERE), stream.read(header); + ensure(stream.read(header)); // Check magic and version - verify(HERE), - header.magic == "\0PSF"_u32, - header.version == 0x101u, - sizeof(header_t) + header.entries_num * sizeof(def_table_t) <= header.off_key_table, - header.off_key_table <= header.off_data_table, - header.off_data_table <= stream.size(); + ensure(header.magic == "\0PSF"_u32); + ensure(header.version == 0x101u); + ensure(sizeof(header_t) + header.entries_num * sizeof(def_table_t) <= header.off_key_table); + ensure(header.off_key_table <= header.off_data_table); + ensure(header.off_data_table <= stream.size()); // Get indices std::vector indices; - verify(HERE), stream.read(indices, header.entries_num); + ensure(stream.read(indices, header.entries_num)); // Get keys std::string keys; - verify(HERE), stream.seek(header.off_key_table) == header.off_key_table; - verify(HERE), stream.read(keys, header.off_data_table - header.off_key_table); + ensure(stream.seek(header.off_key_table) == header.off_key_table); + ensure(stream.read(keys, header.off_data_table - header.off_key_table)); // Load entries for (u32 i = 0; i < header.entries_num; ++i) { - verify(HERE), indices[i].key_off < header.off_data_table - header.off_key_table; + ensure(indices[i].key_off < header.off_data_table - header.off_key_table); // Get key name (null-terminated string) std::string key(keys.data() + indices[i].key_off); // Check entry - verify(HERE), - result.count(key) == 0, - 
- indices[i].param_len <= indices[i].param_max,
- indices[i].data_off < stream.size() - header.off_data_table,
- indices[i].param_max < stream.size() - indices[i].data_off;
+ ensure(result.count(key) == 0);
+ ensure(indices[i].param_len <= indices[i].param_max);
+ ensure(indices[i].data_off < stream.size() - header.off_data_table);
+ ensure(indices[i].param_max < stream.size() - indices[i].data_off);
 // Seek data pointer
 stream.seek(header.off_data_table + indices[i].data_off);
@@ -153,7 +152,7 @@
 {
 // Integer data
 le_t<u32> value;
- verify(HERE), stream.read(value);
+ ensure(stream.read(value));
 result.emplace(std::piecewise_construct,
 std::forward_as_tuple(std::move(key)),
@@ -163,7 +162,7 @@ namespace psf
 {
 // String/array data
 std::string value;
- verify(HERE), stream.read(value, indices[i].param_len);
+ ensure(stream.read(value, indices[i].param_len));
 if (indices[i].param_fmt == format::string)
 {
diff --git a/rpcs3/util/atomic.cpp b/rpcs3/util/atomic.cpp
index f3f545299a..7f696a6a2d 100644
--- a/rpcs3/util/atomic.cpp
+++ b/rpcs3/util/atomic.cpp
@@ -357,7 +357,7 @@ namespace
 mtx.init(mtx);
 #endif
- verify(HERE), !ptr_ref.exchange((iptr << 17) | 1);
+ ensure(!ptr_ref.exchange((iptr << 17) | 1));
 }
 void destroy()
@@ -650,7 +650,7 @@ static void cond_free(u32 cond_id, u32 tls_slot = -1)
 // Dereference, destroy on last ref
 const bool last = cond->ptr_ref.atomic_op([](u64& val)
 {
- verify(HERE), val & s_ref_mask;
+ ensure(val & s_ref_mask);
 val--;
@@ -963,7 +963,7 @@ void root_info::slot_free(std::uintptr_t iptr, atomic_t* slot, u32 tls_slot
 const u32 diff = static_cast<u32>(slot - _this->slots);
- verify(HERE), slot == &_this->slots[diff];
+ ensure(slot == &_this->slots[diff]);
 const u32 cond_id = slot->exchange(0);
@@ -977,7 +977,7 @@ void root_info::slot_free(std::uintptr_t iptr, atomic_t* slot, u32 tls_slot
 // Reset reference counter and allocation bit in every slot
 curr->bits.atomic_op([&](slot_allocator& bits)
 {
- verify(HERE), bits.ref--;
+ ensure(bits.ref--);
 if (_this == curr.current())
 {
@@ -1226,7 +1226,7 @@ atomic_wait_engine::wait(const void* data, u32 size, __m128i old_value, u64 time
 default:
 {
 SetLastError(status);
- fmt::raw_verify_error("Unexpected NtWaitForAlertByThreadId result.", nullptr, 0);
+ ensure(false); // Unexpected result
 }
 }
 }
@@ -1308,7 +1308,7 @@ __vectorcall
 #endif
 alert_sema(u32 cond_id, const void* data, u64 tid, u32 size, __m128i mask, __m128i phantom)
 {
- verify(HERE), cond_id;
+ ensure(cond_id);
 const auto cond = s_cond_list + cond_id;
diff --git a/rpcs3/util/shared_ptr.hpp b/rpcs3/util/shared_ptr.hpp
index 2ec55d70be..35d5f98406 100644
--- a/rpcs3/util/shared_ptr.hpp
+++ b/rpcs3/util/shared_ptr.hpp
@@ -125,7 +125,7 @@ namespace stx
 single_ptr(single_ptr&& r) noexcept
 : m_ptr(r.m_ptr)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 r.m_ptr = nullptr;
 }
@@ -146,7 +146,7 @@ namespace stx
 template >>
 single_ptr& operator=(single_ptr&& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 m_ptr = r.m_ptr;
 r.m_ptr = nullptr;
 return *this;
 }
@@ -219,7 +219,7 @@ namespace stx
 template (std::declval())), typename = std::enable_if_t>>
 explicit operator single_ptr() && noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 single_ptr r;
 r.m_ptr = static_cast(std::exchange(m_ptr, nullptr));
@@ -369,7 +369,7 @@ namespace stx
 shared_ptr(const shared_ptr& r) noexcept
 : m_ptr(r.m_ptr)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 if (m_ptr)
 d()->refs++;
 }
@@ -384,7 +384,7 @@
 shared_ptr(shared_ptr&& r) noexcept
 : m_ptr(r.m_ptr)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 r.m_ptr = nullptr;
 }
@@ -392,7 +392,7 @@ namespace stx
 shared_ptr(single_ptr&& r) noexcept
 : m_ptr(r.m_ptr)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 r.m_ptr = nullptr;
 }
@@ -410,7 +410,7 @@ namespace stx
 template >>
 shared_ptr& operator=(const shared_ptr& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 shared_ptr(r).swap(*this);
 return *this;
 }
@@ -424,7 +424,7 @@ namespace stx
 template >>
 shared_ptr& operator=(shared_ptr&& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 shared_ptr(std::move(r)).swap(*this);
 return *this;
 }
@@ -432,7 +432,7 @@ namespace stx
 template >>
 shared_ptr& operator=(single_ptr&& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 shared_ptr(std::move(r)).swap(*this);
 return *this;
 }
@@ -453,7 +453,7 @@ namespace stx
 template (std::declval())), typename = std::enable_if_t>>
 explicit operator single_ptr() && noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 const auto o = d();
@@ -540,7 +540,7 @@ namespace stx
 template (std::declval())), typename = std::enable_if_t>>
 explicit operator shared_ptr() const& noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 if (m_ptr)
 {
@@ -556,7 +556,7 @@ namespace stx
 template (std::declval())), typename = std::enable_if_t>>
 explicit operator shared_ptr() && noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 shared_ptr r;
 r.m_ptr = static_cast(std::exchange(m_ptr, nullptr));
@@ -614,7 +614,7 @@ namespace stx
 atomic_ptr(const shared_ptr& r) noexcept
 : m_val(reinterpret_cast(r.m_ptr) << c_ref_size)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 // Obtain a ref + as many refs as an atomic_ptr can additionally reference
 if (m_val)
@@ -625,7 +625,7 @@ namespace stx
 atomic_ptr(shared_ptr&& r) noexcept
 : m_val(reinterpret_cast(r.m_ptr) << c_ref_size)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 r.m_ptr = nullptr;
 if (m_val)
@@ -636,7 +636,7 @@ namespace stx
 atomic_ptr(single_ptr&& r) noexcept
 : m_val(reinterpret_cast(r.m_ptr) << c_ref_size)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 r.m_ptr = nullptr;
 if (m_val)
@@ -668,7 +668,7 @@ namespace stx
 template >>
 atomic_ptr& operator=(const shared_ptr& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 store(r);
 return *this;
 }
@@ -676,7 +676,7 @@ namespace stx
 template >>
 atomic_ptr& operator=(shared_ptr&& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 store(std::move(r));
 return *this;
 }
@@ -684,7 +684,7 @@ namespace stx
 template >>
 atomic_ptr& operator=(single_ptr&& r) noexcept
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 store(std::move(r));
 return *this;
 }
@@ -948,7 +948,7 @@ namespace stx
 template >>
 shared_type compare_and_swap(const shared_ptr& cmp, shared_type exch)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 shared_type old = cmp;
@@ -966,7 +966,7 @@ namespace stx
 template >>
 bool compare_and_swap_test(const shared_ptr& cmp, shared_type exch)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 const uptr _old = reinterpret_cast(cmp.m_ptr);
 const uptr _new = reinterpret_cast(exch.m_ptr);
@@ -1007,7 +1007,7 @@ namespace stx
 template >>
 shared_type compare_and_swap(const single_ptr& cmp, shared_type exch)
 {
- verify(HERE), is_same_ptr();
+ ensure(is_same_ptr());
 shared_type old = cmp;
diff --git a/rpcs3/util/vm_native.cpp b/rpcs3/util/vm_native.cpp
index 44cafa20c6..5b9992d554 100644
--- a/rpcs3/util/vm_native.cpp
+++ b/rpcs3/util/vm_native.cpp
@@ -122,25 +122,25 @@ namespace utils
 void memory_commit(void* pointer, std::size_t size, protection prot)
 {
 #ifdef _WIN32
- verify(HERE), ::VirtualAlloc(pointer, size, MEM_COMMIT, +prot);
+ ensure(::VirtualAlloc(pointer, size, MEM_COMMIT, +prot));
 #else
 const u64 ptr64 = reinterpret_cast<u64>(pointer);
- verify(HERE), ::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1;
- verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_WILLNEED) != -1;
+ ensure(::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1);
+ ensure(::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_WILLNEED) != -1);
 #endif
 }
 void memory_decommit(void* pointer, std::size_t size)
 {
 #ifdef _WIN32
- verify(HERE), ::VirtualFree(pointer, size, MEM_DECOMMIT);
+ ensure(::VirtualFree(pointer, size, MEM_DECOMMIT));
 #else
 const u64 ptr64 = reinterpret_cast<u64>(pointer);
- verify(HERE), ::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(-1);
+ ensure(::mmap(pointer, size, PROT_NONE, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(-1));
 #ifdef MADV_FREE
- verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_FREE) != -1;
+ ensure(::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_FREE) != -1);
 #else
- verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_DONTNEED) != -1;
+ ensure(::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_DONTNEED) != -1);
 #endif
 #endif
 }
@@ -153,12 +153,12 @@ namespace utils
 #else
 const u64 ptr64 = reinterpret_cast<u64>(pointer);
 #ifdef MADV_FREE
- verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_FREE) != -1;
+ ensure(::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_FREE) != -1);
 #else
- verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_DONTNEED) != -1;
+ ensure(::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_DONTNEED) != -1);
 #endif
- verify(HERE), ::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(-1);
- verify(HERE), ::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_WILLNEED) != -1;
+ ensure(::mmap(pointer, size, +prot, MAP_FIXED | MAP_ANON | MAP_PRIVATE, -1, 0) != reinterpret_cast<void*>(-1));
+ ensure(::madvise(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), MADV_WILLNEED) != -1);
 #ifdef MADV_HUGEPAGE
 if (size % 0x200000 == 0)
@@ -170,9 +170,9 @@ namespace utils
 void memory_release(void* pointer, std::size_t size)
 {
 #ifdef _WIN32
- verify(HERE), ::VirtualFree(pointer, 0, MEM_RELEASE);
+ ensure(::VirtualFree(pointer, 0, MEM_RELEASE));
 #else
- verify(HERE), ::munmap(pointer, size) != -1;
+ ensure(::munmap(pointer, size) != -1);
 #endif
 }
@@ -195,7 +195,7 @@ namespace utils
 }
 #else
 const u64 ptr64 = reinterpret_cast<u64>(pointer);
- verify(HERE), ::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1;
+ ensure(::mprotect(reinterpret_cast<void*>(ptr64 & -4096), size + (ptr64 & 4095), +prot) != -1);
 #endif
 }
@@ -215,7 +215,7 @@ namespace utils
 {
 #ifdef _WIN32
 m_handle = ::CreateFileMappingW(INVALID_HANDLE_VALUE, NULL, PAGE_EXECUTE_READWRITE, 0, m_size, NULL);
- verify(HERE), m_handle != INVALID_HANDLE_VALUE;
+ ensure(m_handle != INVALID_HANDLE_VALUE);
 #elif __linux__
 m_file = -1;
 #ifdef MFD_HUGETLB
@@ -230,8 +230,8 @@ namespace utils
 m_file = ::memfd_create_("", 0);
 }
- verify(HERE), m_file >= 0;
- verify(HERE), ::ftruncate(m_file, m_size) >= 0;
+ ensure(m_file >= 0);
+ ensure(::ftruncate(m_file, m_size) >= 0);
 #else
 const std::string name = "/rpcs3-mem-" + std::to_string(reinterpret_cast(this));
@@ -242,11 +242,11 @@ namespace utils
 fmt::throw_exception("Too many open files. Raise the limit and try again.");
 }
- verify(HERE), errno == EEXIST;
+ ensure(errno == EEXIST);
 }
- verify(HERE), ::shm_unlink(name.c_str()) >= 0;
- verify(HERE), ::ftruncate(m_file, m_size) >= 0;
+ ensure(::shm_unlink(name.c_str()) >= 0);
+ ensure(::ftruncate(m_file, m_size) >= 0);
 #endif
 }
@@ -312,12 +312,12 @@ namespace utils
 // Now cleanup remnants
 if (aligned > res64)
 {
- verify(HERE), ::munmap(reinterpret_cast<void*>(res64), aligned - res64) == 0;
+ ensure(::munmap(reinterpret_cast<void*>(res64), aligned - res64) == 0);
 }
 if (aligned < res64 + 0xf000)
 {
- verify(HERE), ::munmap(reinterpret_cast<void*>(aligned + m_size), (res64 + 0xf000) - (aligned)) == 0;
+ ensure(::munmap(reinterpret_cast<void*>(aligned + m_size), (res64 + 0xf000) - (aligned)) == 0);
 }
 return reinterpret_cast(result);
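Every hunk above applies the same mechanical rewrite: a comma-chained `verify(HERE), cond1, cond2;` statement becomes one `ensure(cond)` call per condition, and because `ensure()` passes its argument through, it can also wrap a value that is consumed afterwards (as in `return ensure(g_current.load());`). The sketch below only illustrates the shape of such a pass-through assertion helper; the names `util_sketch`/`ensure_impl` and the reporting via `std::fprintf`/`std::abort` are assumptions made for this example, not the actual RPCS3 definitions.

// Illustrative sketch only (not the RPCS3 implementation): an assertion helper
// that aborts on a falsy value and otherwise returns it unchanged, which is
// what lets ensure(expr) replace "verify(HERE), expr;" inline.
#include <cstdio>
#include <cstdlib>
#include <utility>

namespace util_sketch
{
	template <typename T>
	T&& ensure_impl(T&& value, const char* expr, const char* file, int line)
	{
		if (!value) // requires the argument to be contextually convertible to bool
		{
			std::fprintf(stderr, "Verification failed: %s (%s:%d)\n", expr, file, line);
			std::abort();
		}

		return std::forward<T>(value);
	}
}

// Usage mirrors the hunks above: one call per condition, or wrapping a value.
#define ensure(expr) (::util_sketch::ensure_impl((expr), #expr, __FILE__, __LINE__))

Splitting the old comma chains into separate ensure() calls (as in the PSF header checks above) also means a failed check points at the exact condition rather than at the whole chain.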