diff --git a/.gitignore b/.gitignore index c15ad42a..2584b70d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,7 +1,8 @@ build -cmake-build-* +cmake-build* .DS_Store - +.vscode +.vs *.swp *.kdev4 diff --git a/CMakeLists.txt b/CMakeLists.txt index 33e09423..a85d880d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,39 +4,6 @@ project(Sunshine) set(CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}) -# On MSYS2, building a stand-alone binary that links with ffmpeg is not possible, -# Therefore, ffmpeg, libx264 and libx265 must be build from source -if(WIN32) - file( - DOWNLOAD "https://github.com/TheElixZammuto/sunshine-prebuilt/releases/download/1.0.0/pre-compiled.zip" "${CMAKE_CURRENT_BINARY_DIR}/pre-compiled.zip" - TIMEOUT 60 - EXPECTED_HASH SHA256=5d59986bd7f619eaaf82b2dd56b5127b747c9cbe8db61e3b898ff6b485298ed6) - - file(ARCHIVE_EXTRACT - INPUT "${CMAKE_CURRENT_BINARY_DIR}/pre-compiled.zip" - DESTINATION ${CMAKE_CURRENT_BINARY_DIR}/pre-compiled) - set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} -static") - - if(NOT DEFINED SUNSHINE_PREPARED_BINARIES) - set(SUNSHINE_PREPARED_BINARIES "${CMAKE_CURRENT_BINARY_DIR}/pre-compiled/windows") - endif() - - set(FFMPEG_INCLUDE_DIRS - ${SUNSHINE_PREPARED_BINARIES}/include) - set(FFMPEG_LIBRARIES - ${SUNSHINE_PREPARED_BINARIES}/lib/libavcodec.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libavdevice.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libavfilter.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libavformat.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libavutil.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libpostproc.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libswresample.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libswscale.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libx264.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libx265.a - ${SUNSHINE_PREPARED_BINARIES}/lib/libhdr10plus.a - z lzma bcrypt libiconv.a) -endif() add_subdirectory(Simple-Web-Server) add_subdirectory(moonlight-common-c/enet) @@ -108,7 +75,7 @@ if(WIN32) wsock32 ws2_32 iphlpapi - d3d11 dxgi + d3d11 dxgi D3DCompiler setupapi ) diff --git a/assets/shaders/ConvertUVPS.hlsl b/assets/shaders/ConvertUVPS.hlsl new file mode 100644 index 00000000..2b72cddf --- /dev/null +++ b/assets/shaders/ConvertUVPS.hlsl @@ -0,0 +1,33 @@ +Texture2D image : register(t0); + +SamplerState def_sampler : register(s0); + +struct FragTexWide { + float3 uuv : TEXCOORD0; +}; + +cbuffer ColorMatrix : register(b0) { + float4 color_vec_y; + float4 color_vec_u; + float4 color_vec_v; + float2 range_y; + float2 range_uv; +}; + +//-------------------------------------------------------------------------------------- +// Pixel Shader +//-------------------------------------------------------------------------------------- +float2 main_ps(FragTexWide input) : SV_Target +{ + float3 rgb_left = image.Sample(def_sampler, input.uuv.xz).rgb; + float3 rgb_right = image.Sample(def_sampler, input.uuv.yz).rgb; + float3 rgb = (rgb_left + rgb_right) * 0.5; + + float u = dot(color_vec_u.xyz, rgb) + color_vec_u.w; + float v = dot(color_vec_v.xyz, rgb) + color_vec_v.w; + + u = u * range_uv.x + range_uv.y; + v = v * range_uv.x + range_uv.y; + + return float2(u, v * 224.0f/256.0f + 0.0625); +} \ No newline at end of file diff --git a/assets/shaders/ConvertUVVS.hlsl b/assets/shaders/ConvertUVVS.hlsl new file mode 100644 index 00000000..66d97d43 --- /dev/null +++ b/assets/shaders/ConvertUVVS.hlsl @@ -0,0 +1,29 @@ +struct VertTexPosWide { + float3 uuv : TEXCOORD; + float4 pos : SV_POSITION; +}; + +cbuffer info : register(b0) { + float width_i; +}; + 
+//-------------------------------------------------------------------------------------- +// Vertex Shader +//-------------------------------------------------------------------------------------- +VertTexPosWide main_vs(uint vI : SV_VERTEXID) +{ + float idHigh = float(vI >> 1); + float idLow = float(vI & uint(1)); + + float x = idHigh * 4.0 - 1.0; + float y = idLow * 4.0 - 1.0; + + float u_right = idHigh * 2.0; + float u_left = u_right - width_i; + float v = 1.0 - idLow * 2.0; + + VertTexPosWide vert_out; + vert_out.uuv = float3(u_left, u_right, v); + vert_out.pos = float4(x, y, 0.0, 1.0); + return vert_out; +} \ No newline at end of file diff --git a/assets/shaders/ConvertYPS.hlsl b/assets/shaders/ConvertYPS.hlsl new file mode 100644 index 00000000..386133c8 --- /dev/null +++ b/assets/shaders/ConvertYPS.hlsl @@ -0,0 +1,25 @@ +Texture2D image : register(t0); + +SamplerState def_sampler : register(s0); + +cbuffer ColorMatrix : register(b0) { + float4 color_vec_y; + float4 color_vec_u; + float4 color_vec_v; + float2 range_y; + float2 range_uv; +}; + +struct PS_INPUT +{ + float4 pos : SV_POSITION; + float2 tex : TEXCOORD; +}; + +float main_ps(PS_INPUT frag_in) : SV_Target +{ + float3 rgb = image.Sample(def_sampler, frag_in.tex, 0).rgb; + float y = dot(color_vec_y.xyz, rgb); + + return y * range_y.x + range_y.y; +} \ No newline at end of file diff --git a/assets/shaders/ScenePS.hlsl b/assets/shaders/ScenePS.hlsl new file mode 100644 index 00000000..aa601231 --- /dev/null +++ b/assets/shaders/ScenePS.hlsl @@ -0,0 +1,14 @@ +Texture2D image : register(t0); + +SamplerState def_sampler : register(s0); + +struct PS_INPUT +{ + float4 pos : SV_POSITION; + float2 tex : TEXCOORD; +}; + +float4 main_ps(PS_INPUT frag_in) : SV_Target +{ + return image.Sample(def_sampler, frag_in.tex, 0); +} \ No newline at end of file diff --git a/assets/shaders/SceneVS.hlsl b/assets/shaders/SceneVS.hlsl new file mode 100644 index 00000000..51319ddb --- /dev/null +++ b/assets/shaders/SceneVS.hlsl @@ -0,0 +1,22 @@ +struct PS_INPUT +{ + float4 pos : SV_POSITION; + float2 tex : TEXCOORD; +}; + +PS_INPUT main_vs(uint vI : SV_VERTEXID) +{ + float idHigh = float(vI >> 1); + float idLow = float(vI & uint(1)); + + float x = idHigh * 4.0 - 1.0; + float y = idLow * 4.0 - 1.0; + + float u = idHigh * 2.0; + float v = 1.0 - idLow * 2.0; + + PS_INPUT vert_out; + vert_out.pos = float4(x, y, 0.0, 1.0); + vert_out.tex = float2(u, v); + return vert_out; +} \ No newline at end of file diff --git a/assets/sunshine.conf b/assets/sunshine.conf index 356acd79..12377eda 100644 --- a/assets/sunshine.conf +++ b/assets/sunshine.conf @@ -183,13 +183,11 @@ # amd_preset = balanced # ####### rate control ##### -# auto -- let ffmpeg decide rate control -# constqp -- constant QP mode -# vbr -- variable bitrate -# cbr -- constant bitrate -# cbr_hq -- cbr high quality -# cbr_ld_hq -- cbr low delay high quality -# vbr_hq -- vbr high quality +# auto -- let ffmpeg decide rate control +# constqp -- constant QP mode +# vbr_latency -- Latency Constrained Variable Bitrate +# vbr_peak -- Peak Constrained Variable Bitrate +# cbr -- constant bitrate ########################## # amd_rc = auto diff --git a/sunshine/config.cpp b/sunshine/config.cpp index 88cc4502..9408578c 100644 --- a/sunshine/config.cpp +++ b/sunshine/config.cpp @@ -94,12 +94,10 @@ enum quality_e : int { }; enum rc_e : int { - constqp = 0x0, /**< Constant QP mode */ - vbr = 0x1, /**< Variable bitrate mode */ - cbr = 0x2, /**< Constant bitrate mode */ - cbr_ld_hq = 0x8, /**< low-delay CBR, high
quality */ - cbr_hq = 0x10, /**< CBR, high quality (slower) */ - vbr_hq = 0x20 /**< VBR, high quality (slower) */ + constqp, /**< Constant QP mode */ + vbr_latency, /**< Latency Constrained Variable Bitrate */ + vbr_peak, /**< Peak Constrained Variable Bitrate */ + cbr, /**< Constant bitrate mode */ }; enum coder_e : int { @@ -121,11 +119,9 @@ std::optional quality_from_view(const std::string_view &quality) { std::optional rc_from_view(const std::string_view &rc) { #define _CONVERT_(x) if(rc == #x##sv) return x _CONVERT_(constqp); - _CONVERT_(vbr); + _CONVERT_(vbr_latency); + _CONVERT_(vbr_peak); _CONVERT_(cbr); - _CONVERT_(cbr_hq); - _CONVERT_(vbr_hq); - _CONVERT_(cbr_ld_hq); #undef _CONVERT_ return std::nullopt; } diff --git a/sunshine/crypto.cpp b/sunshine/crypto.cpp index 70a98289..398c8e09 100644 --- a/sunshine/crypto.cpp +++ b/sunshine/crypto.cpp @@ -187,10 +187,10 @@ x509_t x509(const std::string_view &x) { BIO_write(io.get(), x.data(), x.size()); - X509 *p = nullptr; + x509_t p; PEM_read_bio_X509(io.get(), &p, nullptr, nullptr); - return x509_t { p }; + return p; } pkey_t pkey(const std::string_view &k) { @@ -198,10 +198,10 @@ pkey_t pkey(const std::string_view &k) { BIO_write(io.get(), k.data(), k.size()); - EVP_PKEY *p = nullptr; + pkey_t p = nullptr; PEM_read_bio_PrivateKey(io.get(), &p, nullptr, nullptr); - return pkey_t { p }; + return p; } std::string pem(x509_t &x509) { diff --git a/sunshine/input.cpp b/sunshine/input.cpp index 402d4ce3..9a41dd03 100644 --- a/sunshine/input.cpp +++ b/sunshine/input.cpp @@ -402,11 +402,18 @@ void passthrough(std::shared_ptr &input, std::vector &&in task_pool.push(passthrough_helper, input, util::cmove(input_data)); } -void reset(){ - for(auto& kp : key_press){ +void reset() { + if(task_id) { + task_pool.cancel(task_id); + } + + // Ensure input is synchronous + task_pool.push([]() { + for(auto& kp : key_press) { platf::keyboard(platf_input, kp.first & 0x00FF, true); key_press[kp.first] = false; } + }); } void init() { diff --git a/sunshine/main.cpp b/sunshine/main.cpp index 2938e69f..15248deb 100644 --- a/sunshine/main.cpp +++ b/sunshine/main.cpp @@ -128,6 +128,10 @@ int main(int argc, char *argv[]) { proc::refresh(config::stream.file_apps); auto deinit_guard = platf::init(); + if(!deinit_guard) { + return 4; + } + input::init(); reed_solomon_init(); if(video::init()) { @@ -141,6 +145,8 @@ int main(int argc, char *argv[]) { stream::rtpThread(shutdown_event); httpThread.join(); + task_pool.stop(); + task_pool.join(); return 0; } diff --git a/sunshine/nvhttp.cpp b/sunshine/nvhttp.cpp index da402706..98f5a79d 100644 --- a/sunshine/nvhttp.cpp +++ b/sunshine/nvhttp.cpp @@ -846,8 +846,22 @@ void start(std::shared_ptr shutdown_event) { return; } - std::thread ssl { &https_server_t::accept_and_run, &https_server }; - std::thread tcp { &http_server_t::accept_and_run, &http_server }; + auto accept_and_run = [&](auto *http_server) { + try { + http_server->accept_and_run(); + } catch(boost::system::system_error &err) { + // It's possible the exception gets thrown after calling http_server->stop() from a different thread + if(shutdown_event->peek()) { + return; + } + + BOOST_LOG(fatal) << "Couldn't start http server to ports ["sv << PORT_HTTPS << ", "sv << PORT_HTTP << "]: "sv << err.what(); + shutdown_event->raise(true); + return; + } + }; + std::thread ssl { accept_and_run, &https_server }; + std::thread tcp { accept_and_run, &http_server }; // Wait for any event shutdown_event->view(); diff --git a/sunshine/platform/common.h b/sunshine/platform/common.h
index 140b54b1..fa6b3be7 100644 --- a/sunshine/platform/common.h +++ b/sunshine/platform/common.h @@ -43,6 +43,21 @@ enum class pix_fmt_e { unknown }; +inline std::string_view from_pix_fmt(pix_fmt_e pix_fmt) { +using namespace std::literals; +#define _CONVERT(x) case pix_fmt_e:: x : return #x ## sv + switch(pix_fmt) { + _CONVERT(yuv420p); + _CONVERT(yuv420p10); + _CONVERT(nv12); + _CONVERT(p010); + _CONVERT(unknown); + } +#undef _CONVERT + + return "unknown"sv; +} + struct gamepad_state_t { std::uint16_t buttonFlags; std::uint8_t lt; diff --git a/sunshine/platform/linux/input.cpp b/sunshine/platform/linux/input.cpp index f5696522..6bb5d495 100644 --- a/sunshine/platform/linux/input.cpp +++ b/sunshine/platform/linux/input.cpp @@ -502,5 +502,5 @@ void freeInput(void *p) { delete input; } -std::unique_ptr init() { return nullptr; } +std::unique_ptr init() { return std::make_unique(); } } diff --git a/sunshine/platform/windows/audio.cpp b/sunshine/platform/windows/audio.cpp index a71e4a77..63369ccb 100644 --- a/sunshine/platform/windows/audio.cpp +++ b/sunshine/platform/windows/audio.cpp @@ -81,50 +81,43 @@ public: HRESULT status; - device_enum_t::pointer device_enum_p{}; status = CoCreateInstance( CLSID_MMDeviceEnumerator, nullptr, CLSCTX_ALL, IID_IMMDeviceEnumerator, - (void **) &device_enum_p); - device_enum.reset(device_enum_p); + (void **) &device_enum); - if (FAILED(status)) { + if(FAILED(status)) { BOOST_LOG(error) << "Couldn't create Device Enumerator [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - device_t::pointer device_p{}; - if(config::audio.sink.empty()) { status = device_enum->GetDefaultAudioEndpoint( eRender, eConsole, - &device_p); + &device); } else { std::wstring_convert, wchar_t> converter; auto wstring_device_id = converter.from_bytes(config::audio.sink); - status = device_enum->GetDevice(wstring_device_id.c_str(), &device_p); + status = device_enum->GetDevice(wstring_device_id.c_str(), &device); } - device.reset(device_p); - if (FAILED(status)) { + if(FAILED(status)) { BOOST_LOG(error) << "Couldn't create audio Device [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - audio_client_t::pointer audio_client_p{}; status = device->Activate( IID_IAudioClient, CLSCTX_ALL, nullptr, - (void **) &audio_client_p); - audio_client.reset(audio_client_p); + (void **) &audio_client); if (FAILED(status)) { BOOST_LOG(error) << "Couldn't activate audio Device [0x"sv << util::hex(status).to_string_view() << ']'; @@ -132,11 +125,8 @@ public: return -1; } - wave_format_t::pointer wave_format_p{}; - status = audio_client->GetMixFormat(&wave_format_p); - wave_format.reset(wave_format_p); - - if (FAILED(status)) { + status = audio_client->GetMixFormat(&wave_format); + if(FAILED(status)) { BOOST_LOG(error) << "Couldn't acquire Wave Format [0x"sv << util::hex(status).to_string_view() << ']'; return -1; @@ -198,9 +188,7 @@ public: sample_buf = util::buffer_t { frames }; sample_buf_pos = std::begin(sample_buf); - audio_capture_t::pointer audio_capture_p {}; - status = audio_client->GetService(IID_IAudioCaptureClient, (void**)&audio_capture_p); - audio_capture.reset(audio_capture_p); + status = audio_client->GetService(IID_IAudioCaptureClient, (void**)&audio_capture); if (FAILED(status)) { BOOST_LOG(error) << "Couldn't initialize audio capture client [0x"sv << util::hex(status).to_string_view() << ']'; @@ -319,6 +307,12 @@ public: } namespace platf { + +// It's not big enough to justify it's own source file :/ +namespace dxgi { +int init(); +} + std::unique_ptr 
microphone(std::uint32_t sample_rate) { auto mic = std::make_unique(); @@ -330,6 +324,9 @@ std::unique_ptr microphone(std::uint32_t sample_rate) { } std::unique_ptr init() { + if(dxgi::init()) { + return nullptr; + } return std::make_unique(); } } diff --git a/sunshine/platform/windows/display.h b/sunshine/platform/windows/display.h index 039e6a75..8bc7ae9e 100644 --- a/sunshine/platform/windows/display.h +++ b/sunshine/platform/windows/display.h @@ -32,6 +32,7 @@ using output_t = util::safe_ptr>; using output1_t = util::safe_ptr>; using dup_t = util::safe_ptr>; using texture2d_t = util::safe_ptr>; +using texture1d_t = util::safe_ptr>; using resource_t = util::safe_ptr>; using multithread_t = util::safe_ptr>; diff --git a/sunshine/platform/windows/display_base.cpp b/sunshine/platform/windows/display_base.cpp index 29b53523..8644ed88 100644 --- a/sunshine/platform/windows/display_base.cpp +++ b/sunshine/platform/windows/display_base.cpp @@ -90,17 +90,10 @@ int display_base_t::init() { FreeLibrary(user32); }); */ - - dxgi::factory1_t::pointer factory_p {}; - dxgi::adapter_t::pointer adapter_p {}; - dxgi::output_t::pointer output_p {}; - dxgi::device_t::pointer device_p {}; - dxgi::device_ctx_t::pointer device_ctx_p {}; HRESULT status; - status = CreateDXGIFactory1(IID_IDXGIFactory1, (void**)&factory_p); - factory.reset(factory_p); + status = CreateDXGIFactory1(IID_IDXGIFactory1, (void**)&factory); if(FAILED(status)) { BOOST_LOG(error) << "Failed to create DXGIFactory1 [0x"sv << util::hex(status).to_string_view() << ']'; return -1; @@ -111,7 +104,8 @@ int display_base_t::init() { auto adapter_name = converter.from_bytes(config::video.adapter_name); auto output_name = converter.from_bytes(config::video.output_name); - for(int x = 0; factory_p->EnumAdapters1(x, &adapter_p) != DXGI_ERROR_NOT_FOUND; ++x) { + adapter_t::pointer adapter_p; + for(int x = 0; factory->EnumAdapters1(x, &adapter_p) != DXGI_ERROR_NOT_FOUND; ++x) { dxgi::adapter_t adapter_tmp { adapter_p }; DXGI_ADAPTER_DESC1 adapter_desc; @@ -121,8 +115,9 @@ int display_base_t::init() { continue; } + dxgi::output_t::pointer output_p; for(int y = 0; adapter_tmp->EnumOutputs(y, &output_p) != DXGI_ERROR_NOT_FOUND; ++y) { - dxgi::output_t output_tmp {output_p }; + dxgi::output_t output_tmp { output_p }; DXGI_OUTPUT_DESC desc; output_tmp->GetDesc(&desc); @@ -173,14 +168,12 @@ int display_base_t::init() { D3D11_CREATE_DEVICE_VIDEO_SUPPORT, featureLevels, sizeof(featureLevels) / sizeof(D3D_FEATURE_LEVEL), D3D11_SDK_VERSION, - &device_p, + &device, &feature_level, - &device_ctx_p); + &device_ctx); adapter_p->Release(); - device.reset(device_p); - device_ctx.reset(device_ctx_p); if(FAILED(status)) { BOOST_LOG(error) << "Failed to create D3D11 device [0x"sv << util::hex(status).to_string_view() << ']'; @@ -216,7 +209,7 @@ int display_base_t::init() { tp.Privileges[0].Attributes = SE_PRIVILEGE_ENABLED; if (!AdjustTokenPrivileges(token, false, &tp, sizeof(tp), NULL, NULL)) { - BOOST_LOG(error) << "Could not set privilege to increase GPU priority"; + BOOST_LOG(warning) << "Could not set privilege to increase GPU priority"; } } @@ -229,17 +222,15 @@ int display_base_t::init() { if (fn) { status = fn(GetCurrentProcess(), D3DKMT_SCHEDULINGPRIORITYCLASS_REALTIME); if (FAILED(status)) { - BOOST_LOG(error) << "Failed to set realtime GPU priority. Please run application as administrator for optimal performance."; + BOOST_LOG(warning) << "Failed to set realtime GPU priority. 
Please run application as administrator for optimal performance."; } } } - dxgi::dxgi_t::pointer dxgi_p {}; - status = device->QueryInterface(IID_IDXGIDevice, (void**)&dxgi_p); - dxgi::dxgi_t dxgi { dxgi_p }; - + dxgi::dxgi_t dxgi; + status = device->QueryInterface(IID_IDXGIDevice, (void**)&dxgi); if(FAILED(status)) { - BOOST_LOG(error) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']'; + BOOST_LOG(warning) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } @@ -248,25 +239,24 @@ int display_base_t::init() { // Try to reduce latency { - dxgi::dxgi1_t::pointer dxgi_p {}; - status = device->QueryInterface(IID_IDXGIDevice, (void**)&dxgi_p); - dxgi::dxgi1_t dxgi { dxgi_p }; - + dxgi::dxgi1_t dxgi {}; + status = device->QueryInterface(IID_IDXGIDevice, (void**)&dxgi); if(FAILED(status)) { BOOST_LOG(error) << "Failed to query DXGI interface from device [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - dxgi->SetMaximumFrameLatency(1); + status = dxgi->SetMaximumFrameLatency(1); + if(FAILED(status)) { + BOOST_LOG(warning) << "Failed to set maximum frame latency [0x"sv << util::hex(status).to_string_view() << ']'; + } } //FIXME: Duplicate output on RX580 in combination with DOOM (2016) --> BSOD //TODO: Use IDXGIOutput5 for improved performance { - dxgi::output1_t::pointer output1_p {}; - status = output->QueryInterface(IID_IDXGIOutput1, (void**)&output1_p); - dxgi::output1_t output1 {output1_p }; - + dxgi::output1_t output1 {}; + status = output->QueryInterface(IID_IDXGIOutput1, (void**)&output1); if(FAILED(status)) { BOOST_LOG(error) << "Failed to query IDXGIOutput1 from the output"sv; return -1; @@ -274,10 +264,8 @@ int display_base_t::init() { // We try this twice, in case we still get an error on reinitialization for(int x = 0; x < 2; ++x) { - dxgi::dup_t::pointer dup_p {}; - status = output1->DuplicateOutput((IUnknown*)device.get(), &dup_p); + status = output1->DuplicateOutput((IUnknown*)device.get(), &dup.dup); if(SUCCEEDED(status)) { - dup.reset(dup_p); break; } std::this_thread::sleep_for(200ms); diff --git a/sunshine/platform/windows/display_ram.cpp b/sunshine/platform/windows/display_ram.cpp index 15d608fe..ffcbbd25 100644 --- a/sunshine/platform/windows/display_ram.cpp +++ b/sunshine/platform/windows/display_ram.cpp @@ -203,9 +203,8 @@ capture_e display_ram_t::snapshot(::platf::img_t *img_base, std::chrono::millise // If frame has been updated if (frame_info.LastPresentTime.QuadPart != 0) { { - texture2d_t::pointer src_p {}; - status = res->QueryInterface(IID_ID3D11Texture2D, (void **)&src_p); - texture2d_t src{src_p}; + texture2d_t src {}; + status = res->QueryInterface(IID_ID3D11Texture2D, (void **)&src); if (FAILED(status)) { BOOST_LOG(error) << "Couldn't query interface [0x"sv << util::hex(status).to_string_view() << ']'; @@ -279,10 +278,7 @@ int display_ram_t::init() { t.Format = format; t.CPUAccessFlags = D3D11_CPU_ACCESS_READ; - dxgi::texture2d_t::pointer tex_p {}; - auto status = device->CreateTexture2D(&t, nullptr, &tex_p); - - texture.reset(tex_p); + auto status = device->CreateTexture2D(&t, nullptr, &texture); if(FAILED(status)) { BOOST_LOG(error) << "Failed to create texture [0x"sv << util::hex(status).to_string_view() << ']'; diff --git a/sunshine/platform/windows/display_vram.cpp b/sunshine/platform/windows/display_vram.cpp index 0d87b40c..00e0c69d 100644 --- a/sunshine/platform/windows/display_vram.cpp +++ 
b/sunshine/platform/windows/display_vram.cpp @@ -1,14 +1,131 @@ +#include + +#include +#include + #include "sunshine/main.h" #include "display.h" +#define SUNSHINE_SHADERS_DIR SUNSHINE_ASSETS_DIR "/shaders" namespace platf { using namespace std::literals; } namespace platf::dxgi { +constexpr float aquamarine[] { 0.498039246f, 1.000000000f, 0.831372619f, 1.000000000f }; + +using input_layout_t = util::safe_ptr>; +using render_target_t = util::safe_ptr>; +using shader_res_t = util::safe_ptr>; +using buf_t = util::safe_ptr>; +using blend_t = util::safe_ptr>; +using raster_state_t = util::safe_ptr>; +using sampler_state_t = util::safe_ptr>; +using vs_t = util::safe_ptr>; +using ps_t = util::safe_ptr>; +using blob_t = util::safe_ptr>; +using depth_stencil_state_t = util::safe_ptr>; +using depth_stencil_view_t = util::safe_ptr>; + +using float4 = DirectX::XMFLOAT4; +using float3 = DirectX::XMFLOAT3; +using float2 = DirectX::XMFLOAT2; +struct __attribute__ ((__aligned__ (16))) color_t { + float4 color_vec_y; + float4 color_vec_u; + float4 color_vec_v; + float2 range_y; + float2 range_uv; +}; + +color_t make_color_matrix(float Cr, float Cb, float U_max, float V_max, float add_Y, float add_UV, float2 range_Y, float2 range_UV) { + float Cg = 1.0f - Cr - Cb; + + float Cr_i = 1.0f - Cr; + float Cb_i = 1.0f - Cb; + + float shift_y = range_Y.x / 256.0f; + float shift_uv = range_UV.x / 256.0f; + + float scale_y = (range_Y.y - range_Y.x) / 256.0f; + float scale_uv = (range_UV.y - range_UV.x) / 256.0f; + return { + { Cr, Cg, Cb, add_Y }, + { -(Cr * U_max / Cb_i), -(Cg * U_max / Cb_i), U_max, add_UV }, + { V_max, -(Cg * V_max / Cr_i), -(Cb * V_max / Cr_i), add_UV }, + { scale_y, shift_y }, + { scale_uv, shift_uv }, + }; +} + +color_t colors[] { + make_color_matrix(0.299f, 0.114f, 0.436f, 0.615f, 0.0625, 0.5f, { 16.0f, 235.0f }, { 16.0f, 240.0f }), // BT601 MPEG + make_color_matrix(0.299f, 0.114f, 0.5f, 0.5f, 0.0f, 0.5f, { 0.0f, 255.0f }, { 0.0f, 255.0f }), // BT601 JPEG + make_color_matrix(0.2126f, 0.0722f, 0.436f, 0.615f, 0.0625, 0.5f, { 16.0f, 235.0f }, { 16.0f, 240.0f }), //BT701 MPEG + make_color_matrix(0.2126f, 0.0722f, 0.5f, 0.5f, 0.0f, 0.5f, { 0.0f, 255.0f }, { 0.0f, 255.0f }), //BT701 JPEG +}; + +template +buf_t make_buffer(device_t::pointer device, const T& t) { + static_assert(sizeof(T) % 16 == 0, "Buffer needs to be aligned on a 16-byte alignment"); + + D3D11_BUFFER_DESC buffer_desc { + sizeof(T), + D3D11_USAGE_IMMUTABLE, + D3D11_BIND_CONSTANT_BUFFER + }; + + D3D11_SUBRESOURCE_DATA init_data { + &t + }; + + buf_t::pointer buf_p; + auto status = device->CreateBuffer(&buffer_desc, &init_data, &buf_p); + if(status) { + BOOST_LOG(error) << "Failed to create buffer: [0x"sv << util::hex(status).to_string_view() << ']'; + return nullptr; + } + + return buf_t { buf_p }; +} + +blend_t make_blend(device_t::pointer device, bool enable) { + D3D11_BLEND_DESC bdesc {}; + auto &rt = bdesc.RenderTarget[0]; + rt.BlendEnable = enable; + rt.RenderTargetWriteMask = D3D11_COLOR_WRITE_ENABLE_ALL; + + if(enable) { + rt.BlendOp = D3D11_BLEND_OP_ADD; + rt.BlendOpAlpha = D3D11_BLEND_OP_ADD; + + rt.SrcBlend = D3D11_BLEND_SRC_ALPHA; + rt.DestBlend = D3D11_BLEND_INV_SRC_ALPHA; + + rt.SrcBlendAlpha = D3D11_BLEND_ZERO; + rt.DestBlendAlpha = D3D11_BLEND_ZERO; + } + + blend_t blend; + auto status = device->CreateBlendState(&bdesc, &blend); + if(status) { + BOOST_LOG(error) << "Failed to create blend state: [0x"sv << util::hex(status).to_string_view() << ']'; + return nullptr; + } + + return blend; +} + +blob_t 
convert_UV_vs_hlsl; +blob_t convert_UV_ps_hlsl; +blob_t scene_vs_hlsl; +blob_t convert_Y_ps_hlsl; +blob_t scene_ps_hlsl; + struct img_d3d_t : public platf::img_t { - std::shared_ptr display; + shader_res_t input_res; texture2d_t texture; + std::shared_ptr display; ~img_d3d_t() override = default; }; @@ -97,9 +214,43 @@ util::buffer_t make_cursor_image(util::buffer_t &&im return cursor_img; } +blob_t compile_shader(LPCSTR file, LPCSTR entrypoint, LPCSTR shader_model) { + blob_t::pointer msg_p = nullptr; + blob_t::pointer compiled_p; + + DWORD flags = D3DCOMPILE_ENABLE_STRICTNESS; + +#ifndef NDEBUG + flags |= D3DCOMPILE_DEBUG | D3DCOMPILE_SKIP_OPTIMIZATION; +#endif + std::wstring_convert, wchar_t> converter; + + auto wFile = converter.from_bytes(file); + auto status = D3DCompileFromFile(wFile.c_str(), nullptr, nullptr, entrypoint, shader_model, flags, 0, &compiled_p, &msg_p); + + if(msg_p) { + BOOST_LOG(warning) << std::string_view { (const char *)msg_p->GetBufferPointer(), msg_p->GetBufferSize() - 1 }; + msg_p->Release(); + } + + if(status) { + BOOST_LOG(error) << "Couldn't compile ["sv << file << "] [0x"sv << util::hex(status).to_string_view() << ']'; + return nullptr; + } + + return blob_t { compiled_p }; +} + +blob_t compile_pixel_shader(LPCSTR file) { + return compile_shader(file, "main_ps", "ps_5_0"); +} + +blob_t compile_vertex_shader(LPCSTR file) { + return compile_shader(file, "main_vs", "vs_5_0"); +} + class hwdevice_t : public platf::hwdevice_t { public: - hwdevice_t(std::vector *hwdevices_p) : hwdevices_p { hwdevices_p } {} hwdevice_t() = delete; @@ -110,81 +261,118 @@ public: return; } - LONG x = ((double)rel_x) * out_width / (double)in_width; - LONG y = ((double)rel_y) * out_height / (double)in_height; + auto x = ((float)rel_x); + auto y = ((float)rel_y); - // Ensure it's within bounds - auto left_out = std::min(out_width, std::max(0, x)); - auto top_out = std::min(out_height, std::max(0, y)); - auto right_out = std::max(0, std::min(out_width, x + cursor_scaled_width)); - auto bottom_out = std::max(0, std::min(out_height, y + cursor_scaled_height)); - - auto left_in = std::max(0, -rel_x); - auto top_in = std::max(0, -rel_y); - auto right_in = std::min(in_width - rel_x, cursor_width); - auto bottom_in = std::min(in_height - rel_y, cursor_height); - - RECT rect_in { left_in, top_in, right_in, bottom_in }; - RECT rect_out { left_out, top_out, right_out, bottom_out }; - - ctx->VideoProcessorSetStreamSourceRect(processor.get(), 1, TRUE, &rect_in); - ctx->VideoProcessorSetStreamDestRect(processor.get(), 1, TRUE, &rect_out); + cursor_view.TopLeftX = x; + cursor_view.TopLeftY = y; } int set_cursor_texture(texture2d_t::pointer texture, LONG width, LONG height) { - D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC input_desc = { 0, (D3D11_VPIV_DIMENSION)D3D11_VPIV_DIMENSION_TEXTURE2D, { 0, 0 } }; + auto device = (device_t::pointer)data; - video::processor_in_t::pointer processor_in_p; - auto status = device->CreateVideoProcessorInputView(texture, processor_e.get(), &input_desc, &processor_in_p); + cursor_view.Width = width; + cursor_view.Height = height; + + D3D11_SHADER_RESOURCE_VIEW_DESC desc { + DXGI_FORMAT_B8G8R8A8_UNORM, + D3D11_SRV_DIMENSION_TEXTURE2D + }; + desc.Texture2D.MipLevels = 1; + + auto status = device->CreateShaderResourceView(texture, &desc, &img.input_res); if(FAILED(status)) { - BOOST_LOG(error) << "Failed to create cursor VideoProcessorInputView [0x"sv << util::hex(status).to_string_view() << ']'; + BOOST_LOG(error) << "Failed to create cursor shader resource view [0x"sv << 
util::hex(status).to_string_view() << ']'; return -1; } - cursor_in.reset(processor_in_p); - - cursor_width = width; - cursor_height = height; - cursor_scaled_width = ((double)width) / in_width * out_width; - cursor_scaled_height = ((double)height) / in_height * out_height; - return 0; } int convert(platf::img_t &img_base) override { auto &img = (img_d3d_t&)img_base; - auto it = texture_to_processor_in.find(img.texture.get()); - if(it == std::end(texture_to_processor_in)) { - D3D11_VIDEO_PROCESSOR_INPUT_VIEW_DESC input_desc = { 0, (D3D11_VPIV_DIMENSION)D3D11_VPIV_DIMENSION_TEXTURE2D, { 0, 0 } }; + if(!img.input_res) { + auto device = (device_t::pointer)data; - video::processor_in_t::pointer processor_in_p; - auto status = device->CreateVideoProcessorInputView(img.texture.get(), processor_e.get(), &input_desc, &processor_in_p); + D3D11_SHADER_RESOURCE_VIEW_DESC desc { + DXGI_FORMAT_B8G8R8A8_UNORM, + D3D11_SRV_DIMENSION_TEXTURE2D + }; + desc.Texture2D.MipLevels = 1; + + auto status = device->CreateShaderResourceView(img.texture.get(), &desc, &img.input_res); if(FAILED(status)) { - BOOST_LOG(error) << "Failed to create VideoProcessorInputView [0x"sv << util::hex(status).to_string_view() << ']'; + BOOST_LOG(error) << "Failed to create input shader resource view [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - it = texture_to_processor_in.emplace(img.texture.get(), processor_in_p).first; } - auto &processor_in = it->second; - D3D11_VIDEO_PROCESSOR_STREAM stream[] { - { TRUE, 0, 0, 0, 0, nullptr, processor_in.get() }, - { TRUE, 0, 0, 0, 0, nullptr, cursor_in.get() } - }; + auto input_res_p = img.input_res.get(); - auto status = ctx->VideoProcessorBlt(processor.get(), processor_out.get(), 0, cursor_visible ? 2 : 1, stream); - if(FAILED(status)) { - BOOST_LOG(error) << "Failed size and color conversion [0x"sv << util::hex(status).to_string_view() << ']'; - return -1; + if(cursor_visible) { + _init_view_port(img.width, img.height); + + device_ctx_p->OMSetRenderTargets(1, &scene_rt, nullptr); + device_ctx_p->VSSetShader(scene_vs.get(), nullptr, 0); + device_ctx_p->PSSetShader(scene_ps.get(), nullptr, 0); + device_ctx_p->PSSetShaderResources(0, 1, &input_res_p); + + device_ctx_p->Draw(3, 0); + + device_ctx_p->OMSetBlendState(blend_enable.get(), nullptr, 0xFFFFFFFFu); + device_ctx_p->RSSetViewports(1, &cursor_view); + device_ctx_p->PSSetShaderResources(0, 1, &this->img.input_res); + device_ctx_p->Draw(3, 0); + device_ctx_p->OMSetBlendState(blend_disable.get(), nullptr, 0xFFFFFFFFu); + + input_res_p = scene_sr.get(); } + _init_view_port(out_width, out_height); + device_ctx_p->OMSetRenderTargets(1, &nv12_Y_rt, nullptr); + device_ctx_p->VSSetShader(scene_vs.get(), nullptr, 0); + device_ctx_p->PSSetShader(convert_Y_ps.get(), nullptr, 0); + device_ctx_p->PSSetShaderResources(0, 1, &input_res_p); + device_ctx_p->Draw(3, 0); + + _init_view_port(out_width / 2, out_height / 2); + device_ctx_p->OMSetRenderTargets(1, &nv12_UV_rt, nullptr); + device_ctx_p->VSSetShader(convert_UV_vs.get(), nullptr, 0); + device_ctx_p->PSSetShader(convert_UV_ps.get(), nullptr, 0); + device_ctx_p->PSSetShaderResources(0, 1, &input_res_p); + device_ctx_p->Draw(3, 0); + return 0; } void set_colorspace(std::uint32_t colorspace, std::uint32_t color_range) override { - colorspace |= (color_range >> 4); - ctx->VideoProcessorSetOutputColorSpace(processor.get(), (D3D11_VIDEO_PROCESSOR_COLOR_SPACE*)&colorspace); + switch (colorspace) { + case 5: // SWS_CS_SMPTE170M + color_p = &colors[0]; + break; + case 1: // 
SWS_CS_ITU709 + color_p = &colors[2]; + break; + case 9: // SWS_CS_BT2020 + default: + BOOST_LOG(warning) << "Colorspace: ["sv << colorspace << "] not yet supported: switching to default"sv; + color_p = &colors[0]; + }; + + if(color_range > 1) { + // Full range + ++color_p; + } + + auto color_matrix = make_buffer((device_t::pointer)data, *color_p); + if(!color_matrix) { + BOOST_LOG(warning) << "Failed to create color matrix"sv; + return; + } + + device_ctx_p->PSSetConstantBuffers(0, 1, &color_matrix); + this->color_matrix = std::move(color_matrix); } int init( @@ -194,62 +382,82 @@ public: ) { HRESULT status; + device_p->AddRef(); + data = device_p; + + this->device_ctx_p = device_ctx_p; + cursor_visible = false; + cursor_view.MinDepth = 0.0f; + cursor_view.MaxDepth = 1.0f; platf::hwdevice_t::img = &img; this->out_width = out_width; this->out_height = out_height; - this->in_width = in_width; - this->in_height = in_height; - video::device_t::pointer vdevice_p; - status = device_p->QueryInterface(IID_ID3D11VideoDevice, (void**)&vdevice_p); - if(FAILED(status)) { - BOOST_LOG(error) << "Failed to query ID3D11VideoDevice interface [0x"sv << util::hex(status).to_string_view() << ']'; + status = device_p->CreateVertexShader(scene_vs_hlsl->GetBufferPointer(), scene_vs_hlsl->GetBufferSize(), nullptr, &scene_vs); + if(status) { + BOOST_LOG(error) << "Failed to create scene vertex shader [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - device.reset(vdevice_p); - video::ctx_t::pointer ctx_p; - status = device_ctx_p->QueryInterface(IID_ID3D11VideoContext, (void**)&ctx_p); - if(FAILED(status)) { - BOOST_LOG(error) << "Failed to query ID3D11VideoContext interface [0x"sv << util::hex(status).to_string_view() << ']'; + status = device_p->CreatePixelShader(convert_Y_ps_hlsl->GetBufferPointer(), convert_Y_ps_hlsl->GetBufferSize(), nullptr, &convert_Y_ps); + if(status) { + BOOST_LOG(error) << "Failed to create convertY pixel shader [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - ctx.reset(ctx_p); - D3D11_VIDEO_PROCESSOR_CONTENT_DESC contentDesc { - D3D11_VIDEO_FRAME_FORMAT_PROGRESSIVE, - { 1, 1 }, (UINT)in_width, (UINT)in_height, - { 1, 1 }, (UINT)out_width, (UINT)out_height, - D3D11_VIDEO_USAGE_OPTIMAL_QUALITY + status = device_p->CreatePixelShader(convert_UV_ps_hlsl->GetBufferPointer(), convert_UV_ps_hlsl->GetBufferSize(), nullptr, &convert_UV_ps); + if(status) { + BOOST_LOG(error) << "Failed to create convertUV pixel shader [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + status = device_p->CreateVertexShader(convert_UV_vs_hlsl->GetBufferPointer(), convert_UV_vs_hlsl->GetBufferSize(), nullptr, &convert_UV_vs); + if(status) { + BOOST_LOG(error) << "Failed to create convertUV vertex shader [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + status = device_p->CreatePixelShader(scene_ps_hlsl->GetBufferPointer(), scene_ps_hlsl->GetBufferSize(), nullptr, &scene_ps); + if(status) { + BOOST_LOG(error) << "Failed to create scene pixel shader [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + blend_disable = make_blend(device_p, false); + blend_enable = make_blend(device_p, true); + + if(!blend_disable || !blend_enable) { + return -1; + } + + if(_init_rt(scene_sr, scene_rt, in_width, in_height, DXGI_FORMAT_B8G8R8A8_UNORM)) { + return -1; + } + + color_matrix = make_buffer(device_p, colors[0]); + if(!color_matrix) { + BOOST_LOG(error) << "Failed to create color matrix buffer"sv; + return -1; + } + + float 
info_in[16 / sizeof(float)] { 1.0f / (float)out_width }; //aligned to 16-byte + info_scene = make_buffer(device_p, info_in); + if(!info_in) { + BOOST_LOG(error) << "Failed to create info scene buffer"sv; + return -1; + } + + D3D11_INPUT_ELEMENT_DESC layout_desc { + "SV_Position", 0, DXGI_FORMAT_R32G32B32_FLOAT, 0, 0, D3D11_INPUT_PER_VERTEX_DATA, 0 }; - video::processor_enum_t::pointer vp_e_p; - status = device->CreateVideoProcessorEnumerator(&contentDesc, &vp_e_p); - if(FAILED(status)) { - BOOST_LOG(error) << "Failed to create video processor enumerator [0x"sv << util::hex(status).to_string_view() << ']'; - return -1; - } - processor_e.reset(vp_e_p); - - D3D11_VIDEO_PROCESSOR_CAPS proc_caps; - processor_e->GetVideoProcessorCaps(&proc_caps); - if(!(proc_caps.FeatureCaps & D3D11_VIDEO_PROCESSOR_FEATURE_CAPS_ALPHA_STREAM)) { - BOOST_LOG(warning) << "VideoProcessorSetStreamAlpha() not supported, hardware accelerated mouse cannot be added to the video stream"sv; - } - - video::processor_t::pointer processor_p; - status = device->CreateVideoProcessor(processor_e.get(), 0, &processor_p); - if(FAILED(status)) { - BOOST_LOG(error) << "Failed to create video processor [0x"sv << util::hex(status).to_string_view() << ']'; - return -1; - } - processor.reset(processor_p); - - // Tell video processor alpha values need to be enabled - ctx->VideoProcessorSetStreamAlpha(processor.get(), 1, TRUE, 1.0f); + status = device_p->CreateInputLayout( + &layout_desc, 1, + convert_UV_vs_hlsl->GetBufferPointer(), convert_UV_vs_hlsl->GetBufferSize(), + &input_layout); D3D11_TEXTURE2D_DESC t {}; t.Width = out_width; @@ -259,34 +467,61 @@ public: t.SampleDesc.Count = 1; t.Usage = D3D11_USAGE_DEFAULT; t.Format = pix_fmt == pix_fmt_e::nv12 ? DXGI_FORMAT_NV12 : DXGI_FORMAT_P010; - t.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_VIDEO_ENCODER; + t.BindFlags = D3D11_BIND_RENDER_TARGET; - dxgi::texture2d_t::pointer tex_p {}; - status = device_p->CreateTexture2D(&t, nullptr, &tex_p); + status = device_p->CreateTexture2D(&t, nullptr, &img.texture); if(FAILED(status)) { - BOOST_LOG(error) << "Failed to create video output texture [0x"sv << util::hex(status).to_string_view() << ']'; + BOOST_LOG(error) << "Failed to create render target texture [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - img.texture.reset(tex_p); img.display = std::move(display); img.width = out_width; img.height = out_height; - img.data = (std::uint8_t*)tex_p; + img.data = (std::uint8_t*)img.texture.get(); img.row_pitch = out_width; img.pixel_pitch = 1; - D3D11_VIDEO_PROCESSOR_OUTPUT_VIEW_DESC output_desc { D3D11_VPOV_DIMENSION_TEXTURE2D, 0 }; - video::processor_out_t::pointer processor_out_p; - status = device->CreateVideoProcessorOutputView(img.texture.get(), processor_e.get(), &output_desc, &processor_out_p); + D3D11_RENDER_TARGET_VIEW_DESC nv12_rt_desc { + DXGI_FORMAT_R8_UNORM, + D3D11_RTV_DIMENSION_TEXTURE2D + }; + + status = device_p->CreateRenderTargetView(img.texture.get(), &nv12_rt_desc, &nv12_Y_rt); if(FAILED(status)) { - BOOST_LOG(error) << "Failed to create VideoProcessorOutputView [0x"sv << util::hex(status).to_string_view() << ']'; + BOOST_LOG(error) << "Failed to create render target view [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - processor_out.reset(processor_out_p); - device_p->AddRef(); - data = device_p; + nv12_rt_desc.Format = DXGI_FORMAT_R8G8_UNORM; + status = device_p->CreateRenderTargetView(img.texture.get(), &nv12_rt_desc, &nv12_UV_rt); + if(FAILED(status)) { + BOOST_LOG(error) << "Failed 
to create render target view [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + D3D11_SAMPLER_DESC sampler_desc {}; + sampler_desc.Filter = D3D11_FILTER_MIN_MAG_MIP_LINEAR; + sampler_desc.AddressU = D3D11_TEXTURE_ADDRESS_CLAMP; + sampler_desc.AddressV = D3D11_TEXTURE_ADDRESS_CLAMP; + sampler_desc.AddressW = D3D11_TEXTURE_ADDRESS_WRAP; + sampler_desc.ComparisonFunc = D3D11_COMPARISON_NEVER; + sampler_desc.MinLOD = 0; + sampler_desc.MaxLOD = D3D11_FLOAT32_MAX; + + status = device_p->CreateSamplerState(&sampler_desc, &sampler_linear); + if(FAILED(status)) { + BOOST_LOG(error) << "Failed to create point sampler state [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + device_ctx_p->OMSetBlendState(blend_disable.get(), nullptr, 0xFFFFFFFFu); + device_ctx_p->PSSetSamplers(0, 1, &sampler_linear); + device_ctx_p->PSSetConstantBuffers(0, 1, &color_matrix); + device_ctx_p->VSSetConstantBuffers(0, 1, &info_scene); + device_ctx_p->IASetPrimitiveTopology(D3D11_PRIMITIVE_TOPOLOGY_TRIANGLESTRIP); + device_ctx_p->IASetInputLayout(input_layout.get()); + return 0; } @@ -300,25 +535,104 @@ public: hwdevices_p->erase(it); } } +private: + void _init_view_port(float x, float y, float width, float height) { + D3D11_VIEWPORT view { + x, y, + width, height, + 0.0f, 1.0f + }; + + device_ctx_p->RSSetViewports(1, &view); + } + + void _init_view_port(float width, float height) { + _init_view_port(0.0f, 0.0f, width, height); + } + + int _init_rt(shader_res_t &shader_res, render_target_t &render_target, int width, int height, DXGI_FORMAT format) { + D3D11_TEXTURE2D_DESC desc {}; + + desc.Width = width; + desc.Height = height; + desc.Format = format; + desc.Usage = D3D11_USAGE_DEFAULT; + desc.BindFlags = D3D11_BIND_RENDER_TARGET | D3D11_BIND_SHADER_RESOURCE; + desc.MipLevels = 1; + desc.ArraySize = 1; + desc.SampleDesc.Count = 1; + + auto device = (device_t::pointer)data; + + texture2d_t tex; + auto status = device->CreateTexture2D(&desc, nullptr, &tex); + if(status) { + BOOST_LOG(error) << "Failed to create render target texture for luma [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + + D3D11_SHADER_RESOURCE_VIEW_DESC shader_resource_desc { + format, + D3D11_SRV_DIMENSION_TEXTURE2D + }; + shader_resource_desc.Texture2D.MipLevels = 1; + + device->CreateShaderResourceView(tex.get(), &shader_resource_desc, &shader_res); + if(status) { + BOOST_LOG(error) << "Failed to create render target texture for luma [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + D3D11_RENDER_TARGET_VIEW_DESC render_target_desc { + format, + D3D11_RTV_DIMENSION_TEXTURE2D + }; + + device->CreateRenderTargetView(tex.get(), &render_target_desc, &render_target); + if(status) { + BOOST_LOG(error) << "Failed to create render target view [0x"sv << util::hex(status).to_string_view() << ']'; + return -1; + } + + return 0; + } + +public: + color_t *color_p; + + blend_t blend_enable; + blend_t blend_disable; + + buf_t info_scene; + buf_t color_matrix; + + sampler_state_t sampler_linear; + + input_layout_t input_layout; + + render_target_t nv12_Y_rt; + render_target_t nv12_UV_rt; + + render_target_t scene_rt; + shader_res_t scene_sr; img_d3d_t img; - video::device_t device; - video::ctx_t ctx; - video::processor_enum_t processor_e; - video::processor_t processor; - video::processor_out_t processor_out; - std::unordered_map texture_to_processor_in; - video::processor_in_t cursor_in; + vs_t convert_UV_vs; + ps_t convert_UV_ps; + ps_t convert_Y_ps; + ps_t scene_ps; + 
vs_t scene_vs; + D3D11_VIEWPORT cursor_view; bool cursor_visible; - LONG cursor_width, cursor_height; - LONG cursor_scaled_width, cursor_scaled_height; + float out_width, out_height; - LONG in_width, in_height; - double out_width, out_height; + device_ctx_t::pointer device_ctx_p; + // The destructor will remove itself from the list of hardware devices, this is done synchronously std::vector *hwdevices_p; }; @@ -375,18 +689,17 @@ capture_e display_vram_t::snapshot(platf::img_t *img_base, std::chrono::millisec t.SampleDesc.Count = 1; t.Usage = D3D11_USAGE_DEFAULT; t.Format = DXGI_FORMAT_B8G8R8A8_UNORM; - t.BindFlags = D3D11_BIND_RENDER_TARGET; + t.BindFlags = D3D11_BIND_SHADER_RESOURCE; - dxgi::texture2d_t::pointer tex_p {}; - auto status = device->CreateTexture2D(&t, &data, &tex_p); + texture2d_t texture; + auto status = device->CreateTexture2D(&t, &data, &texture); if(FAILED(status)) { BOOST_LOG(error) << "Failed to create mouse texture [0x"sv << util::hex(status).to_string_view() << ']'; return capture_e::error; } - texture2d_t texture { tex_p }; for(auto *hwdevice : hwdevices) { - if(hwdevice->set_cursor_texture(tex_p, t.Width, t.Height)) { + if(hwdevice->set_cursor_texture(texture.get(), t.Width, t.Height)) { return capture_e::error; } } @@ -403,15 +716,14 @@ capture_e display_vram_t::snapshot(platf::img_t *img_base, std::chrono::millisec } if(frame_update_flag) { - texture2d_t::pointer src_p {}; - status = res->QueryInterface(IID_ID3D11Texture2D, (void **)&src_p); + texture2d_t src; + status = res->QueryInterface(IID_ID3D11Texture2D, (void **)&src); if(FAILED(status)) { BOOST_LOG(error) << "Couldn't query interface [0x"sv << util::hex(status).to_string_view() << ']'; return capture_e::error; } - texture2d_t src { src_p }; device_ctx->CopyResource(img->texture.get(), src.get()); } @@ -429,21 +741,19 @@ std::shared_ptr display_vram_t::alloc_img() { t.SampleDesc.Count = 1; t.Usage = D3D11_USAGE_DEFAULT; t.Format = format; - t.BindFlags = D3D11_BIND_RENDER_TARGET; + t.BindFlags = D3D11_BIND_SHADER_RESOURCE; - dxgi::texture2d_t::pointer tex_p {}; - auto status = device->CreateTexture2D(&t, nullptr, &tex_p); + auto status = device->CreateTexture2D(&t, nullptr, &img->texture); if(FAILED(status)) { BOOST_LOG(error) << "Failed to create img buf texture [0x"sv << util::hex(status).to_string_view() << ']'; return nullptr; } - img->data = (std::uint8_t*)tex_p; + img->data = (std::uint8_t*)img->texture.get(); img->row_pitch = 0; img->pixel_pitch = 4; img->width = 0; img->height = 0; - img->texture.reset(tex_p); img->display = shared_from_this(); return img; @@ -456,8 +766,7 @@ int display_vram_t::dummy_img(platf::img_t *img_base) { auto dummy_data = std::make_unique(width * height); D3D11_SUBRESOURCE_DATA data { dummy_data.get(), - (UINT)img->row_pitch, - 0 + (UINT)img->row_pitch }; D3D11_TEXTURE2D_DESC t {}; @@ -468,17 +777,17 @@ int display_vram_t::dummy_img(platf::img_t *img_base) { t.SampleDesc.Count = 1; t.Usage = D3D11_USAGE_DEFAULT; t.Format = format; - t.BindFlags = D3D11_BIND_RENDER_TARGET; + t.BindFlags = D3D11_BIND_SHADER_RESOURCE; - dxgi::texture2d_t::pointer tex_p {}; - auto status = device->CreateTexture2D(&t, &data, &tex_p); + dxgi::texture2d_t tex; + auto status = device->CreateTexture2D(&t, &data, &tex); if(FAILED(status)) { BOOST_LOG(error) << "Failed to create dummy texture [0x"sv << util::hex(status).to_string_view() << ']'; return -1; } - img->data = (std::uint8_t*)tex_p; - img->texture.reset(tex_p); + img->texture = std::move(tex); + img->data = 
(std::uint8_t*)img->texture.get(); img->height = height; img->width = width; img->pixel_pitch = 4; @@ -487,8 +796,8 @@ int display_vram_t::dummy_img(platf::img_t *img_base) { } std::shared_ptr display_vram_t::make_hwdevice(int width, int height, pix_fmt_e pix_fmt) { - if(pix_fmt != platf::pix_fmt_e::nv12 && pix_fmt != platf::pix_fmt_e::p010) { - BOOST_LOG(error) << "display_vram_t doesn't support pixel format ["sv << (int)pix_fmt << ']'; + if(pix_fmt != platf::pix_fmt_e::nv12) { + BOOST_LOG(error) << "display_vram_t doesn't support pixel format ["sv << from_pix_fmt(pix_fmt) << ']'; return nullptr; } @@ -515,4 +824,44 @@ std::shared_ptr display_vram_t::make_hwdevice(int width, int return hwdevice; } + +int init() { + for(auto &color : colors) { + BOOST_LOG(debug) << "Color Matrix"sv; + BOOST_LOG(debug) << "Y ["sv << color.color_vec_y.x << ", "sv << color.color_vec_y.y << ", "sv << color.color_vec_y.z << ", "sv << color.color_vec_y.w << ']'; + BOOST_LOG(debug) << "U ["sv << color.color_vec_u.x << ", "sv << color.color_vec_u.y << ", "sv << color.color_vec_u.z << ", "sv << color.color_vec_u.w << ']'; + BOOST_LOG(debug) << "V ["sv << color.color_vec_v.x << ", "sv << color.color_vec_v.y << ", "sv << color.color_vec_v.z << ", "sv << color.color_vec_v.w << ']'; + BOOST_LOG(debug) << "range Y ["sv << color.range_y.x << ", "sv << color.range_y.y << ']'; + BOOST_LOG(debug) << "range UV ["sv << color.range_uv.x << ", "sv << color.range_uv.y << ']'; + } + + BOOST_LOG(info) << "Compiling shaders..."sv; + scene_vs_hlsl = compile_vertex_shader(SUNSHINE_SHADERS_DIR "/SceneVS.hlsl"); + if(!scene_vs_hlsl) { + return -1; + } + + convert_Y_ps_hlsl = compile_pixel_shader(SUNSHINE_SHADERS_DIR "/ConvertYPS.hlsl"); + if(!convert_Y_ps_hlsl) { + return -1; + } + + convert_UV_ps_hlsl = compile_pixel_shader(SUNSHINE_SHADERS_DIR "/ConvertUVPS.hlsl"); + if(!convert_UV_ps_hlsl) { + return -1; + } + + convert_UV_vs_hlsl = compile_vertex_shader(SUNSHINE_SHADERS_DIR "/ConvertUVVS.hlsl"); + if(!convert_UV_vs_hlsl) { + return -1; + } + + scene_ps_hlsl = compile_pixel_shader(SUNSHINE_SHADERS_DIR "/ScenePS.hlsl"); + if(!scene_ps_hlsl) { + return -1; + } + BOOST_LOG(info) << "Compiled shaders"sv; + + return 0; +} } \ No newline at end of file diff --git a/sunshine/utility.h b/sunshine/utility.h index 0c03c51d..63be108c 100644 --- a/sunshine/utility.h +++ b/sunshine/utility.h @@ -76,37 +76,6 @@ struct __either { template using either_t = typename __either::type; -template -struct __false_v; - -template -struct __false_v>> { - static constexpr std::nullopt_t value = std::nullopt; -}; - -template -struct __false_v || instantiation_of_v || instantiation_of_v) - >> { - static constexpr std::nullptr_t value = nullptr; -}; - -template -struct __false_v>> { - static constexpr bool value = false; -}; - -template -static constexpr auto false_v = __false_v::value; - -template -using optional_t = either_t< - (std::is_same_v || - instantiation_of_v || - instantiation_of_v || - std::is_pointer_v), - T, std::optional>; - template struct overloaded : Ts... { using Ts::operator()...; }; template overloaded(Ts...) 
-> overloaded; @@ -362,35 +331,6 @@ auto enm(T& val) -> std::underlying_type_t& { return *reinterpret_cast*>(&val); } -template -struct Function { - typedef ReturnType (*type)(Args...); -}; - -template::type function> -struct Destroy { - typedef T pointer; - - void operator()(pointer p) { - function(p); - } -}; - -template::type function> -using safe_ptr = std::unique_ptr>; - -// You cannot specialize an alias -template::type function> -using safe_ptr_v2 = std::unique_ptr>; - -template -void c_free(T *p) { - free(p); -} - -template -using c_ptr = safe_ptr>; - inline std::int64_t from_chars(const char *begin, const char *end) { std::int64_t res {}; std::int64_t mul = 1; @@ -436,6 +376,163 @@ public: } }; +// Compared to std::unique_ptr, it adds the ability to get the address of the pointer itself +template > +class uniq_ptr { +public: + using element_type = T; + using pointer = element_type*; + using deleter_type = D; + + constexpr uniq_ptr() noexcept : _p { nullptr } {} + constexpr uniq_ptr(std::nullptr_t) noexcept : _p { nullptr } {} + + uniq_ptr(const uniq_ptr &other) noexcept = delete; + uniq_ptr &operator=(const uniq_ptr &other) noexcept = delete; + + template + uniq_ptr(V *p) noexcept : _p { p } { + static_assert(std::is_same_v || std::is_same_v || std::is_base_of_v, "element_type must be base class of V"); + } + + template + uniq_ptr(std::unique_ptr &&uniq) noexcept : _p { uniq.release() } { + static_assert(std::is_same_v || std::is_same_v || std::is_base_of_v, "element_type must be base class of V"); + } + + template + uniq_ptr(uniq_ptr &&other) noexcept : _p { other.release() } { + static_assert(std::is_same_v || std::is_same_v || std::is_base_of_v, "element_type must be base class of V"); + } + + template + uniq_ptr &operator=(uniq_ptr &&other) noexcept { + static_assert(std::is_same_v || std::is_same_v || std::is_base_of_v, "element_type must be base class of V"); + reset(other.release()); + + return *this; + } + + template + uniq_ptr &operator=(std::unique_ptr &&uniq) noexcept { + static_assert(std::is_same_v || std::is_same_v || std::is_base_of_v, "element_type must be base class of V"); + + reset(uniq.release()); + + return *this; + } + + ~uniq_ptr() { + reset(); + } + + void reset(pointer p = pointer()) { + if(_p) { + _deleter(_p); + } + + _p = p; + } + + pointer release() { + auto tmp = _p; + _p = nullptr; + return tmp; + } + + pointer get() { + return _p; + } + + const pointer get() const { + return _p; + } + + const std::add_lvalue_reference_t operator*() const { + return *_p; + } + std::add_lvalue_reference_t operator*() { + return *_p; + } + const pointer operator->() const { + return _p; + } + pointer operator->() { + return _p; + } + pointer *operator&() const { + return &_p; + } + + pointer *operator&() { + return &_p; + } + + deleter_type& get_deleter() { + return _deleter; + } + + const deleter_type& get_deleter() const { + return _deleter; + } + + explicit operator bool() const { + return _p != nullptr; + } +protected: + pointer _p; + deleter_type _deleter; +}; + +template +bool operator==(const uniq_ptr& x, const uniq_ptr& y) { + return x.get() == y.get(); +} + +template +bool operator!=(const uniq_ptr& x, const uniq_ptr& y) { + return x.get() != y.get(); +} + +template +bool operator==(const std::unique_ptr& x, const uniq_ptr& y) { + return x.get() == y.get(); +} + +template +bool operator!=(const std::unique_ptr& x, const uniq_ptr& y) { + return x.get() != y.get(); +} + +template +bool operator==(const uniq_ptr& x, const std::unique_ptr& y) { + return x.get() == 
y.get(); +} + +template +bool operator!=(const uniq_ptr& x, const std::unique_ptr& y) { + return x.get() != y.get(); +} + +template +bool operator==(const uniq_ptr& x, std::nullptr_t) { + return !(bool)x; +} + +template +bool operator!=(const uniq_ptr& x, std::nullptr_t) { + return (bool)x; +} + +template +bool operator==(std::nullptr_t, const uniq_ptr& y) { + return !(bool)y; +} + +template +bool operator!=(std::nullptr_t, const uniq_ptr& y) { + return (bool)y; +} template class wrap_ptr { @@ -510,6 +607,43 @@ private: pointer _p; }; +template +struct __false_v; + +template +struct __false_v>> { + static constexpr std::nullopt_t value = std::nullopt; +}; + +template +struct __false_v || + instantiation_of_v || + instantiation_of_v || + instantiation_of_v + ) + >> { + static constexpr std::nullptr_t value = nullptr; +}; + +template +struct __false_v>> { + static constexpr bool value = false; +}; + +template +static constexpr auto false_v = __false_v::value; + +template +using optional_t = either_t< + (std::is_same_v || + instantiation_of_v || + instantiation_of_v || + instantiation_of_v || + std::is_pointer_v), + T, std::optional>; + template class buffer_t { public: @@ -569,6 +703,35 @@ T either(std::optional &&l, T &&r) { return std::forward(r); } +template +struct Function { + typedef ReturnType (*type)(Args...); +}; + +template::type function> +struct Destroy { + typedef T pointer; + + void operator()(pointer p) { + function(p); + } +}; + +template::type function> +using safe_ptr = uniq_ptr>; + +// You cannot specialize an alias +template::type function> +using safe_ptr_v2 = uniq_ptr>; + +template +void c_free(T *p) { + free(p); +} + +template +using c_ptr = safe_ptr>; + namespace endian { template struct endianness { diff --git a/sunshine/video.cpp b/sunshine/video.cpp index b2e23855..c6befe8d 100644 --- a/sunshine/video.cpp +++ b/sunshine/video.cpp @@ -58,20 +58,6 @@ enum class profile_hevc_e : int { }; } -namespace amd { - -enum class profile_h264_e : int { - main, - high, - constrained_baseline, - constrained_high, -}; - -enum class profile_hevc_e : int { - main, -}; -} - using ctx_t = util::safe_ptr; using frame_t = util::safe_ptr; using buffer_t = util::safe_ptr; @@ -82,10 +68,10 @@ platf::dev_type_e map_dev_type(AVHWDeviceType type); platf::pix_fmt_e map_pix_fmt(AVPixelFormat fmt); void sw_img_to_frame(const platf::img_t &img, frame_t &frame); -void nv_d3d_img_to_frame(const platf::img_t &img, frame_t &frame); -util::Either nv_d3d_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx); -void amd_d3d_img_to_frame(const platf::img_t &img, frame_t &frame); -util::Either amd_d3d_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx); +void dxgi_img_to_frame(const platf::img_t &img, frame_t &frame); +util::Either dxgi_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx); +void dxgi_img_to_frame(const platf::img_t &img, frame_t &frame); +util::Either dxgi_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx); util::Either make_hwdevice_ctx(AVHWDeviceType type, void *hwdevice_ctx); int hwframe_ctx(ctx_t &ctx, buffer_t &hwdevice, AVPixelFormat format); @@ -297,16 +283,16 @@ static encoder_t nvenc { false, true, - nv_d3d_img_to_frame, - nv_d3d_make_hwdevice_ctx + dxgi_img_to_frame, + dxgi_make_hwdevice_ctx }; static encoder_t amdvce { "amdvce"sv, - { (int)amd::profile_h264_e::high, (int)amd::profile_hevc_e::main }, + { FF_PROFILE_H264_HIGH, FF_PROFILE_HEVC_MAIN }, AV_HWDEVICE_TYPE_D3D11VA, AV_PIX_FMT_D3D11, - AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P, + AV_PIX_FMT_NV12, AV_PIX_FMT_P010, { { { 
"header_insertion_mode"s, "idr"s }, @@ -331,8 +317,8 @@ static encoder_t amdvce { false, true, - amd_d3d_img_to_frame, - amd_d3d_make_hwdevice_ctx + dxgi_img_to_frame, + dxgi_make_hwdevice_ctx }; #endif @@ -373,11 +359,9 @@ static encoder_t software { static std::vector encoders { #ifdef _WIN32 nvenc, -#endif - software, -#ifdef _WIN32 amdvce, #endif + software }; void reset_display(std::shared_ptr &disp, AVHWDeviceType type) { @@ -622,6 +606,7 @@ std::optional make_session(const encoder_t &encoder, const config_t & case 0: default: // Rec. 601 + BOOST_LOG(info) << "Color coding [Rec. 601]"sv; ctx->color_primaries = AVCOL_PRI_SMPTE170M; ctx->color_trc = AVCOL_TRC_SMPTE170M; ctx->colorspace = AVCOL_SPC_SMPTE170M; @@ -630,6 +615,7 @@ std::optional make_session(const encoder_t &encoder, const config_t & case 1: // Rec. 709 + BOOST_LOG(info) << "Color coding [Rec. 709]"sv; ctx->color_primaries = AVCOL_PRI_BT709; ctx->color_trc = AVCOL_TRC_BT709; ctx->colorspace = AVCOL_SPC_BT709; @@ -638,12 +624,14 @@ std::optional make_session(const encoder_t &encoder, const config_t & case 2: // Rec. 2020 + BOOST_LOG(info) << "Color coding [Rec. 2020]"sv; ctx->color_primaries = AVCOL_PRI_BT2020; ctx->color_trc = AVCOL_TRC_BT2020_10; ctx->colorspace = AVCOL_SPC_BT2020_NCL; sws_color_space = SWS_CS_BT2020; break; } + BOOST_LOG(info) << "Color range: ["sv << ((config.encoderCscMode & 0x1) ? "JPEG"sv : "MPEG"sv) << ']'; AVPixelFormat sw_fmt; if(config.dynamicRange == 0) { @@ -1196,7 +1184,7 @@ bool validate_encoder(encoder_t &encoder) { encoder.hevc[encoder_t::PASSED] = test_hevc; std::vector> configs { - { encoder_t::DYNAMIC_RANGE, { 1920, 1080, 60, 1000, 1, 0, 1, 1, 1 } } + { encoder_t::DYNAMIC_RANGE, { 1920, 1080, 60, 1000, 1, 0, 3, 1, 1 } } }; for(auto &[flag, config] : configs) { auto h264 = config; @@ -1300,7 +1288,16 @@ int hwframe_ctx(ctx_t &ctx, buffer_t &hwdevice, AVPixelFormat format) { void sw_img_to_frame(const platf::img_t &img, frame_t &frame) {} #ifdef _WIN32 -void nv_d3d_img_to_frame(const platf::img_t &img, frame_t &frame) { +} + +// Ugly, but need to declare for wio +namespace platf::dxgi { +void lock(void *hwdevice); +void unlock(void *hwdevice); +} +void do_nothing(void*) {} +namespace video { +void dxgi_img_to_frame(const platf::img_t &img, frame_t &frame) { if(img.data == frame->data[0]) { return; } @@ -1323,65 +1320,25 @@ void nv_d3d_img_to_frame(const platf::img_t &img, frame_t &frame) { frame->width = img.width; } -void amd_d3d_img_to_frame(const platf::img_t &img, frame_t &frame) { - if(img.data == frame->data[0]) { - return; - } - - // Need to have something refcounted - if(!frame->buf[0]) { - frame->buf[0] = av_buffer_allocz(sizeof(AVD3D11FrameDescriptor)); - } - - auto desc = (AVD3D11FrameDescriptor*)frame->buf[0]->data; - desc->texture = (ID3D11Texture2D*)img.data; - desc->index = 0; - - frame->data[0] = img.data; - frame->data[1] = 0; - - frame->linesize[0] = img.row_pitch; - - frame->height = img.height; - frame->width = img.width; -} - - -util::Either nv_d3d_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx) { +util::Either dxgi_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx) { buffer_t ctx_buf { av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_D3D11VA) }; auto ctx = (AVD3D11VADeviceContext*)((AVHWDeviceContext*)ctx_buf->data)->hwctx; std::fill_n((std::uint8_t*)ctx, sizeof(AVD3D11VADeviceContext), 0); auto device = (ID3D11Device*)hwdevice_ctx->data; + device->AddRef(); ctx->device = device; + ctx->lock_ctx = (void*)1; + ctx->lock = do_nothing; + ctx->unlock = do_nothing; + auto err = 
av_hwdevice_ctx_init(ctx_buf.get()); if(err) { char err_str[AV_ERROR_MAX_STRING_SIZE] {0}; - BOOST_LOG(error) << "Failed to create FFMpeg nvenc: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err); - - return err; - } - - return ctx_buf; -} - -util::Either amd_d3d_make_hwdevice_ctx(platf::hwdevice_t *hwdevice_ctx) { - buffer_t ctx_buf { av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_D3D11VA) }; - auto ctx = (AVD3D11VADeviceContext*)((AVHWDeviceContext*)ctx_buf->data)->hwctx; - - std::fill_n((std::uint8_t*)ctx, sizeof(AVD3D11VADeviceContext), 0); - - auto device = (ID3D11Device*)hwdevice_ctx->data; - device->AddRef(); - ctx->device = device; - - auto err = av_hwdevice_ctx_init(ctx_buf.get()); - if(err) { - char err_str[AV_ERROR_MAX_STRING_SIZE] {0}; - BOOST_LOG(error) << "Failed to create FFMpeg amddech: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err); + BOOST_LOG(error) << "Failed to create FFMpeg hardware device context: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err); return err; }