Inject VUI data in SPS header if missing

loki 2021-06-13 21:29:32 +02:00
parent 30f7742f51
commit 8e32c8e6f4
10 changed files with 478 additions and 84 deletions


@@ -147,6 +147,7 @@ set(SUNSHINE_TARGET_FILES
third-party/moonlight-common-c/src/Rtsp.h
third-party/moonlight-common-c/src/RtspParser.c
third-party/moonlight-common-c/src/Video.h
sunshine/cbs.cpp
sunshine/utility.h
sunshine/uuid.h
sunshine/config.h

sunshine/cbs.cpp (new file, 220 lines)

@@ -0,0 +1,220 @@
extern "C" {
#include <cbs/cbs_h264.h>
#include <cbs/cbs_h265.h>
#include <cbs/h264_levels.h>
#include <libavcodec/avcodec.h>
}
#include "main.h"
#include "utility.h"
using namespace std::literals;
namespace cbs {
void close(CodedBitstreamContext *c) {
ff_cbs_close(&c);
}
using ctx_t = util::safe_ptr<CodedBitstreamContext, close>;
class frag_t : public CodedBitstreamFragment {
public:
frag_t(frag_t &&o) {
std::copy((std::uint8_t *)&o, (std::uint8_t *)(&o + 1), (std::uint8_t *)this);
o.data = nullptr;
o.units = nullptr;
};
frag_t() {
std::fill_n((std::uint8_t *)this, sizeof(*this), 0);
}
frag_t &operator=(frag_t &&o) {
std::copy((std::uint8_t *)&o, (std::uint8_t *)(&o + 1), (std::uint8_t *)this);
o.data = nullptr;
o.units = nullptr;
return *this;
};
~frag_t() {
if(data || units) {
ff_cbs_fragment_free(this);
}
}
};
util::buffer_t<std::uint8_t> write(const H264RawNALUnitHeader &uh, AVCodecID codec_id) {
cbs::frag_t frag;
auto err = ff_cbs_insert_unit_content(&frag, -1, uh.nal_unit_type, (void *)&uh, nullptr);
if(err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Could not NAL unit SPS: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return {};
}
cbs::ctx_t cbs_ctx;
ff_cbs_init(&cbs_ctx, codec_id, nullptr);
err = ff_cbs_write_fragment_data(cbs_ctx.get(), &frag);
if(err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Could not write fragment data: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return {};
}
// frag.data_size * 8 - frag.data_bit_padding == bits in fragment
util::buffer_t<std::uint8_t> data { frag.data_size };
std::copy_n(frag.data, frag.data_size, std::begin(data));
return data;
}
util::buffer_t<std::uint8_t> make_sps_h264(const AVCodecContext *ctx) {
H264RawSPS sps {};
/* b_per_p == ctx->max_b_frames for h264 */
/* desired_b_depth == avoption("b_depth") == 1 */
/* max_b_depth == std::min(av_log2(ctx->b_per_p) + 1, desired_b_depth) ==> 1 */
auto max_b_depth = 1;
auto dpb_frame = ctx->gop_size == 1 ? 0 : 1 + max_b_depth;
auto mb_width = (FFALIGN(ctx->width, 16) / 16) * 16;
auto mb_height = (FFALIGN(ctx->height, 16) / 16) * 16;
sps.nal_unit_header.nal_ref_idc = 3;
sps.nal_unit_header.nal_unit_type = H264_NAL_SPS;
sps.profile_idc = FF_PROFILE_H264_HIGH & 0xFF;
sps.constraint_set1_flag = 1;
if(ctx->level != FF_LEVEL_UNKNOWN) {
sps.level_idc = ctx->level;
}
else {
auto framerate = ctx->framerate;
auto level = ff_h264_guess_level(
sps.profile_idc,
ctx->bit_rate,
framerate.num / framerate.den,
mb_width,
mb_height,
dpb_frame);
if(!level) {
BOOST_LOG(error) << "Could not guess h264 level"sv;
return {};
}
sps.level_idc = level->level_idc;
}
sps.seq_parameter_set_id = 0;
sps.chroma_format_idc = 1;
sps.log2_max_frame_num_minus4 = 3; //4;
sps.pic_order_cnt_type = 0;
sps.log2_max_pic_order_cnt_lsb_minus4 = 0; //4;
sps.max_num_ref_frames = dpb_frame;
sps.pic_width_in_mbs_minus1 = mb_width / 16 - 1;
sps.pic_height_in_map_units_minus1 = mb_height / 16 - 1;
sps.frame_mbs_only_flag = 1;
sps.direct_8x8_inference_flag = 1;
if(ctx->width != mb_width || ctx->height != mb_height) {
sps.frame_cropping_flag = 1;
sps.frame_crop_left_offset = 0;
sps.frame_crop_top_offset = 0;
sps.frame_crop_right_offset = (mb_width - ctx->width) / 2;
sps.frame_crop_bottom_offset = (mb_height - ctx->height) / 2;
}
sps.vui_parameters_present_flag = 1;
auto &vui = sps.vui;
vui.video_format = 5;
vui.colour_description_present_flag = 1;
vui.video_signal_type_present_flag = 1;
vui.video_full_range_flag = ctx->color_range == AVCOL_RANGE_JPEG;
vui.colour_primaries = ctx->color_primaries;
vui.transfer_characteristics = ctx->color_trc;
vui.matrix_coefficients = ctx->colorspace;
vui.low_delay_hrd_flag = 1 - vui.fixed_frame_rate_flag;
vui.bitstream_restriction_flag = 1;
vui.motion_vectors_over_pic_boundaries_flag = 1;
vui.log2_max_mv_length_horizontal = 15;
vui.log2_max_mv_length_vertical = 15;
vui.max_num_reorder_frames = max_b_depth;
vui.max_dec_frame_buffering = max_b_depth + 1;
return write(sps.nal_unit_header, AV_CODEC_ID_H264);
}
util::buffer_t<std::uint8_t> read_sps(const AVPacket *packet, int codec_id) {
cbs::ctx_t ctx;
if(ff_cbs_init(&ctx, (AVCodecID)codec_id, nullptr)) {
return {};
}
cbs::frag_t frag;
int err = ff_cbs_read_packet(ctx.get(), &frag, &*packet);
if(err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Couldn't read packet: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return {};
}
H264RawNALUnitHeader *p;
if(codec_id == AV_CODEC_ID_H264) {
p = (H264RawNALUnitHeader *)((CodedBitstreamH264Context *)ctx->priv_data)->active_sps;
}
else {
p = (H264RawNALUnitHeader *)((CodedBitstreamH265Context *)ctx->priv_data)->active_sps;
}
return write(*p, (AVCodecID)codec_id);
}
bool validate_sps(const AVPacket *packet, int codec_id) {
cbs::ctx_t ctx;
if(ff_cbs_init(&ctx, (AVCodecID)codec_id, nullptr)) {
return false;
}
cbs::frag_t frag;
int err = ff_cbs_read_packet(ctx.get(), &frag, &*packet);
if(err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Couldn't read packet: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return false;
}
if(codec_id == AV_CODEC_ID_H264) {
auto h264 = (CodedBitstreamH264Context *)ctx->priv_data;
if(!h264->active_sps->vui_parameters_present_flag) {
return false;
}
return true;
}
return ((CodedBitstreamH265Context *)ctx->priv_data)->active_sps->vui_parameters_present_flag;
}
} // namespace cbs

sunshine/cbs.h (new file, 20 lines)

@@ -0,0 +1,20 @@
#ifndef SUNSHINE_CBS_H
#define SUNSHINE_CBS_H
#include "utility.h"
struct AVPacket;
struct AVCodecContext;
namespace cbs {
util::buffer_t<std::uint8_t> read_sps(const AVPacket *packet, int codec_id);
util::buffer_t<std::uint8_t> make_sps_h264(const AVCodecContext *ctx);
util::buffer_t<std::uint8_t> make_sps_hevc(const AVCodecContext *ctx);
/**
* Check if SPS->VUI is present
*/
bool validate_sps(const AVPacket *packet, int codec_id);
} // namespace cbs
#endif
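For orientation, these declarations are consumed by video.cpp and stream.cpp further down in this commit: make_sps_h264() builds a replacement SPS carrying VUI data, read_sps() extracts the SPS the encoder actually emitted, and validate_sps() reports whether VUI is already present. A minimal sketch of how a session might hold the two buffers, assuming Sunshine's util::buffer_t; the struct and function names below are illustrative only, not part of the commit:

#include <cstdint>

#include "cbs.h"
#include "utility.h"

extern "C" {
#include <libavcodec/avcodec.h>
}

// Per-session state: a replacement SPS (with VUI) plus the encoder's own SPS bytes.
struct sps_fixup_t {
  util::buffer_t<std::uint8_t> replacement;
  util::buffer_t<std::uint8_t> original;

  // Build the replacement once, when the encoder is known to omit VUI parameters.
  void init(const AVCodecContext *avctx) {
    replacement = cbs::make_sps_h264(avctx);
  }

  // On the first keyframe, remember the SPS the encoder actually emitted so the
  // streaming thread can later locate and substitute it in the payload.
  void on_keyframe(const AVPacket *packet) {
    if(replacement.size() && !original.size()) {
      original = cbs::read_sps(packet, AV_CODEC_ID_H264);
    }
  }
};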


@@ -632,6 +632,15 @@ void videoBroadcastThread(safe::signal_t *shutdown_event, udp::socket &sock, vid
payload = { (char *)payload_new.data(), payload_new.size() };
}
if(packet->flags & AV_PKT_FLAG_KEY && packet->sps.old.size()) {
BOOST_LOG(debug) << "Replacing SPS header"sv;
std::string_view frame_old = packet->sps.old;
std::string_view frame_new = packet->sps.replacement;
payload_new = replace(payload, frame_old, frame_new);
payload = { (char *)payload_new.data(), payload_new.size() };
}
// insert packet headers
auto blocksize = session->config.packetsize + MAX_RTP_HEADER_SIZE;
auto payload_blocksize = blocksize - sizeof(video_packet_raw_t);
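The replace() call above substitutes the first occurrence of the encoder's original SPS bytes with the VUI-carrying replacement before the keyframe payload is split into RTP-sized blocks. Sunshine's actual replace helper is defined elsewhere in this file; as a rough standalone illustration of the idea (the name, signature, and return type below are assumptions, not the real helper):

#include <string_view>
#include <vector>

// Return a copy of payload with the first occurrence of old_seq replaced by
// new_seq; if old_seq does not occur, the payload is returned unchanged.
std::vector<char> replace_first(std::string_view payload, std::string_view old_seq, std::string_view new_seq) {
  auto pos = payload.find(old_seq);
  if(pos == std::string_view::npos) {
    return { payload.begin(), payload.end() };
  }

  std::vector<char> out;
  out.reserve(payload.size() - old_seq.size() + new_seq.size());
  out.insert(out.end(), payload.begin(), payload.begin() + pos);
  out.insert(out.end(), new_seq.begin(), new_seq.end());
  out.insert(out.end(), payload.begin() + pos + old_seq.size(), payload.end());
  return out;
}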


@@ -696,8 +696,15 @@ template<class T>
class buffer_t {
public:
buffer_t() : _els { 0 } {};
buffer_t(buffer_t &&) noexcept = default;
buffer_t &operator=(buffer_t &&other) noexcept = default;
buffer_t(buffer_t &&o) noexcept : _els { o._els }, _buf { std::move(o._buf) } {
o._els = 0;
}
buffer_t &operator=(buffer_t &&o) noexcept {
std::swap(_els, o._els);
std::swap(_buf, o._buf);
return *this;
};
explicit buffer_t(size_t elements) : _els { elements }, _buf { std::make_unique<T[]>(elements) } {}
explicit buffer_t(size_t elements, const T &t) : _els { elements }, _buf { std::make_unique<T[]>(elements) } {


@@ -7,11 +7,10 @@
#include <thread>
extern "C" {
#include <cbs/cbs_h264.h>
#include <cbs/cbs_h265.h>
#include <libswscale/swscale.h>
}
#include "cbs.h"
#include "config.h"
#include "main.h"
#include "platform/common.h"
@@ -46,43 +45,6 @@ using buffer_t = util::safe_ptr<AVBufferRef, free_buffer>;
using sws_t = util::safe_ptr<SwsContext, sws_freeContext>;
using img_event_t = std::shared_ptr<safe::event_t<std::shared_ptr<platf::img_t>>>;
namespace cbs {
void close(CodedBitstreamContext *c) {
ff_cbs_close(&c);
}
using ctx_t = util::safe_ptr<CodedBitstreamContext, close>;
class frag_t : public CodedBitstreamFragment {
public:
frag_t(frag_t &&o) {
std::copy((std::uint8_t *)&o, (std::uint8_t *)(&o + 1), (std::uint8_t *)this);
o.data = nullptr;
o.units = nullptr;
};
frag_t() {
std::fill_n((std::uint8_t *)this, sizeof(*this), 0);
}
frag_t &operator=(frag_t &&o) {
std::copy((std::uint8_t *)&o, (std::uint8_t *)(&o + 1), (std::uint8_t *)this);
o.data = nullptr;
o.units = nullptr;
return *this;
};
~frag_t() {
if(data || units) {
ff_cbs_fragment_free(this);
}
}
};
} // namespace cbs
namespace nv {
enum class profile_h264_e : int {
@@ -282,7 +244,7 @@ struct encoder_t {
REF_FRAMES_AUTOSELECT, // Allow encoder to select maximum reference frames (If !REF_FRAMES_RESTRICT --> REF_FRAMES_AUTOSELECT)
SLICE, // Allow frame to be partitioned into multiple slices
DYNAMIC_RANGE, // hdr
VUI_PARAMETERS,
VUI_PARAMETERS, // AMD encoder with VAAPI doesn't add VUI parameters to SPS
MAX_FLAGS
};
@@ -350,20 +312,25 @@ struct encoder_t {
class session_t {
public:
session_t() = default;
session_t(ctx_t &&ctx, util::wrap_ptr<platf::hwdevice_t> &&device) : ctx { std::move(ctx) }, device { std::move(device) } {}
session_t(ctx_t &&ctx, util::wrap_ptr<platf::hwdevice_t> &&device, util::buffer_t<std::uint8_t> &&sps) : ctx { std::move(ctx) }, device { std::move(device) }, sps { std::move(sps) } {}
session_t(session_t &&other) noexcept : ctx { std::move(other.ctx) }, device { std::move(other.device) } {}
session_t(session_t &&other) noexcept : ctx { std::move(other.ctx) }, device { std::move(other.device) }, sps { std::move(other.sps) }, sps_old { std::move(other.sps_old) } {}
// Ensure objects are destroyed in the correct order
session_t &operator=(session_t &&other) {
device = std::move(other.device);
ctx = std::move(other.ctx);
device = std::move(other.device);
ctx = std::move(other.ctx);
sps = std::move(other.sps);
sps_old = std::move(other.sps_old);
return *this;
}
ctx_t ctx;
util::wrap_ptr<platf::hwdevice_t> device;
util::buffer_t<std::uint8_t> sps;
util::buffer_t<std::uint8_t> sps_old;
};
struct sync_session_ctx_t {
@@ -712,9 +679,14 @@ void captureThread(
}
}
int encode(int64_t frame_nr, ctx_t &ctx, frame_t::pointer frame, packet_queue_t &packets, void *channel_data) {
int encode(int64_t frame_nr, session_t &session, frame_t::pointer frame, packet_queue_t &packets, void *channel_data) {
frame->pts = frame_nr;
auto &ctx = session.ctx;
auto &sps = session.sps;
auto &sps_old = session.sps_old;
/* send the frame to the encoder */
auto ret = avcodec_send_frame(ctx.get(), frame);
if(ret < 0) {
@@ -735,7 +707,12 @@ int encode(int64_t frame_nr, ctx_t &ctx, frame_t::pointer frame, packet_queue_t
return ret;
}
packet->channel_data = channel_data;
if(sps.size() && !sps_old.size()) {
sps_old = cbs::read_sps(packet.get(), AV_CODEC_ID_H264);
}
packet->sps.old = std::string_view((char *)std::begin(sps_old), sps_old.size());
packet->sps.replacement = std::string_view((char *)std::begin(sps), sps.size());
packet->channel_data = channel_data;
packets->raise(std::move(packet));
}
@@ -948,10 +925,19 @@ std::optional<session_t> make_session(const encoder_t &encoder, const config_t &
}
device->set_colorspace(sws_color_space, ctx->color_range);
return std::make_optional(session_t {
if(video_format[encoder_t::VUI_PARAMETERS]) {
return std::make_optional(session_t {
std::move(ctx),
std::move(device),
{},
});
}
return std::make_optional<session_t>(
std::move(ctx),
std::move(device),
});
cbs::make_sps_h264(ctx.get()));
}
void encode_run(
@@ -1018,7 +1004,7 @@ void encode_run(
}
}
if(encode(frame_nr++, session->ctx, frame, packets, channel_data)) {
if(encode(frame_nr++, *session, frame, packets, channel_data)) {
BOOST_LOG(error) << "Could not encode video packet"sv;
return;
}
@@ -1189,7 +1175,7 @@ encode_e encode_run_sync(std::vector<std::unique_ptr<sync_session_ctx_t>> &synce
pos->img_tmp = nullptr;
}
if(encode(ctx->frame_nr++, pos->session.ctx, frame, ctx->packets, ctx->channel_data)) {
if(encode(ctx->frame_nr++, pos->session, frame, ctx->packets, ctx->channel_data)) {
BOOST_LOG(error) << "Could not encode video packet"sv;
ctx->shutdown_event->raise(true);
@@ -1357,7 +1343,7 @@ bool validate_config(std::shared_ptr<platf::display_t> &disp, const encoder_t &e
auto packets = std::make_shared<packet_queue_t::element_type>(30);
while(!packets->peek()) {
if(encode(1, session->ctx, frame, packets, nullptr)) {
if(encode(1, *session, frame, packets, nullptr)) {
return false;
}
}
@@ -1373,35 +1359,7 @@ bool validate_config(std::shared_ptr<platf::display_t> &disp, const encoder_t &e
return true;
}
auto codec_id = (validate_sps == 0 ? AV_CODEC_ID_H264 : AV_CODEC_ID_H265);
// validate sps
cbs::ctx_t ctx;
if(ff_cbs_init(&ctx, codec_id, nullptr)) {
return false;
}
cbs::frag_t frag;
int err = ff_cbs_read_packet(ctx.get(), &frag, &*packet);
if(err < 0) {
char err_str[AV_ERROR_MAX_STRING_SIZE] { 0 };
BOOST_LOG(error) << "Couldn't read packet: "sv << av_make_error_string(err_str, AV_ERROR_MAX_STRING_SIZE, err);
return false;
}
if(validate_sps == 0) {
auto h264 = (CodedBitstreamH264Context *)ctx->priv_data;
if(!h264->active_sps->vui_parameters_present_flag) {
return false;
}
return true;
}
return ((CodedBitstreamH265Context *)ctx->priv_data)->active_sps->vui_parameters_present_flag;
return cbs::validate_sps(&*packet, validate_sps);
}
bool validate_encoder(encoder_t &encoder) {
@@ -1473,12 +1431,12 @@ bool validate_encoder(encoder_t &encoder) {
// test for presence of vui-parameters in the sps header
config_autoselect.videoFormat = 0;
encoder.h264[encoder_t::VUI_PARAMETERS] = validate_config(disp, encoder, config_autoselect, 0);
encoder.h264[encoder_t::VUI_PARAMETERS] = validate_config(disp, encoder, config_autoselect, AV_CODEC_ID_H264);
if(encoder.hevc[encoder_t::PASSED]) {
config_autoselect.videoFormat = 1;
encoder.hevc[encoder_t::VUI_PARAMETERS] = validate_config(disp, encoder, config_autoselect, 1);
encoder.hevc[encoder_t::VUI_PARAMETERS] = validate_config(disp, encoder, config_autoselect, AV_CODEC_ID_H265);
}
if(!encoder.h264[encoder_t::VUI_PARAMETERS]) {


@@ -42,6 +42,11 @@ struct packet_raw_t : public AVPacket {
av_packet_unref(this);
}
struct {
std::string_view old;
std::string_view replacement;
} sps;
void *channel_data;
};


@@ -24,6 +24,7 @@ include/cbs/h2645_parse.h
include/cbs/h264.h
include/cbs/hevc.h
include/cbs/sei.h
include/cbs/h264_levels.h
cbs.c
cbs_h2645.c
@@ -33,6 +34,7 @@ cbs_mpeg2.c
cbs_jpeg.c
cbs_sei.c
h2645_parse.c
h264_levels.c
attributes.h
bytestream.h

third-party/cbs/h264_levels.c (new vendored file, 121 lines)

@@ -0,0 +1,121 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <libavcodec/avcodec.h>
#include "include/cbs/h264_levels.h"
// H.264 table A-1.
static const H264LevelDescriptor h264_levels[] = {
// Columns: name, level_idc, constraint_set3_flag, MaxMBPS, MaxFS, MaxDpbMbs,
// MaxBR, MaxCPB, MaxVmvR, MinCR, MaxMvsPer2Mb
{ "1", 10, 0, 1485, 99, 396, 64, 175, 64, 2, 0 },
{ "1b", 11, 1, 1485, 99, 396, 128, 350, 64, 2, 0 },
{ "1b", 9, 0, 1485, 99, 396, 128, 350, 64, 2, 0 },
{ "1.1", 11, 0, 3000, 396, 900, 192, 500, 128, 2, 0 },
{ "1.2", 12, 0, 6000, 396, 2376, 384, 1000, 128, 2, 0 },
{ "1.3", 13, 0, 11880, 396, 2376, 768, 2000, 128, 2, 0 },
{ "2", 20, 0, 11880, 396, 2376, 2000, 2000, 128, 2, 0 },
{ "2.1", 21, 0, 19800, 792, 4752, 4000, 4000, 256, 2, 0 },
{ "2.2", 22, 0, 20250, 1620, 8100, 4000, 4000, 256, 2, 0 },
{ "3", 30, 0, 40500, 1620, 8100, 10000, 10000, 256, 2, 32 },
{ "3.1", 31, 0, 108000, 3600, 18000, 14000, 14000, 512, 4, 16 },
{ "3.2", 32, 0, 216000, 5120, 20480, 20000, 20000, 512, 4, 16 },
{ "4", 40, 0, 245760, 8192, 32768, 20000, 25000, 512, 4, 16 },
{ "4.1", 41, 0, 245760, 8192, 32768, 50000, 62500, 512, 2, 16 },
{ "4.2", 42, 0, 522240, 8704, 34816, 50000, 62500, 512, 2, 16 },
{ "5", 50, 0, 589824, 22080, 110400, 135000, 135000, 512, 2, 16 },
{ "5.1", 51, 0, 983040, 36864, 184320, 240000, 240000, 512, 2, 16 },
{ "5.2", 52, 0, 2073600, 36864, 184320, 240000, 240000, 512, 2, 16 },
{ "6", 60, 0, 4177920, 139264, 696320, 240000, 240000, 8192, 2, 16 },
{ "6.1", 61, 0, 8355840, 139264, 696320, 480000, 480000, 8192, 2, 16 },
{ "6.2", 62, 0, 16711680, 139264, 696320, 800000, 800000, 8192, 2, 16 },
};
// H.264 table A-2 plus values from A-1.
static const struct {
int profile_idc;
int cpb_br_vcl_factor;
int cpb_br_nal_factor;
} h264_br_factors[] = {
{ 66, 1000, 1200 },
{ 77, 1000, 1200 },
{ 88, 1000, 1200 },
{ 100, 1250, 1500 },
{ 110, 3000, 3600 },
{ 122, 4000, 4800 },
{ 244, 4000, 4800 },
{ 44, 4000, 4800 },
};
// We are only ever interested in the NAL bitrate factor.
static int h264_get_br_factor(int profile_idc) {
int i;
for(i = 0; i < FF_ARRAY_ELEMS(h264_br_factors); i++) {
if(h264_br_factors[i].profile_idc == profile_idc)
return h264_br_factors[i].cpb_br_nal_factor;
}
// Default to the non-high profile value if not specified.
return 1200;
}
const H264LevelDescriptor *ff_h264_guess_level(int profile_idc,
int64_t bitrate,
int framerate,
int width, int height,
int max_dec_frame_buffering) {
int width_mbs = (width + 15) / 16;
int height_mbs = (height + 15) / 16;
int no_cs3f = !(profile_idc == 66 ||
profile_idc == 77 ||
profile_idc == 88);
int i;
for(i = 0; i < FF_ARRAY_ELEMS(h264_levels); i++) {
const H264LevelDescriptor *level = &h264_levels[i];
if(level->constraint_set3_flag && no_cs3f)
continue;
if(bitrate > (int64_t)level->max_br * h264_get_br_factor(profile_idc))
continue;
if(width_mbs * height_mbs > level->max_fs)
continue;
if(width_mbs * width_mbs > 8 * level->max_fs)
continue;
if(height_mbs * height_mbs > 8 * level->max_fs)
continue;
if(width_mbs && height_mbs) {
int max_dpb_frames =
FFMIN(level->max_dpb_mbs / (width_mbs * height_mbs), 16);
if(max_dec_frame_buffering > max_dpb_frames)
continue;
if(framerate > (level->max_mbps / (width_mbs * height_mbs)))
continue;
}
return level;
}
// No usable levels found - frame is too big or bitrate is too high.
return NULL;
}
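As a quick sanity check of the lookup (an illustrative example, not part of the vendored file): 1920x1080 rounds up to 120x68 macroblocks, i.e. 8160 MBs per frame, so 60 fps needs 489600 MB/s. That exceeds MaxMBPS for levels 4 and 4.1 (245760) but fits level 4.2 (522240), so a High-profile 1080p60 stream at 20 Mbit/s should resolve to level_idc 42:

extern "C" {
#include <cbs/h264_levels.h>
}

#include <cassert>

void check_level_guess() {
  // profile_idc 100 (High), 20 Mbit/s NAL bitrate, 60 fps, 1920x1080, 1 DPB frame
  auto level = ff_h264_guess_level(100, 20000000, 60, 1920, 1080, 1);
  assert(level && level->level_idc == 42); // level "4.2"
}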


@@ -0,0 +1,51 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_H264_LEVELS_H
#define AVCODEC_H264_LEVELS_H
#include <stdint.h>
typedef struct H264LevelDescriptor {
const char *name;
uint8_t level_idc;
uint8_t constraint_set3_flag;
uint32_t max_mbps;
uint32_t max_fs;
uint32_t max_dpb_mbs;
uint32_t max_br;
uint32_t max_cpb;
uint16_t max_v_mv_r;
uint8_t min_cr;
uint8_t max_mvs_per_2mb;
} H264LevelDescriptor;
/**
* Guess the level of a stream from some parameters.
*
* Unknown parameters may be zero, in which case they are ignored.
*/
const H264LevelDescriptor *ff_h264_guess_level(int profile_idc,
int64_t bitrate,
int framerate,
int width, int height,
int max_dec_frame_buffering);
#endif /* AVCODEC_H264_LEVELS_H */