FFmpeg 5 and pre-built CBS (#509)

This commit is contained in:
Brad Richardson 2022-12-27 09:13:54 -05:00 committed by GitHub
parent cbb5ec3f29
commit 95437d15f3
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
55 changed files with 14 additions and 20286 deletions

View File

@ -401,6 +401,8 @@ endif()
set(FFMPEG_LIBRARIES
${FFMPEG_PREPARED_BINARIES}/lib/libavcodec.a
${FFMPEG_PREPARED_BINARIES}/lib/libavutil.a
${FFMPEG_PREPARED_BINARIES}/lib/libcbs.a
${FFMPEG_PREPARED_BINARIES}/lib/libSvtAv1Enc.a
${FFMPEG_PREPARED_BINARIES}/lib/libswscale.a
${FFMPEG_PREPARED_BINARIES}/lib/libx264.a
${FFMPEG_PREPARED_BINARIES}/lib/libx265.a
@ -410,15 +412,12 @@ set(FFMPEG_LIBRARIES
include_directories(
${CMAKE_CURRENT_SOURCE_DIR}
${CMAKE_CURRENT_SOURCE_DIR}/third-party
${CMAKE_CURRENT_SOURCE_DIR}/third-party/cbs/include
${CMAKE_CURRENT_SOURCE_DIR}/third-party/moonlight-common-c/enet/include
${CMAKE_CURRENT_SOURCE_DIR}/third-party/moonlight-common-c/reedsolomon
${FFMPEG_INCLUDE_DIRS}
${PLATFORM_INCLUDE_DIRS}
)
add_subdirectory(third-party/cbs)
string(TOUPPER "x${CMAKE_BUILD_TYPE}" BUILD_TYPE)
if("${BUILD_TYPE}" STREQUAL "XDEBUG")
list(APPEND SUNSHINE_COMPILE_OPTIONS -O0 -ggdb3)
@ -446,13 +445,8 @@ else()
endif()
list(APPEND SUNSHINE_DEFINITIONS SUNSHINE_ASSETS_DIR="${SUNSHINE_ASSETS_DIR_DEF}")
list(APPEND CBS_EXTERNAL_LIBRARIES
cbs)
list(APPEND SUNSHINE_EXTERNAL_LIBRARIES
libminiupnpc-static
${CBS_EXTERNAL_LIBRARIES}
${CMAKE_THREAD_LIBS_INIT}
enet
opus

View File

@ -958,7 +958,7 @@ amd_rc
Value Description
=========== ===========
auto let ffmpeg decide
constqp constant QP mode
cqp constant QP mode
cbr constant bitrate
vbr_latency variable bitrate, latency constrained
vbr_peak variable bitrate, peak constrained

View File

@ -1,7 +1,7 @@
extern "C" {
#include <cbs/cbs_h264.h>
#include <cbs/cbs_h265.h>
#include <cbs/video_levels.h>
#include <cbs/h264_levels.h>
#include <libavcodec/avcodec.h>
#include <libavutil/pixdesc.h>
}

View File

@ -103,14 +103,14 @@ enum quality_e : int {
};
enum class rc_hevc_e : int {
constqp, /**< Constant QP mode */
cqp, /**< Constant QP mode */
vbr_latency, /**< Latency Constrained Variable Bitrate */
vbr_peak, /**< Peak Constrained Variable Bitrate */
cbr, /**< Constant bitrate mode */
};
enum class rc_h264_e : int {
constqp, /**< Constant QP mode */
cqp, /**< Constant QP mode */
cbr, /**< Constant bitrate mode */
vbr_peak, /**< Peak Constrained Variable Bitrate */
vbr_latency, /**< Latency Constrained Variable Bitrate */
@ -135,7 +135,7 @@ std::optional<quality_e> quality_from_view(const std::string_view &quality) {
std::optional<int> rc_h264_from_view(const std::string_view &rc) {
#define _CONVERT_(x) \
if(rc == #x##sv) return (int)rc_h264_e::x
_CONVERT_(constqp);
_CONVERT_(cqp);
_CONVERT_(vbr_latency);
_CONVERT_(vbr_peak);
_CONVERT_(cbr);
@ -146,7 +146,7 @@ std::optional<int> rc_h264_from_view(const std::string_view &rc) {
std::optional<int> rc_hevc_from_view(const std::string_view &rc) {
#define _CONVERT_(x) \
if(rc == #x##sv) return (int)rc_hevc_e::x
_CONVERT_(constqp);
_CONVERT_(cqp);
_CONVERT_(vbr_latency);
_CONVERT_(vbr_peak);
_CONVERT_(cbr);

View File

@ -700,7 +700,7 @@
<label for="amd_rc" class="form-label">AMD AMF Rate Control</label>
<select id="amd_rc" class="form-select" v-model="config.amd_rc">
<option value="auto">auto -- let ffmpeg decide rate control</option>
<option value="constqp">constqp -- constant QP mode</option>
<option value="cqp">cqp -- constant QP mode</option>
<option value="vbr_latency">vbr_latency -- Latency Constrained Variable Bitrate</option>
<option value="vbr_peak">vbr_peak -- Peak Contrained Variable Bitrate</option>
<option value="cbr">cbr -- constant bitrate</option>

View File

@ -1,69 +0,0 @@
cmake_minimum_required(VERSION 3.0)
project(CBS)
SET(CBS_SOURCE_FILES
include/cbs/av1.h
include/cbs/cbs_av1.h
include/cbs/cbs_bsf.h
include/cbs/cbs.h
include/cbs/cbs_h2645.h
include/cbs/cbs_h264.h
include/cbs/cbs_h265.h
include/cbs/cbs_jpeg.h
include/cbs/cbs_mpeg2.h
include/cbs/cbs_sei.h
include/cbs/cbs_vp9.h
include/cbs/h2645_parse.h
include/cbs/h264.h
include/cbs/hevc.h
include/cbs/sei.h
include/cbs/video_levels.h
cbs.c
cbs_h2645.c
cbs_av1.c
cbs_vp9.c
cbs_mpeg2.c
cbs_jpeg.c
cbs_sei.c
h2645_parse.c
video_levels.c
bytestream.h
cbs_internal.h
defs.h
get_bits.h
h264_ps.h
h264_sei.h
hevc_sei.h
intmath.h
mathops.h
put_bits.h
vlc.h
config.h
)
include_directories(include)
if(DEFINED FFMPEG_INCLUDE_DIRS)
include_directories(${FFMPEG_INCLUDE_DIRS})
endif()
add_compile_definitions(
HAVE_THREADS=1
HAVE_FAST_UNALIGNED
PIC=1
CONFIG_CBS_AV1=1
CONFIG_CBS_H264=1
CONFIG_CBS_H265=1
CONFIG_CBS_JPEG=1
CONFIG_CBS_MPEG2=1
CONFIG_CBS_VP9=1
)
add_library(cbs ${CBS_SOURCE_FILES})
target_compile_options(cbs PRIVATE -Wall -Wno-incompatible-pointer-types -Wno-maybe-uninitialized -Wno-format -Wno-format-extra-args)

View File

@ -1,351 +0,0 @@
/*
* Bytestream functions
* copyright (c) 2006 Baptiste Coudurier <baptiste.coudurier@free.fr>
* Copyright (c) 2012 Aneesh Dogra (lionaneesh) <lionaneesh@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_BYTESTREAM_H
#define AVCODEC_BYTESTREAM_H
#include "config.h"
#include <stdint.h>
#include <string.h>
#include <libavutil/avassert.h>
#include <libavutil/common.h>
#include <libavutil/intreadwrite.h>
typedef struct GetByteContext {
const uint8_t *buffer, *buffer_end, *buffer_start;
} GetByteContext;
typedef struct PutByteContext {
uint8_t *buffer, *buffer_end, *buffer_start;
int eof;
} PutByteContext;
#define DEF(type, name, bytes, read, write) \
static av_always_inline type bytestream_get_##name(const uint8_t **b) { \
(*b) += bytes; \
return read(*b - bytes); \
} \
static av_always_inline void bytestream_put_##name(uint8_t **b, \
const type value) { \
write(*b, value); \
(*b) += bytes; \
} \
static av_always_inline void bytestream2_put_##name##u(PutByteContext *p, \
const type value) { \
bytestream_put_##name(&p->buffer, value); \
} \
static av_always_inline void bytestream2_put_##name(PutByteContext *p, \
const type value) { \
if(!p->eof && (p->buffer_end - p->buffer >= bytes)) { \
write(p->buffer, value); \
p->buffer += bytes; \
} \
else \
p->eof = 1; \
} \
static av_always_inline type bytestream2_get_##name##u(GetByteContext *g) { \
return bytestream_get_##name(&g->buffer); \
} \
static av_always_inline type bytestream2_get_##name(GetByteContext *g) { \
if(g->buffer_end - g->buffer < bytes) { \
g->buffer = g->buffer_end; \
return 0; \
} \
return bytestream2_get_##name##u(g); \
} \
static av_always_inline type bytestream2_peek_##name##u(GetByteContext *g) { \
return read(g->buffer); \
} \
static av_always_inline type bytestream2_peek_##name(GetByteContext *g) { \
if(g->buffer_end - g->buffer < bytes) \
return 0; \
return bytestream2_peek_##name##u(g); \
}
DEF(uint64_t, le64, 8, AV_RL64, AV_WL64)
DEF(unsigned int, le32, 4, AV_RL32, AV_WL32)
DEF(unsigned int, le24, 3, AV_RL24, AV_WL24)
DEF(unsigned int, le16, 2, AV_RL16, AV_WL16)
DEF(uint64_t, be64, 8, AV_RB64, AV_WB64)
DEF(unsigned int, be32, 4, AV_RB32, AV_WB32)
DEF(unsigned int, be24, 3, AV_RB24, AV_WB24)
DEF(unsigned int, be16, 2, AV_RB16, AV_WB16)
DEF(unsigned int, byte, 1, AV_RB8, AV_WB8)
#if AV_HAVE_BIGENDIAN
#define bytestream2_get_ne16 bytestream2_get_be16
#define bytestream2_get_ne24 bytestream2_get_be24
#define bytestream2_get_ne32 bytestream2_get_be32
#define bytestream2_get_ne64 bytestream2_get_be64
#define bytestream2_get_ne16u bytestream2_get_be16u
#define bytestream2_get_ne24u bytestream2_get_be24u
#define bytestream2_get_ne32u bytestream2_get_be32u
#define bytestream2_get_ne64u bytestream2_get_be64u
#define bytestream2_put_ne16 bytestream2_put_be16
#define bytestream2_put_ne24 bytestream2_put_be24
#define bytestream2_put_ne32 bytestream2_put_be32
#define bytestream2_put_ne64 bytestream2_put_be64
#define bytestream2_peek_ne16 bytestream2_peek_be16
#define bytestream2_peek_ne24 bytestream2_peek_be24
#define bytestream2_peek_ne32 bytestream2_peek_be32
#define bytestream2_peek_ne64 bytestream2_peek_be64
#else
#define bytestream2_get_ne16 bytestream2_get_le16
#define bytestream2_get_ne24 bytestream2_get_le24
#define bytestream2_get_ne32 bytestream2_get_le32
#define bytestream2_get_ne64 bytestream2_get_le64
#define bytestream2_get_ne16u bytestream2_get_le16u
#define bytestream2_get_ne24u bytestream2_get_le24u
#define bytestream2_get_ne32u bytestream2_get_le32u
#define bytestream2_get_ne64u bytestream2_get_le64u
#define bytestream2_put_ne16 bytestream2_put_le16
#define bytestream2_put_ne24 bytestream2_put_le24
#define bytestream2_put_ne32 bytestream2_put_le32
#define bytestream2_put_ne64 bytestream2_put_le64
#define bytestream2_peek_ne16 bytestream2_peek_le16
#define bytestream2_peek_ne24 bytestream2_peek_le24
#define bytestream2_peek_ne32 bytestream2_peek_le32
#define bytestream2_peek_ne64 bytestream2_peek_le64
#endif
static av_always_inline void bytestream2_init(GetByteContext *g,
const uint8_t *buf,
int buf_size) {
av_assert0(buf_size >= 0);
g->buffer = buf;
g->buffer_start = buf;
g->buffer_end = buf + buf_size;
}
static av_always_inline void bytestream2_init_writer(PutByteContext *p,
uint8_t *buf,
int buf_size) {
av_assert0(buf_size >= 0);
p->buffer = buf;
p->buffer_start = buf;
p->buffer_end = buf + buf_size;
p->eof = 0;
}
static av_always_inline int bytestream2_get_bytes_left(GetByteContext *g) {
return g->buffer_end - g->buffer;
}
static av_always_inline int bytestream2_get_bytes_left_p(PutByteContext *p) {
return p->buffer_end - p->buffer;
}
static av_always_inline void bytestream2_skip(GetByteContext *g,
unsigned int size) {
g->buffer += FFMIN(g->buffer_end - g->buffer, size);
}
static av_always_inline void bytestream2_skipu(GetByteContext *g,
unsigned int size) {
g->buffer += size;
}
static av_always_inline void bytestream2_skip_p(PutByteContext *p,
unsigned int size) {
int size2;
if(p->eof)
return;
size2 = FFMIN(p->buffer_end - p->buffer, size);
if(size2 != size)
p->eof = 1;
p->buffer += size2;
}
static av_always_inline int bytestream2_tell(GetByteContext *g) {
return (int)(g->buffer - g->buffer_start);
}
static av_always_inline int bytestream2_tell_p(PutByteContext *p) {
return (int)(p->buffer - p->buffer_start);
}
static av_always_inline int bytestream2_size(GetByteContext *g) {
return (int)(g->buffer_end - g->buffer_start);
}
static av_always_inline int bytestream2_size_p(PutByteContext *p) {
return (int)(p->buffer_end - p->buffer_start);
}
static av_always_inline int bytestream2_seek(GetByteContext *g,
int offset,
int whence) {
switch(whence) {
case SEEK_CUR:
offset = av_clip(offset, -(g->buffer - g->buffer_start),
g->buffer_end - g->buffer);
g->buffer += offset;
break;
case SEEK_END:
offset = av_clip(offset, -(g->buffer_end - g->buffer_start), 0);
g->buffer = g->buffer_end + offset;
break;
case SEEK_SET:
offset = av_clip(offset, 0, g->buffer_end - g->buffer_start);
g->buffer = g->buffer_start + offset;
break;
default:
return AVERROR(EINVAL);
}
return bytestream2_tell(g);
}
static av_always_inline int bytestream2_seek_p(PutByteContext *p,
int offset,
int whence) {
p->eof = 0;
switch(whence) {
case SEEK_CUR:
if(p->buffer_end - p->buffer < offset)
p->eof = 1;
offset = av_clip(offset, -(p->buffer - p->buffer_start),
p->buffer_end - p->buffer);
p->buffer += offset;
break;
case SEEK_END:
if(offset > 0)
p->eof = 1;
offset = av_clip(offset, -(p->buffer_end - p->buffer_start), 0);
p->buffer = p->buffer_end + offset;
break;
case SEEK_SET:
if(p->buffer_end - p->buffer_start < offset)
p->eof = 1;
offset = av_clip(offset, 0, p->buffer_end - p->buffer_start);
p->buffer = p->buffer_start + offset;
break;
default:
return AVERROR(EINVAL);
}
return bytestream2_tell_p(p);
}
static av_always_inline unsigned int bytestream2_get_buffer(GetByteContext *g,
uint8_t *dst,
unsigned int size) {
int size2 = FFMIN(g->buffer_end - g->buffer, size);
memcpy(dst, g->buffer, size2);
g->buffer += size2;
return size2;
}
static av_always_inline unsigned int bytestream2_get_bufferu(GetByteContext *g,
uint8_t *dst,
unsigned int size) {
memcpy(dst, g->buffer, size);
g->buffer += size;
return size;
}
static av_always_inline unsigned int bytestream2_put_buffer(PutByteContext *p,
const uint8_t *src,
unsigned int size) {
int size2;
if(p->eof)
return 0;
size2 = FFMIN(p->buffer_end - p->buffer, size);
if(size2 != size)
p->eof = 1;
memcpy(p->buffer, src, size2);
p->buffer += size2;
return size2;
}
static av_always_inline unsigned int bytestream2_put_bufferu(PutByteContext *p,
const uint8_t *src,
unsigned int size) {
memcpy(p->buffer, src, size);
p->buffer += size;
return size;
}
static av_always_inline void bytestream2_set_buffer(PutByteContext *p,
const uint8_t c,
unsigned int size) {
int size2;
if(p->eof)
return;
size2 = FFMIN(p->buffer_end - p->buffer, size);
if(size2 != size)
p->eof = 1;
memset(p->buffer, c, size2);
p->buffer += size2;
}
static av_always_inline void bytestream2_set_bufferu(PutByteContext *p,
const uint8_t c,
unsigned int size) {
memset(p->buffer, c, size);
p->buffer += size;
}
static av_always_inline unsigned int bytestream2_get_eof(PutByteContext *p) {
return p->eof;
}
static av_always_inline unsigned int bytestream2_copy_bufferu(PutByteContext *p,
GetByteContext *g,
unsigned int size) {
memcpy(p->buffer, g->buffer, size);
p->buffer += size;
g->buffer += size;
return size;
}
static av_always_inline unsigned int bytestream2_copy_buffer(PutByteContext *p,
GetByteContext *g,
unsigned int size) {
int size2;
if(p->eof)
return 0;
size = FFMIN(g->buffer_end - g->buffer, size);
size2 = FFMIN(p->buffer_end - p->buffer, size);
if(size2 != size)
p->eof = 1;
return bytestream2_copy_bufferu(p, g, size2);
}
static av_always_inline unsigned int bytestream_get_buffer(const uint8_t **b,
uint8_t *dst,
unsigned int size) {
memcpy(dst, *b, size);
(*b) += size;
return size;
}
static av_always_inline void bytestream_put_buffer(uint8_t **b,
const uint8_t *src,
unsigned int size) {
memcpy(*b, src, size);
(*b) += size;
}
#endif /* AVCODEC_BYTESTREAM_H */

1050
third-party/cbs/cbs.c vendored

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -1,220 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_INTERNAL_H
#define AVCODEC_CBS_INTERNAL_H
#include <stdint.h>
#include <libavutil/buffer.h>
#include <libavutil/log.h>
#include "cbs/cbs.h"
#include "get_bits.h"
#include "put_bits.h"
enum CBSContentType {
// Unit content is a simple structure.
CBS_CONTENT_TYPE_POD,
// Unit content contains some references to other structures, but all
// managed via buffer reference counting. The descriptor defines the
// structure offsets of every buffer reference.
CBS_CONTENT_TYPE_INTERNAL_REFS,
// Unit content is something more complex. The descriptor defines
// special functions to manage the content.
CBS_CONTENT_TYPE_COMPLEX,
};
enum {
// Maximum number of unit types described by the same unit type
// descriptor.
CBS_MAX_UNIT_TYPES = 3,
// Maximum number of reference buffer offsets in any one unit.
CBS_MAX_REF_OFFSETS = 2,
// Special value used in a unit type descriptor to indicate that it
// applies to a large range of types rather than a set of discrete
// values.
CBS_UNIT_TYPE_RANGE = -1,
};
typedef const struct CodedBitstreamUnitTypeDescriptor {
// Number of entries in the unit_types array, or the special value
// CBS_UNIT_TYPE_RANGE to indicate that the range fields should be
// used instead.
int nb_unit_types;
// Array of unit types that this entry describes.
const CodedBitstreamUnitType unit_types[CBS_MAX_UNIT_TYPES];
// Start and end of unit type range, used if nb_unit_types is
// CBS_UNIT_TYPE_RANGE.
const CodedBitstreamUnitType unit_type_range_start;
const CodedBitstreamUnitType unit_type_range_end;
// The type of content described.
enum CBSContentType content_type;
// The size of the structure which should be allocated to contain
// the decomposed content of this type of unit.
size_t content_size;
// Number of entries in the ref_offsets array. Only used if the
// content_type is CBS_CONTENT_TYPE_INTERNAL_REFS.
int nb_ref_offsets;
// The structure must contain two adjacent elements:
// type *field;
// AVBufferRef *field_ref;
// where field points to something in the buffer referred to by
// field_ref. This offset is then set to offsetof(struct, field).
size_t ref_offsets[CBS_MAX_REF_OFFSETS];
void (*content_free)(void *opaque, uint8_t *data);
int (*content_clone)(AVBufferRef **ref, CodedBitstreamUnit *unit);
} CodedBitstreamUnitTypeDescriptor;
typedef struct CodedBitstreamType {
enum AVCodecID codec_id;
// A class for the private data, used to declare private AVOptions.
// This field is NULL for types that do not declare any options.
// If this field is non-NULL, the first member of the filter private data
// must be a pointer to AVClass.
const AVClass *priv_class;
size_t priv_data_size;
// List of unit type descriptors for this codec.
// Terminated by a descriptor with nb_unit_types equal to zero.
const CodedBitstreamUnitTypeDescriptor *unit_types;
// Split frag->data into coded bitstream units, creating the
// frag->units array. Fill data but not content on each unit.
// The header argument should be set if the fragment came from
// a header block, which may require different parsing for some
// codecs (e.g. the AVCC header in H.264).
int (*split_fragment)(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
int header);
// Read the unit->data bitstream and decompose it, creating
// unit->content.
int (*read_unit)(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
// Write the data bitstream from unit->content into pbc.
// Return value AVERROR(ENOSPC) indicates that pbc was too small.
int (*write_unit)(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit,
PutBitContext *pbc);
// Read the data from all of frag->units and assemble it into
// a bitstream for the whole fragment.
int (*assemble_fragment)(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag);
// Reset the codec internal state.
void (*flush)(CodedBitstreamContext *ctx);
// Free the codec internal state.
void (*close)(CodedBitstreamContext *ctx);
} CodedBitstreamType;
// Helper functions for trace output.
void ff_cbs_trace_header(CodedBitstreamContext *ctx,
const char *name);
void ff_cbs_trace_syntax_element(CodedBitstreamContext *ctx, int position,
const char *name, const int *subscripts,
const char *bitstring, int64_t value);
// Helper functions for read/write of common bitstream elements, including
// generation of trace output.
int ff_cbs_read_unsigned(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name,
const int *subscripts, uint32_t *write_to,
uint32_t range_min, uint32_t range_max);
int ff_cbs_write_unsigned(CodedBitstreamContext *ctx, PutBitContext *pbc,
int width, const char *name,
const int *subscripts, uint32_t value,
uint32_t range_min, uint32_t range_max);
int ff_cbs_read_signed(CodedBitstreamContext *ctx, GetBitContext *gbc,
int width, const char *name,
const int *subscripts, int32_t *write_to,
int32_t range_min, int32_t range_max);
int ff_cbs_write_signed(CodedBitstreamContext *ctx, PutBitContext *pbc,
int width, const char *name,
const int *subscripts, int32_t value,
int32_t range_min, int32_t range_max);
// The largest unsigned value representable in N bits, suitable for use as
// range_max in the above functions.
#define MAX_UINT_BITS(length) ((UINT64_C(1) << (length)) - 1)
// The largest signed value representable in N bits, suitable for use as
// range_max in the above functions.
#define MAX_INT_BITS(length) ((INT64_C(1) << ((length)-1)) - 1)
// The smallest signed value representable in N bits, suitable for use as
// range_min in the above functions.
#define MIN_INT_BITS(length) (-(INT64_C(1) << ((length)-1)))
#define CBS_UNIT_TYPE_POD(type, structure) \
{ \
.nb_unit_types = 1, \
.unit_types = { type }, \
.content_type = CBS_CONTENT_TYPE_POD, \
.content_size = sizeof(structure), \
}
#define CBS_UNIT_TYPE_INTERNAL_REF(type, structure, ref_field) \
{ \
.nb_unit_types = 1, \
.unit_types = { type }, \
.content_type = CBS_CONTENT_TYPE_INTERNAL_REFS, \
.content_size = sizeof(structure), \
.nb_ref_offsets = 1, \
.ref_offsets = { offsetof(structure, ref_field) }, \
}
#define CBS_UNIT_TYPE_COMPLEX(type, structure, free_func) \
{ \
.nb_unit_types = 1, \
.unit_types = { type }, \
.content_type = CBS_CONTENT_TYPE_COMPLEX, \
.content_size = sizeof(structure), \
.content_free = free_func, \
}
#define CBS_UNIT_TYPE_END_OF_LIST \
{ .nb_unit_types = 0 }
extern const CodedBitstreamType ff_cbs_type_av1;
extern const CodedBitstreamType ff_cbs_type_h264;
extern const CodedBitstreamType ff_cbs_type_h265;
extern const CodedBitstreamType ff_cbs_type_jpeg;
extern const CodedBitstreamType ff_cbs_type_mpeg2;
extern const CodedBitstreamType ff_cbs_type_vp9;
#endif /* AVCODEC_CBS_INTERNAL_H */

View File

@ -1,482 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "cbs/cbs_jpeg.h"
#include "cbs/cbs.h"
#include "cbs_internal.h"
#define HEADER(name) \
do { \
ff_cbs_trace_header(ctx, name); \
} while(0)
#define CHECK(call) \
do { \
err = (call); \
if(err < 0) \
return err; \
} while(0)
#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]) { subs, __VA_ARGS__ }) : NULL)
#define u(width, name, range_min, range_max) \
xu(width, name, range_min, range_max, 0, )
#define us(width, name, sub, range_min, range_max) \
xu(width, name, range_min, range_max, 1, sub)
#define READ
#define READWRITE read
#define RWContext GetBitContext
#define FUNC(name) cbs_jpeg_read_##name
#define xu(width, name, range_min, range_max, subs, ...) \
do { \
uint32_t value; \
CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \
SUBSCRIPTS(subs, __VA_ARGS__), \
&value, range_min, range_max)); \
current->name = value; \
} while(0)
#include "cbs_jpeg_syntax_template.c"
#undef READ
#undef READWRITE
#undef RWContext
#undef FUNC
#undef xu
#define WRITE
#define READWRITE write
#define RWContext PutBitContext
#define FUNC(name) cbs_jpeg_write_##name
#define xu(width, name, range_min, range_max, subs, ...) \
do { \
uint32_t value = current->name; \
CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \
SUBSCRIPTS(subs, __VA_ARGS__), \
value, range_min, range_max)); \
} while(0)
#include "cbs_jpeg_syntax_template.c"
#undef WRITE
#undef READWRITE
#undef RWContext
#undef FUNC
#undef xu
static void cbs_jpeg_free_application_data(void *opaque, uint8_t *content) {
JPEGRawApplicationData *ad = (JPEGRawApplicationData *)content;
av_buffer_unref(&ad->Ap_ref);
av_freep(&content);
}
static void cbs_jpeg_free_comment(void *opaque, uint8_t *content) {
JPEGRawComment *comment = (JPEGRawComment *)content;
av_buffer_unref(&comment->Cm_ref);
av_freep(&content);
}
static void cbs_jpeg_free_scan(void *opaque, uint8_t *content) {
JPEGRawScan *scan = (JPEGRawScan *)content;
av_buffer_unref(&scan->data_ref);
av_freep(&content);
}
static int cbs_jpeg_split_fragment(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
int header) {
AVBufferRef *data_ref;
uint8_t *data;
size_t data_size;
int unit, start, end, marker, next_start, next_marker;
int err, i, j, length;
if(frag->data_size < 4) {
// Definitely too short to be meaningful.
return AVERROR_INVALIDDATA;
}
for(i = 0; i + 1 < frag->data_size && frag->data[i] != 0xff; i++)
;
if(i > 0) {
av_log(ctx->log_ctx, AV_LOG_WARNING, "Discarding %d bytes at "
"beginning of image.\n",
i);
}
for(++i; i + 1 < frag->data_size && frag->data[i] == 0xff; i++)
;
if(i + 1 >= frag->data_size && frag->data[i]) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: "
"no SOI marker found.\n");
return AVERROR_INVALIDDATA;
}
marker = frag->data[i];
if(marker != JPEG_MARKER_SOI) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: first "
"marker is %02x, should be SOI.\n",
marker);
return AVERROR_INVALIDDATA;
}
for(++i; i + 1 < frag->data_size && frag->data[i] == 0xff; i++)
;
if(i + 1 >= frag->data_size) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: "
"no image content found.\n");
return AVERROR_INVALIDDATA;
}
marker = frag->data[i];
start = i + 1;
for(unit = 0;; unit++) {
if(marker == JPEG_MARKER_EOI) {
break;
}
else if(marker == JPEG_MARKER_SOS) {
next_marker = -1;
end = start;
for(i = start; i + 1 < frag->data_size; i++) {
if(frag->data[i] != 0xff)
continue;
end = i;
for(++i; i + 1 < frag->data_size &&
frag->data[i] == 0xff;
i++)
;
if(i + 1 < frag->data_size) {
if(frag->data[i] == 0x00)
continue;
next_marker = frag->data[i];
next_start = i + 1;
}
break;
}
}
else {
i = start;
if(i + 2 > frag->data_size) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: "
"truncated at %02x marker.\n",
marker);
return AVERROR_INVALIDDATA;
}
length = AV_RB16(frag->data + i);
if(i + length > frag->data_size) {
av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid JPEG image: "
"truncated at %02x marker segment.\n",
marker);
return AVERROR_INVALIDDATA;
}
end = start + length;
i = end;
if(frag->data[i] != 0xff) {
next_marker = -1;
}
else {
for(++i; i + 1 < frag->data_size &&
frag->data[i] == 0xff;
i++)
;
if(i + 1 >= frag->data_size) {
next_marker = -1;
}
else {
next_marker = frag->data[i];
next_start = i + 1;
}
}
}
if(marker == JPEG_MARKER_SOS) {
length = AV_RB16(frag->data + start);
if(length > end - start)
return AVERROR_INVALIDDATA;
data_ref = NULL;
data = av_malloc(end - start +
AV_INPUT_BUFFER_PADDING_SIZE);
if(!data)
return AVERROR(ENOMEM);
memcpy(data, frag->data + start, length);
for(i = start + length, j = length; i < end; i++, j++) {
if(frag->data[i] == 0xff) {
while(frag->data[i] == 0xff)
++i;
data[j] = 0xff;
}
else {
data[j] = frag->data[i];
}
}
data_size = j;
memset(data + data_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
}
else {
data = frag->data + start;
data_size = end - start;
data_ref = frag->data_ref;
}
err = ff_cbs_insert_unit_data(frag, unit, marker,
data, data_size, data_ref);
if(err < 0)
return err;
if(next_marker == -1)
break;
marker = next_marker;
start = next_start;
}
return 0;
}
static int cbs_jpeg_read_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit) {
GetBitContext gbc;
int err;
err = init_get_bits(&gbc, unit->data, 8 * unit->data_size);
if(err < 0)
return err;
if(unit->type >= JPEG_MARKER_SOF0 &&
unit->type <= JPEG_MARKER_SOF3) {
err = ff_cbs_alloc_unit_content(unit,
sizeof(JPEGRawFrameHeader),
NULL);
if(err < 0)
return err;
err = cbs_jpeg_read_frame_header(ctx, &gbc, unit->content);
if(err < 0)
return err;
}
else if(unit->type >= JPEG_MARKER_APPN &&
unit->type <= JPEG_MARKER_APPN + 15) {
err = ff_cbs_alloc_unit_content(unit,
sizeof(JPEGRawApplicationData),
&cbs_jpeg_free_application_data);
if(err < 0)
return err;
err = cbs_jpeg_read_application_data(ctx, &gbc, unit->content);
if(err < 0)
return err;
}
else if(unit->type == JPEG_MARKER_SOS) {
JPEGRawScan *scan;
int pos;
err = ff_cbs_alloc_unit_content(unit,
sizeof(JPEGRawScan),
&cbs_jpeg_free_scan);
if(err < 0)
return err;
scan = unit->content;
err = cbs_jpeg_read_scan_header(ctx, &gbc, &scan->header);
if(err < 0)
return err;
pos = get_bits_count(&gbc);
av_assert0(pos % 8 == 0);
if(pos > 0) {
scan->data_size = unit->data_size - pos / 8;
scan->data_ref = av_buffer_ref(unit->data_ref);
if(!scan->data_ref)
return AVERROR(ENOMEM);
scan->data = unit->data + pos / 8;
}
}
else {
switch(unit->type) {
#define SEGMENT(marker, type, func, free) \
case JPEG_MARKER_##marker: { \
err = ff_cbs_alloc_unit_content(unit, \
sizeof(type), free); \
if(err < 0) \
return err; \
err = cbs_jpeg_read_##func(ctx, &gbc, unit->content); \
if(err < 0) \
return err; \
} break
SEGMENT(DQT, JPEGRawQuantisationTableSpecification, dqt, NULL);
SEGMENT(DHT, JPEGRawHuffmanTableSpecification, dht, NULL);
SEGMENT(COM, JPEGRawComment, comment, &cbs_jpeg_free_comment);
#undef SEGMENT
default:
return AVERROR(ENOSYS);
}
}
return 0;
}
static int cbs_jpeg_write_scan(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit,
PutBitContext *pbc) {
JPEGRawScan *scan = unit->content;
int err;
err = cbs_jpeg_write_scan_header(ctx, pbc, &scan->header);
if(err < 0)
return err;
if(scan->data) {
if(scan->data_size * 8 > put_bits_left(pbc))
return AVERROR(ENOSPC);
av_assert0(put_bits_count(pbc) % 8 == 0);
flush_put_bits(pbc);
memcpy(put_bits_ptr(pbc), scan->data, scan->data_size);
skip_put_bytes(pbc, scan->data_size);
}
return 0;
}
static int cbs_jpeg_write_segment(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit,
PutBitContext *pbc) {
int err;
if(unit->type >= JPEG_MARKER_SOF0 &&
unit->type <= JPEG_MARKER_SOF3) {
err = cbs_jpeg_write_frame_header(ctx, pbc, unit->content);
}
else if(unit->type >= JPEG_MARKER_APPN &&
unit->type <= JPEG_MARKER_APPN + 15) {
err = cbs_jpeg_write_application_data(ctx, pbc, unit->content);
}
else {
switch(unit->type) {
#define SEGMENT(marker, func) \
case JPEG_MARKER_##marker: \
err = cbs_jpeg_write_##func(ctx, pbc, unit->content); \
break;
SEGMENT(DQT, dqt);
SEGMENT(DHT, dht);
SEGMENT(COM, comment);
default:
return AVERROR_PATCHWELCOME;
}
}
return err;
}
static int cbs_jpeg_write_unit(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit,
PutBitContext *pbc) {
if(unit->type == JPEG_MARKER_SOS)
return cbs_jpeg_write_scan(ctx, unit, pbc);
else
return cbs_jpeg_write_segment(ctx, unit, pbc);
}
/* Reassemble a JPEG fragment into one contiguous bytestream:
 * SOI marker, then each unit as 0xff + marker byte + payload, then EOI.
 * Scan (SOS) payloads get every 0xff byte re-stuffed with a 0x00 escape. */
static int cbs_jpeg_assemble_fragment(CodedBitstreamContext *ctx,
                                      CodedBitstreamFragment *frag) {
    const CodedBitstreamUnit *unit;
    uint8_t *data;
    size_t size, dp, sp;
    int i;
    // First pass: compute the output size exactly.
    size = 4; // SOI + EOI.
    for(i = 0; i < frag->nb_units; i++) {
        unit = &frag->units[i];
        size += 2 + unit->data_size; // marker prefix + payload
        if(unit->type == JPEG_MARKER_SOS) {
            // One extra stuffing byte per 0xff in the entropy-coded data.
            for(sp = 0; sp < unit->data_size; sp++) {
                if(unit->data[sp] == 0xff)
                    ++size;
            }
        }
    }
    frag->data_ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
    if(!frag->data_ref)
        return AVERROR(ENOMEM);
    data = frag->data_ref->data;
    dp = 0;
    data[dp++] = 0xff;
    data[dp++] = JPEG_MARKER_SOI;
    // Second pass: emit markers and payloads.
    for(i = 0; i < frag->nb_units; i++) {
        unit = &frag->units[i];
        data[dp++] = 0xff;
        data[dp++] = unit->type;
        if(unit->type != JPEG_MARKER_SOS) {
            memcpy(data + dp, unit->data, unit->data_size);
            dp += unit->data_size;
        }
        else {
            // First 16 bits of an SOS unit are the scan-header length;
            // copy the header verbatim, then byte-stuff the scan data.
            sp = AV_RB16(unit->data);
            av_assert0(sp <= unit->data_size);
            memcpy(data + dp, unit->data, sp);
            dp += sp;
            for(; sp < unit->data_size; sp++) {
                if(unit->data[sp] == 0xff) {
                    data[dp++] = 0xff;
                    data[dp++] = 0x00;
                }
                else {
                    data[dp++] = unit->data[sp];
                }
            }
        }
    }
    data[dp++] = 0xff;
    data[dp++] = JPEG_MARKER_EOI;
    // The two passes must agree on the output size.
    av_assert0(dp == size);
    memset(data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    frag->data = data;
    frag->data_size = size;
    return 0;
}
// CBS backend descriptor for (M)JPEG bitstreams.
const CodedBitstreamType ff_cbs_type_jpeg = {
    .codec_id = AV_CODEC_ID_MJPEG,
    .split_fragment = &cbs_jpeg_split_fragment,
    .read_unit = &cbs_jpeg_read_unit,
    .write_unit = &cbs_jpeg_write_unit,
    .assemble_fragment = &cbs_jpeg_assemble_fragment,
};

View File

@ -1,189 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Read/write a JPEG frame header (SOFn segment body): length, precision,
 * dimensions and per-component sampling/quantisation-table selectors.
 * Expanded for both directions via the READ/WRITE template macros. */
static int FUNC(frame_header)(CodedBitstreamContext *ctx, RWContext *rw,
                              JPEGRawFrameHeader *current) {
    int err, i;
    HEADER("Frame Header");
    u(16, Lf, 8, 8 + 3 * JPEG_MAX_COMPONENTS); // segment length
    u(8, P, 2, 16);
    u(16, Y, 0, JPEG_MAX_HEIGHT);
    u(16, X, 1, JPEG_MAX_WIDTH);
    u(8, Nf, 1, JPEG_MAX_COMPONENTS); // number of components
    for(i = 0; i < current->Nf; i++) {
        us(8, C[i], i, 0, JPEG_MAX_COMPONENTS);
        us(4, H[i], i, 1, 4);
        us(4, V[i], i, 1, 4);
        us(8, Tq[i], i, 0, 3);
    }
    return 0;
}
/* Read/write one quantisation table: Pq selects 8- or 16-bit entries,
 * Tq is the table slot, followed by 64 quantiser values. */
static int FUNC(quantisation_table)(CodedBitstreamContext *ctx, RWContext *rw,
                                    JPEGRawQuantisationTable *current) {
    int err, i;
    u(4, Pq, 0, 1);
    u(4, Tq, 0, 3);
    if(current->Pq) {
        // 16-bit precision entries.
        for(i = 0; i < 64; i++)
            us(16, Q[i], i, 1, 255);
    }
    else {
        for(i = 0; i < 64; i++)
            us(8, Q[i], i, 1, 255);
    }
    return 0;
}
/* Read/write a DQT segment: a length field followed by one or more
 * quantisation tables (65 bytes each for 8-bit precision). */
static int FUNC(dqt)(CodedBitstreamContext *ctx, RWContext *rw,
                     JPEGRawQuantisationTableSpecification *current) {
    int err, i, n;
    HEADER("Quantisation Tables");
    u(16, Lq, 2, 2 + 4 * 65);
    // NOTE(review): table count derived assuming 65-byte (8-bit) tables;
    // 16-bit tables would make Lq / 65 inexact — confirm against callers.
    n = current->Lq / 65;
    for(i = 0; i < n; i++)
        CHECK(FUNC(quantisation_table)(ctx, rw, &current->table[i]));
    return 0;
}
/* Read/write one Huffman table: class (Tc) and slot (Th), 16 code-length
 * counts, then the code values; total values are capped at 224. */
static int FUNC(huffman_table)(CodedBitstreamContext *ctx, RWContext *rw,
                               JPEGRawHuffmanTable *current) {
    int err, i, j, ij;
    u(4, Tc, 0, 1);
    u(4, Th, 0, 3);
    for(i = 0; i < 16; i++)
        us(8, L[i], i, 0, 224);
    ij = 0; // running index into the flat V[] value array
    for(i = 0; i < 16; i++) {
        for(j = 0; j < current->L[i]; j++) {
            if(ij >= 224)
                return AVERROR_INVALIDDATA; // more values than V[] can hold
            us(8, V[ij], ij, 0, 255);
            ++ij;
        }
    }
    return 0;
}
/* Read/write a DHT segment: consume tables until the declared segment
 * length Lh is exhausted; n tracks bytes consumed so far (2 for Lh). */
static int FUNC(dht)(CodedBitstreamContext *ctx, RWContext *rw,
                     JPEGRawHuffmanTableSpecification *current) {
    int err, i, j, n;
    HEADER("Huffman Tables");
    u(16, Lh, 2, 2 + 8 * (1 + 16 + 256));
    n = 2;
    for(i = 0; n < current->Lh; i++) {
        if(i >= 8)
            return AVERROR_INVALIDDATA; // more tables than table[] can hold
        CHECK(FUNC(huffman_table)(ctx, rw, &current->table[i]));
        // Account for this table's bytes: 1 (Tc/Th) + 16 lengths + values.
        ++n;
        for(j = 0; j < 16; j++)
            n += 1 + current->table[i].L[j];
    }
    return 0;
}
/* Read/write an SOS scan header: component selectors with their DC/AC
 * Huffman table choices, plus spectral-selection/approximation fields. */
static int FUNC(scan_header)(CodedBitstreamContext *ctx, RWContext *rw,
                             JPEGRawScanHeader *current) {
    int err, j;
    HEADER("Scan");
    u(16, Ls, 6, 6 + 2 * JPEG_MAX_COMPONENTS);
    u(8, Ns, 1, 4); // components in this scan
    for(j = 0; j < current->Ns; j++) {
        us(8, Cs[j], j, 0, JPEG_MAX_COMPONENTS);
        us(4, Td[j], j, 0, 3);
        us(4, Ta[j], j, 0, 3);
    }
    u(8, Ss, 0, 63);
    u(8, Se, 0, 63);
    u(4, Ah, 0, 13);
    u(4, Al, 0, 15);
    return 0;
}
/* Read/write an APPn segment as an opaque byte blob of Lp - 2 bytes.
 * When reading, the payload buffer is allocated here and owned by the
 * unit content via Ap_ref. */
static int FUNC(application_data)(CodedBitstreamContext *ctx, RWContext *rw,
                                  JPEGRawApplicationData *current) {
    int err, i;
    HEADER("Application Data");
    u(16, Lp, 2, 65535);
    if(current->Lp > 2) {
#ifdef READ
        current->Ap_ref = av_buffer_alloc(current->Lp - 2);
        if(!current->Ap_ref)
            return AVERROR(ENOMEM);
        current->Ap = current->Ap_ref->data;
#endif
        for(i = 0; i < current->Lp - 2; i++)
            us(8, Ap[i], i, 0, 255);
    }
    return 0;
}
/* Read/write a COM segment as an opaque byte blob of Lc - 2 bytes.
 * When reading, the comment buffer is allocated here (Cm_ref owns it). */
static int FUNC(comment)(CodedBitstreamContext *ctx, RWContext *rw,
                         JPEGRawComment *current) {
    int err, i;
    HEADER("Comment");
    u(16, Lc, 2, 65535);
    if(current->Lc > 2) {
#ifdef READ
        current->Cm_ref = av_buffer_alloc(current->Lc - 2);
        if(!current->Cm_ref)
            return AVERROR(ENOMEM);
        current->Cm = current->Cm_ref->data;
#endif
        for(i = 0; i < current->Lc - 2; i++)
            us(8, Cm[i], i, 0, 255);
    }
    return 0;
}

View File

@ -1,469 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <libavutil/avassert.h>
#include "cbs/cbs.h"
#include "cbs/cbs_mpeg2.h"
#include "cbs_internal.h"
// Trace helper: announces the syntax structure currently being processed.
#define HEADER(name) \
    do { \
        ff_cbs_trace_header(ctx, name); \
    } while(0)
// Propagate errors from nested syntax functions (requires a local `err`).
#define CHECK(call) \
    do { \
        err = (call); \
        if(err < 0) \
            return err; \
    } while(0)
// Name mangling: FUNC(x) expands to cbs_mpeg2_read_x / cbs_mpeg2_write_x
// depending on which READWRITE instantiation is active below.
#define FUNC_NAME(rw, codec, name) cbs_##codec##_##rw##_##name
#define FUNC_MPEG2(rw, name) FUNC_NAME(rw, mpeg2, name)
#define FUNC(name) FUNC_MPEG2(READWRITE, name)
// Builds the subscript array passed to the CBS trace functions;
// NULL when the element has no subscripts.
#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]) { subs, __VA_ARGS__ }) : NULL)
// Field accessors used by the syntax template:
//   ui   - unsigned, full range for the bit width
//   uir  - unsigned, range restricted to start at 1 (nonzero)
//   uis  - unsigned with subscripts; uirs - nonzero with subscripts
#define ui(width, name) \
    xui(width, name, current->name, 0, MAX_UINT_BITS(width), 0, )
#define uir(width, name) \
    xui(width, name, current->name, 1, MAX_UINT_BITS(width), 0, )
#define uis(width, name, subs, ...) \
    xui(width, name, current->name, 0, MAX_UINT_BITS(width), subs, __VA_ARGS__)
#define uirs(width, name, subs, ...) \
    xui(width, name, current->name, 1, MAX_UINT_BITS(width), subs, __VA_ARGS__)
#define xui(width, name, var, range_min, range_max, subs, ...) \
    xuia(width, #name, var, range_min, range_max, subs, __VA_ARGS__)
// Signed field with subscripts.
#define sis(width, name, subs, ...) \
    xsi(width, name, current->name, subs, __VA_ARGS__)
// A single bit that must have a fixed value.
#define marker_bit() \
    bit("marker_bit", 1)
#define bit(string, value) \
    do { \
        av_unused uint32_t bit = value; \
        xuia(1, string, bit, value, value, 0, ); \
    } while(0)
// --- First instantiation: READ direction over a GetBitContext. ---
#define READ
#define READWRITE read
#define RWContext GetBitContext
#define xuia(width, string, var, range_min, range_max, subs, ...) \
    do { \
        uint32_t value; \
        CHECK(ff_cbs_read_unsigned(ctx, rw, width, string, \
                                   SUBSCRIPTS(subs, __VA_ARGS__), \
                                   &value, range_min, range_max)); \
        var = value; \
    } while(0)
#define xsi(width, name, var, subs, ...) \
    do { \
        int32_t value; \
        CHECK(ff_cbs_read_signed(ctx, rw, width, #name, \
                                 SUBSCRIPTS(subs, __VA_ARGS__), &value, \
                                 MIN_INT_BITS(width), \
                                 MAX_INT_BITS(width))); \
        var = value; \
    } while(0)
// Peek: true when at least `width` bits remain and they equal `compare`.
#define nextbits(width, compare, var) \
    (get_bits_left(rw) >= width && \
     (var = show_bits(rw, width)) == (compare))
// Reading: inferred fields are simply assigned their derived value.
#define infer(name, value) \
    do { \
        current->name = value; \
    } while(0)
#include "cbs_mpeg2_syntax_template.c"
#undef READ
#undef READWRITE
#undef RWContext
#undef xuia
#undef xsi
#undef nextbits
#undef infer
// --- Second instantiation: WRITE direction over a PutBitContext. ---
#define WRITE
#define READWRITE write
#define RWContext PutBitContext
#define xuia(width, string, var, range_min, range_max, subs, ...) \
    do { \
        CHECK(ff_cbs_write_unsigned(ctx, rw, width, string, \
                                    SUBSCRIPTS(subs, __VA_ARGS__), \
                                    var, range_min, range_max)); \
    } while(0)
#define xsi(width, name, var, subs, ...) \
    do { \
        CHECK(ff_cbs_write_signed(ctx, rw, width, #name, \
                                  SUBSCRIPTS(subs, __VA_ARGS__), var, \
                                  MIN_INT_BITS(width), \
                                  MAX_INT_BITS(width))); \
    } while(0)
// Writing: "next bits" cannot be peeked, so the stored flag decides.
#define nextbits(width, compare, var) (var)
// Writing: inferred fields are checked against the expected value.
#define infer(name, value) \
    do { \
        if(current->name != (value)) { \
            av_log(ctx->log_ctx, AV_LOG_WARNING, "Warning: " \
                                                 "%s does not match inferred value: " \
                                                 "%" PRId64 ", but should be %" PRId64 ".\n", \
                   #name, (int64_t)current->name, (int64_t)(value)); \
        } \
    } while(0)
#include "cbs_mpeg2_syntax_template.c"
#undef WRITE
#undef READWRITE
#undef RWContext
#undef xuia
#undef xsi
#undef nextbits
#undef infer
/* Scan [p, end) for an MPEG start code (00 00 01 xx), carrying the last
 * four bytes seen in *state across calls.  Returns a pointer just past the
 * byte following the start code (or `end` when none was found); on return
 * *state holds the last 32 bits read.  Local copy of the FFmpeg helper. */
static const uint8_t *avpriv_find_start_code(const uint8_t *restrict p,
                                             const uint8_t *end,
                                             uint32_t *restrict state) {
    int i;
    av_assert0(p <= end);
    if(p >= end)
        return end;
    // Feed up to 3 bytes through the carried state so a start code split
    // across the previous call's boundary is still detected.
    for(i = 0; i < 3; i++) {
        uint32_t tmp = *state << 8;
        *state = tmp + *(p++);
        if(tmp == 0x100 || p == end)
            return p;
    }
    // Main loop: stride by up to 3 bytes, ruling out positions that
    // cannot contain 00 00 01 based on the bytes just behind p.
    while(p < end) {
        if(p[-1] > 1) p += 3;
        else if(p[-2])
            p += 2;
        else if(p[-3] | (p[-1] - 1))
            p++;
        else {
            p++;
            break;
        }
    }
    // Reload the carried state from the last 4 bytes before returning.
    p = FFMIN(p, end) - 4;
    *state = AV_RB32(p);
    return p + 4;
}
/* Split a raw MPEG-2 fragment into units, one per start code.  Each unit's
 * data covers its start-code-identifier byte up to (but excluding) the next
 * start code; the final unit runs to the end of the fragment. */
static int cbs_mpeg2_split_fragment(CodedBitstreamContext *ctx,
                                    CodedBitstreamFragment *frag,
                                    int header) {
    const uint8_t *start, *end;
    CodedBitstreamUnitType unit_type;
    uint32_t start_code = -1;
    size_t unit_size;
    int err, i, final = 0;
    start = avpriv_find_start_code(frag->data, frag->data + frag->data_size,
                                   &start_code);
    if(start_code >> 8 != 0x000001) {
        // No start code found.
        return AVERROR_INVALIDDATA;
    }
    for(i = 0;; i++) {
        // The low byte of the start code is the unit type.
        unit_type = start_code & 0xff;
        if(start == frag->data + frag->data_size) {
            // The last four bytes form a start code which constitutes
            // a unit of its own. In this situation avpriv_find_start_code
            // won't modify start_code at all so modify start_code so that
            // the next unit will be treated as the last unit.
            start_code = 0;
        }
        // start-- compensates: the scanner returned the byte AFTER the
        // identifier, but the unit's data must include the identifier.
        end = avpriv_find_start_code(start--, frag->data + frag->data_size,
                                     &start_code);
        // start points to the byte containing the start_code_identifier
        // (may be the last byte of fragment->data); end points to the byte
        // following the byte containing the start code identifier (or to
        // the end of fragment->data).
        if(start_code >> 8 == 0x000001) {
            // Unit runs from start to the beginning of the start code
            // pointed to by end (including any padding zeroes).
            unit_size = (end - 4) - start;
        }
        else {
            // We didn't find a start code, so this is the final unit.
            unit_size = end - start;
            final = 1;
        }
        err = ff_cbs_insert_unit_data(frag, i, unit_type, (uint8_t *)start,
                                      unit_size, frag->data_ref);
        if(err < 0)
            return err;
        if(final)
            break;
        start = end;
    }
    return 0;
}
/* Parse one MPEG-2 unit into its typed content structure.  Slices keep a
 * reference into the unit's raw data for the entropy-coded remainder;
 * header units dispatch to their syntax-template reader via START. */
static int cbs_mpeg2_read_unit(CodedBitstreamContext *ctx,
                               CodedBitstreamUnit *unit) {
    GetBitContext gbc;
    int err;
    err = init_get_bits(&gbc, unit->data, 8 * unit->data_size);
    if(err < 0)
        return err;
    // Allocates unit->content based on cbs_mpeg2_unit_types.
    err = ff_cbs_alloc_unit_content2(ctx, unit);
    if(err < 0)
        return err;
    if(MPEG2_START_IS_SLICE(unit->type)) {
        MPEG2RawSlice *slice = unit->content;
        int pos, len;
        err = cbs_mpeg2_read_slice_header(ctx, &gbc, &slice->header);
        if(err < 0)
            return err;
        if(!get_bits_left(&gbc))
            return AVERROR_INVALIDDATA;
        // Remaining bits after the header are the slice payload; keep a
        // reference into the unit's buffer rather than copying.
        pos = get_bits_count(&gbc);
        len = unit->data_size;
        slice->data_size = len - pos / 8;
        slice->data_ref = av_buffer_ref(unit->data_ref);
        if(!slice->data_ref)
            return AVERROR(ENOMEM);
        slice->data = unit->data + pos / 8;
        slice->data_bit_start = pos % 8; // payload may start mid-byte
    }
    else {
        switch(unit->type) {
// NOTE(review): free_func parameter is accepted but unused by this macro;
// content lifetime is handled by ff_cbs_alloc_unit_content2.
#define START(start_code, type, read_func, free_func) \
    case start_code: { \
        type *header = unit->content; \
        err = cbs_mpeg2_read_##read_func(ctx, &gbc, header); \
        if(err < 0) \
            return err; \
    } break;
            START(MPEG2_START_PICTURE, MPEG2RawPictureHeader,
                  picture_header, &cbs_mpeg2_free_picture_header);
            START(MPEG2_START_USER_DATA, MPEG2RawUserData,
                  user_data, &cbs_mpeg2_free_user_data);
            START(MPEG2_START_SEQUENCE_HEADER, MPEG2RawSequenceHeader,
                  sequence_header, NULL);
            START(MPEG2_START_EXTENSION, MPEG2RawExtensionData,
                  extension_data, NULL);
            START(MPEG2_START_GROUP, MPEG2RawGroupOfPicturesHeader,
                  group_of_pictures_header, NULL);
            START(MPEG2_START_SEQUENCE_END, MPEG2RawSequenceEnd,
                  sequence_end, NULL);
#undef START
        default:
            return AVERROR(ENOSYS);
        }
    }
    return 0;
}
/* Serialise a non-slice unit by dispatching on its start code to the
 * matching syntax-template writer.  Unknown start codes are rejected. */
static int cbs_mpeg2_write_header(CodedBitstreamContext *ctx,
                                  CodedBitstreamUnit *unit,
                                  PutBitContext *pbc) {
    int err;
    switch(unit->type) {
// NOTE(review): the `type` parameter is unused by this macro body.
#define START(start_code, type, func) \
    case start_code: \
        err = cbs_mpeg2_write_##func(ctx, pbc, unit->content); \
        break;
        START(MPEG2_START_PICTURE, MPEG2RawPictureHeader, picture_header);
        START(MPEG2_START_USER_DATA, MPEG2RawUserData, user_data);
        START(MPEG2_START_SEQUENCE_HEADER, MPEG2RawSequenceHeader, sequence_header);
        START(MPEG2_START_EXTENSION, MPEG2RawExtensionData, extension_data);
        START(MPEG2_START_GROUP, MPEG2RawGroupOfPicturesHeader,
              group_of_pictures_header);
        START(MPEG2_START_SEQUENCE_END, MPEG2RawSequenceEnd, sequence_end);
#undef START
    default:
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Write unimplemented for start "
                                           "code %02" PRIx32 ".\n",
               unit->type);
        return AVERROR_PATCHWELCOME;
    }
    return err;
}
/* Serialise a slice: write the slice header, then append the stored
 * entropy-coded payload, which may begin at a non-byte-aligned bit
 * offset (data_bit_start). */
static int cbs_mpeg2_write_slice(CodedBitstreamContext *ctx,
                                 CodedBitstreamUnit *unit,
                                 PutBitContext *pbc) {
    MPEG2RawSlice *slice = unit->content;
    int err;
    err = cbs_mpeg2_write_slice_header(ctx, pbc, &slice->header);
    if(err < 0)
        return err;
    if(slice->data) {
        // Whole bytes remaining after the (possibly partial) first byte.
        size_t rest = slice->data_size - (slice->data_bit_start + 7) / 8;
        uint8_t *pos = slice->data + slice->data_bit_start / 8;
        av_assert0(slice->data_bit_start >= 0 &&
                   slice->data_size > slice->data_bit_start / 8);
        if(slice->data_size * 8 + 8 > put_bits_left(pbc))
            return AVERROR(ENOSPC);
        // First copy the remaining bits of the first byte
        if(slice->data_bit_start % 8)
            put_bits(pbc, 8 - slice->data_bit_start % 8,
                     *pos++ & MAX_UINT_BITS(8 - slice->data_bit_start % 8));
        if(put_bits_count(pbc) % 8 == 0) {
            // If the writer is aligned at this point,
            // memcpy can be used to improve performance.
            // This is the normal case.
            flush_put_bits(pbc);
            memcpy(put_bits_ptr(pbc), pos, rest);
            skip_put_bytes(pbc, rest);
        }
        else {
            // If not, we have to copy manually:
            for(; rest > 3; rest -= 4, pos += 4)
                put_bits32(pbc, AV_RB32(pos));
            for(; rest; rest--, pos++)
                put_bits(pbc, 8, *pos);
            // Align with zeros
            put_bits(pbc, 8 - put_bits_count(pbc) % 8, 0);
        }
    }
    return 0;
}
/* Route a unit to the slice writer or the header writer by start code. */
static int cbs_mpeg2_write_unit(CodedBitstreamContext *ctx,
                                CodedBitstreamUnit *unit,
                                PutBitContext *pbc) {
    return MPEG2_START_IS_SLICE(unit->type)
               ? cbs_mpeg2_write_slice(ctx, unit, pbc)
               : cbs_mpeg2_write_header(ctx, unit, pbc);
}
/* Concatenate all units into one buffer, prefixing each with the
 * three-byte 00 00 01 start code. */
static int cbs_mpeg2_assemble_fragment(CodedBitstreamContext *ctx,
                                       CodedBitstreamFragment *frag) {
    size_t total = 0, pos = 0;
    uint8_t *out;
    int i;
    for(i = 0; i < frag->nb_units; i++)
        total += 3 + frag->units[i].data_size;
    frag->data_ref = av_buffer_alloc(total + AV_INPUT_BUFFER_PADDING_SIZE);
    if(!frag->data_ref)
        return AVERROR(ENOMEM);
    out = frag->data_ref->data;
    for(i = 0; i < frag->nb_units; i++) {
        const CodedBitstreamUnit *unit = &frag->units[i];
        out[pos + 0] = 0;
        out[pos + 1] = 0;
        out[pos + 2] = 1;
        memcpy(out + pos + 3, unit->data, unit->data_size);
        pos += 3 + unit->data_size;
    }
    av_assert0(pos == total);
    memset(out + total, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    frag->data = out;
    frag->data_size = total;
    return 0;
}
/* Content descriptors per unit type: how to allocate each content struct
 * and which fields hold internal AVBufferRef references for cloning/free. */
static const CodedBitstreamUnitTypeDescriptor cbs_mpeg2_unit_types[] = {
    CBS_UNIT_TYPE_INTERNAL_REF(MPEG2_START_PICTURE, MPEG2RawPictureHeader,
                               extra_information_picture.extra_information),
    {
        // Slice start codes span 0x01..0xaf; slices carry two refs:
        // the extra-information buffer and the slice payload.
        .nb_unit_types = CBS_UNIT_TYPE_RANGE,
        .unit_type_range_start = 0x01,
        .unit_type_range_end = 0xaf,
        .content_type = CBS_CONTENT_TYPE_INTERNAL_REFS,
        .content_size = sizeof(MPEG2RawSlice),
        .nb_ref_offsets = 2,
        .ref_offsets = { offsetof(MPEG2RawSlice, header.extra_information_slice.extra_information),
                         offsetof(MPEG2RawSlice, data) },
    },
    CBS_UNIT_TYPE_INTERNAL_REF(MPEG2_START_USER_DATA, MPEG2RawUserData,
                               user_data),
    CBS_UNIT_TYPE_POD(MPEG2_START_SEQUENCE_HEADER, MPEG2RawSequenceHeader),
    CBS_UNIT_TYPE_POD(MPEG2_START_EXTENSION, MPEG2RawExtensionData),
    CBS_UNIT_TYPE_POD(MPEG2_START_SEQUENCE_END, MPEG2RawSequenceEnd),
    CBS_UNIT_TYPE_POD(MPEG2_START_GROUP, MPEG2RawGroupOfPicturesHeader),
    CBS_UNIT_TYPE_END_OF_LIST
};
// CBS backend descriptor for MPEG-2 video bitstreams.
const CodedBitstreamType ff_cbs_type_mpeg2 = {
    .codec_id = AV_CODEC_ID_MPEG2VIDEO,
    .priv_data_size = sizeof(CodedBitstreamMPEG2Context),
    .unit_types = cbs_mpeg2_unit_types,
    .split_fragment = &cbs_mpeg2_split_fragment,
    .read_unit = &cbs_mpeg2_read_unit,
    .write_unit = &cbs_mpeg2_write_unit,
    .assemble_fragment = &cbs_mpeg2_assemble_fragment,
};

View File

@ -1,413 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Read/write the sequence header; also caches the (12-bit) picture
 * dimensions in the codec private context for later slice parsing. */
static int FUNC(sequence_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                 MPEG2RawSequenceHeader *current) {
    CodedBitstreamMPEG2Context *mpeg2 = ctx->priv_data;
    int err, i;
    HEADER("Sequence Header");
    ui(8, sequence_header_code);
    uir(12, horizontal_size_value);
    uir(12, vertical_size_value);
    // Low 12 bits; a sequence extension may add the high bits later.
    mpeg2->horizontal_size = current->horizontal_size_value;
    mpeg2->vertical_size = current->vertical_size_value;
    uir(4, aspect_ratio_information);
    uir(4, frame_rate_code);
    ui(18, bit_rate_value);
    marker_bit();
    ui(10, vbv_buffer_size_value);
    ui(1, constrained_parameters_flag);
    ui(1, load_intra_quantiser_matrix);
    if(current->load_intra_quantiser_matrix) {
        for(i = 0; i < 64; i++)
            uirs(8, intra_quantiser_matrix[i], 1, i);
    }
    ui(1, load_non_intra_quantiser_matrix);
    if(current->load_non_intra_quantiser_matrix) {
        for(i = 0; i < 64; i++)
            uirs(8, non_intra_quantiser_matrix[i], 1, i);
    }
    return 0;
}
/* Read/write a user-data unit: everything after the start code is an
 * opaque byte blob.  When reading, the buffer is allocated here from the
 * remaining (byte-aligned) bits. */
static int FUNC(user_data)(CodedBitstreamContext *ctx, RWContext *rw,
                           MPEG2RawUserData *current) {
    size_t k;
    int err;
    HEADER("User Data");
    ui(8, user_data_start_code);
#ifdef READ
    k = get_bits_left(rw);
    av_assert0(k % 8 == 0);
    current->user_data_length = k /= 8;
    if(k > 0) {
        current->user_data_ref = av_buffer_allocz(k + AV_INPUT_BUFFER_PADDING_SIZE);
        if(!current->user_data_ref)
            return AVERROR(ENOMEM);
        current->user_data = current->user_data_ref->data;
    }
#endif
    for(k = 0; k < current->user_data_length; k++)
        uis(8, user_data[k], 1, k);
    return 0;
}
/* Read/write the sequence extension; merges the 2-bit size extensions
 * into the cached dimensions and records progressive_sequence for
 * picture-display parsing. */
static int FUNC(sequence_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                    MPEG2RawSequenceExtension *current) {
    CodedBitstreamMPEG2Context *mpeg2 = ctx->priv_data;
    int err;
    HEADER("Sequence Extension");
    ui(8, profile_and_level_indication);
    ui(1, progressive_sequence);
    ui(2, chroma_format);
    ui(2, horizontal_size_extension);
    ui(2, vertical_size_extension);
    // Combine with the low 12 bits stored by the sequence header.
    mpeg2->horizontal_size = (mpeg2->horizontal_size & 0xfff) |
                             current->horizontal_size_extension << 12;
    mpeg2->vertical_size = (mpeg2->vertical_size & 0xfff) |
                           current->vertical_size_extension << 12;
    mpeg2->progressive_sequence = current->progressive_sequence;
    ui(12, bit_rate_extension);
    marker_bit();
    ui(8, vbv_buffer_size_extension);
    ui(1, low_delay);
    ui(2, frame_rate_extension_n);
    ui(5, frame_rate_extension_d);
    return 0;
}
/* Read/write the sequence display extension.  On read, colour descriptors
 * with the invalid value 0 are patched to 2 ("unspecified") with a warning;
 * on write, 0 is simply rejected by the range check. */
static int FUNC(sequence_display_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                            MPEG2RawSequenceDisplayExtension *current) {
    int err;
    HEADER("Sequence Display Extension");
    ui(3, video_format);
    ui(1, colour_description);
    if(current->colour_description) {
#ifdef READ
#define READ_AND_PATCH(name) \
    do { \
        ui(8, name); \
        if(current->name == 0) { \
            current->name = 2; \
            av_log(ctx->log_ctx, AV_LOG_WARNING, "%s in a sequence display " \
                                                 "extension had the invalid value 0. Setting it to 2 " \
                                                 "(meaning unknown) instead.\n", \
                   #name); \
        } \
    } while(0)
        READ_AND_PATCH(colour_primaries);
        READ_AND_PATCH(transfer_characteristics);
        READ_AND_PATCH(matrix_coefficients);
#undef READ_AND_PATCH
#else
        uir(8, colour_primaries);
        uir(8, transfer_characteristics);
        uir(8, matrix_coefficients);
#endif
    }
    else {
        // Absent descriptors default to 2 ("unspecified").
        infer(colour_primaries, 2);
        infer(transfer_characteristics, 2);
        infer(matrix_coefficients, 2);
    }
    ui(14, display_horizontal_size);
    marker_bit();
    ui(14, display_vertical_size);
    return 0;
}
/* Read/write a GOP header: 25-bit time code plus closed-GOP and
 * broken-link flags. */
static int FUNC(group_of_pictures_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                          MPEG2RawGroupOfPicturesHeader *current) {
    int err;
    HEADER("Group of Pictures Header");
    ui(8, group_start_code);
    ui(25, time_code);
    ui(1, closed_gop);
    ui(1, broken_link);
    return 0;
}
/* Read/write an extra_information run: a sequence of (1-bit marker = 1,
 * 8-bit byte) pairs terminated by a 0 marker bit.  On read, a first pass
 * counts the bytes so the buffer can be allocated, then the bit reader is
 * rewound for the real pass. */
static int FUNC(extra_information)(CodedBitstreamContext *ctx, RWContext *rw,
                                   MPEG2RawExtraInformation *current,
                                   const char *element_name, const char *marker_name) {
    int err;
    size_t k;
#ifdef READ
    GetBitContext start = *rw; // saved position for the rewind
    uint8_t bit;
    for(k = 0; nextbits(1, 1, bit); k++)
        skip_bits(rw, 1 + 8);
    current->extra_information_length = k;
    if(k > 0) {
        *rw = start;
        current->extra_information_ref =
            av_buffer_allocz(k + AV_INPUT_BUFFER_PADDING_SIZE);
        if(!current->extra_information_ref)
            return AVERROR(ENOMEM);
        current->extra_information = current->extra_information_ref->data;
    }
#endif
    for(k = 0; k < current->extra_information_length; k++) {
        bit(marker_name, 1);
        xuia(8, element_name,
             current->extra_information[k], 0, 255, 1, k);
    }
    bit(marker_name, 0); // terminating zero bit
    return 0;
}
/* Read/write a picture header; forward/backward f-codes are only present
 * for P (type 2) and B (type 3) pictures. */
static int FUNC(picture_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                MPEG2RawPictureHeader *current) {
    int err;
    HEADER("Picture Header");
    ui(8, picture_start_code);
    ui(10, temporal_reference);
    uir(3, picture_coding_type);
    ui(16, vbv_delay);
    if(current->picture_coding_type == 2 ||
       current->picture_coding_type == 3) {
        ui(1, full_pel_forward_vector);
        ui(3, forward_f_code);
    }
    if(current->picture_coding_type == 3) {
        ui(1, full_pel_backward_vector);
        ui(3, backward_f_code);
    }
    CHECK(FUNC(extra_information)(ctx, rw, &current->extra_information_picture,
                                  "extra_information_picture[k]", "extra_bit_picture"));
    return 0;
}
/* Read/write the picture coding extension.  Also derives
 * number_of_frame_centre_offsets (needed by picture_display_extension)
 * from progressive_sequence, picture_structure and repeat_first_field. */
static int FUNC(picture_coding_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                          MPEG2RawPictureCodingExtension *current) {
    CodedBitstreamMPEG2Context *mpeg2 = ctx->priv_data;
    int err;
    HEADER("Picture Coding Extension");
    uir(4, f_code[0][0]);
    uir(4, f_code[0][1]);
    uir(4, f_code[1][0]);
    uir(4, f_code[1][1]);
    ui(2, intra_dc_precision);
    ui(2, picture_structure);
    ui(1, top_field_first);
    ui(1, frame_pred_frame_dct);
    ui(1, concealment_motion_vectors);
    ui(1, q_scale_type);
    ui(1, intra_vlc_format);
    ui(1, alternate_scan);
    ui(1, repeat_first_field);
    ui(1, chroma_420_type);
    ui(1, progressive_frame);
    if(mpeg2->progressive_sequence) {
        if(current->repeat_first_field) {
            if(current->top_field_first)
                mpeg2->number_of_frame_centre_offsets = 3;
            else
                mpeg2->number_of_frame_centre_offsets = 2;
        }
        else {
            mpeg2->number_of_frame_centre_offsets = 1;
        }
    }
    else {
        if(current->picture_structure == 1 || // Top field.
           current->picture_structure == 2) { // Bottom field.
            mpeg2->number_of_frame_centre_offsets = 1;
        }
        else {
            if(current->repeat_first_field)
                mpeg2->number_of_frame_centre_offsets = 3;
            else
                mpeg2->number_of_frame_centre_offsets = 2;
        }
    }
    ui(1, composite_display_flag);
    if(current->composite_display_flag) {
        ui(1, v_axis);
        ui(3, field_sequence);
        ui(1, sub_carrier);
        ui(7, burst_amplitude);
        ui(8, sub_carrier_phase);
    }
    return 0;
}
/* Read/write the quant matrix extension: four optional 64-entry tables
 * (luma/chroma x intra/non-intra), each guarded by a load flag. */
static int FUNC(quant_matrix_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                        MPEG2RawQuantMatrixExtension *current) {
    int err, i;
    HEADER("Quant Matrix Extension");
    ui(1, load_intra_quantiser_matrix);
    if(current->load_intra_quantiser_matrix) {
        for(i = 0; i < 64; i++)
            uirs(8, intra_quantiser_matrix[i], 1, i);
    }
    ui(1, load_non_intra_quantiser_matrix);
    if(current->load_non_intra_quantiser_matrix) {
        for(i = 0; i < 64; i++)
            uirs(8, non_intra_quantiser_matrix[i], 1, i);
    }
    ui(1, load_chroma_intra_quantiser_matrix);
    if(current->load_chroma_intra_quantiser_matrix) {
        for(i = 0; i < 64; i++)
            // Fixed: previously read into intra_quantiser_matrix[i],
            // clobbering the luma table instead of filling the chroma one
            // (mirrors the chroma_non_intra branch below).
            uirs(8, chroma_intra_quantiser_matrix[i], 1, i);
    }
    ui(1, load_chroma_non_intra_quantiser_matrix);
    if(current->load_chroma_non_intra_quantiser_matrix) {
        for(i = 0; i < 64; i++)
            uirs(8, chroma_non_intra_quantiser_matrix[i], 1, i);
    }
    return 0;
}
/* Read/write the picture display extension: one signed centre-offset pair
 * per frame-centre offset, the count having been derived in
 * picture_coding_extension and cached in the private context. */
static int FUNC(picture_display_extension)(CodedBitstreamContext *ctx, RWContext *rw,
                                           MPEG2RawPictureDisplayExtension *current) {
    CodedBitstreamMPEG2Context *mpeg2 = ctx->priv_data;
    int err, i;
    HEADER("Picture Display Extension");
    for(i = 0; i < mpeg2->number_of_frame_centre_offsets; i++) {
        sis(16, frame_centre_horizontal_offset[i], 1, i);
        marker_bit();
        sis(16, frame_centre_vertical_offset[i], 1, i);
        marker_bit();
    }
    return 0;
}
/* Read/write an extension unit: dispatch on the 4-bit extension identifier
 * to the specific extension's syntax function. */
static int FUNC(extension_data)(CodedBitstreamContext *ctx, RWContext *rw,
                                MPEG2RawExtensionData *current) {
    int err;
    HEADER("Extension Data");
    ui(8, extension_start_code);
    ui(4, extension_start_code_identifier);
    switch(current->extension_start_code_identifier) {
    case MPEG2_EXTENSION_SEQUENCE:
        return FUNC(sequence_extension)(ctx, rw, &current->data.sequence);
    case MPEG2_EXTENSION_SEQUENCE_DISPLAY:
        return FUNC(sequence_display_extension)(ctx, rw, &current->data.sequence_display);
    case MPEG2_EXTENSION_QUANT_MATRIX:
        return FUNC(quant_matrix_extension)(ctx, rw, &current->data.quant_matrix);
    case MPEG2_EXTENSION_PICTURE_DISPLAY:
        return FUNC(picture_display_extension)(ctx, rw, &current->data.picture_display);
    case MPEG2_EXTENSION_PICTURE_CODING:
        return FUNC(picture_coding_extension)(ctx, rw, &current->data.picture_coding);
    default:
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Extension ID %d not supported.\n",
               current->extension_start_code_identifier);
        return AVERROR_PATCHWELCOME;
    }
}
/* Read/write a slice header; uses the cached vertical size and scalability
 * state from the private context to decide which optional fields exist. */
static int FUNC(slice_header)(CodedBitstreamContext *ctx, RWContext *rw,
                              MPEG2RawSliceHeader *current) {
    CodedBitstreamMPEG2Context *mpeg2 = ctx->priv_data;
    int err;
    HEADER("Slice Header");
    ui(8, slice_vertical_position);
    // Tall pictures need extra vertical-position bits.
    if(mpeg2->vertical_size > 2800)
        ui(3, slice_vertical_position_extension);
    if(mpeg2->scalable) {
        // scalable_mode 0: data partitioning.
        if(mpeg2->scalable_mode == 0)
            ui(7, priority_breakpoint);
    }
    uir(5, quantiser_scale_code);
    // Optional extension block, signalled by a leading 1 bit.
    if(nextbits(1, 1, current->slice_extension_flag)) {
        ui(1, slice_extension_flag);
        ui(1, intra_slice);
        ui(1, slice_picture_id_enable);
        ui(6, slice_picture_id);
    }
    CHECK(FUNC(extra_information)(ctx, rw, &current->extra_information_slice,
                                  "extra_information_slice[k]", "extra_bit_slice"));
    return 0;
}
/* Read/write a sequence end unit: just the start code byte. */
static int FUNC(sequence_end)(CodedBitstreamContext *ctx, RWContext *rw,
                              MPEG2RawSequenceEnd *current) {
    int err;
    HEADER("Sequence End");
    ui(8, sequence_end_code);
    return 0;
}

View File

@ -1,355 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "cbs/cbs_sei.h"
#include "cbs/cbs.h"
#include "cbs/cbs_h264.h"
#include "cbs/cbs_h265.h"
#include "cbs_internal.h"
/* AVBuffer free callback for SEIRawUserDataRegistered payloads:
 * release the nested data buffer before freeing the struct itself. */
static void cbs_free_user_data_registered(void *opaque, uint8_t *data) {
    SEIRawUserDataRegistered *payload = (SEIRawUserDataRegistered *)data;
    av_buffer_unref(&payload->data_ref);
    av_free(payload);
}
/* AVBuffer free callback for SEIRawUserDataUnregistered payloads:
 * release the nested data buffer before freeing the struct itself. */
static void cbs_free_user_data_unregistered(void *opaque, uint8_t *data) {
    SEIRawUserDataUnregistered *payload = (SEIRawUserDataUnregistered *)data;
    av_buffer_unref(&payload->data_ref);
    av_free(payload);
}
/* Allocate the payload buffer for an SEI message according to its type
 * descriptor.  User-data payload types embed a nested AVBufferRef, so they
 * get a custom free callback; all other types use a plain buffer. */
int ff_cbs_sei_alloc_message_payload(SEIRawMessage *message,
                                     const SEIMessageTypeDescriptor *desc) {
    void (*free_func)(void *, uint8_t *);
    av_assert0(message->payload == NULL &&
               message->payload_ref == NULL);
    message->payload_type = desc->type;
    if(desc->type == SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35)
        free_func = &cbs_free_user_data_registered;
    else if(desc->type == SEI_TYPE_USER_DATA_UNREGISTERED)
        free_func = &cbs_free_user_data_unregistered;
    else
        free_func = NULL;
    if(free_func) {
        // Custom deallocation: allocate zeroed memory and wrap it in a
        // buffer that runs free_func on release.
        message->payload = av_mallocz(desc->size);
        if(!message->payload)
            return AVERROR(ENOMEM);
        message->payload_ref =
            av_buffer_create(message->payload, desc->size,
                             free_func, NULL, 0);
    }
    else {
        message->payload_ref = av_buffer_alloc(desc->size);
    }
    if(!message->payload_ref) {
        // av_buffer_create failed: free the orphaned payload allocation.
        av_freep(&message->payload);
        return AVERROR(ENOMEM);
    }
    message->payload = message->payload_ref->data;
    return 0;
}
/* Append one zero-initialised message slot to the list, growing the
 * backing array geometrically (2n + 1) when it is full. */
int ff_cbs_sei_list_add(SEIRawMessageList *list) {
    const int used = list->nb_messages;
    const int capacity = list->nb_messages_allocated;
    av_assert0(used <= capacity);
    if(used + 1 > capacity) {
        const int grown = 2 * capacity + 1;
        void *resized = av_realloc_array(list->messages,
                                         grown, sizeof(*list->messages));
        if(!resized)
            return AVERROR(ENOMEM);
        list->messages = resized;
        list->nb_messages_allocated = grown;
        // Zero the newly-added entries.
        memset(list->messages + capacity, 0,
               (grown - capacity) * sizeof(*list->messages));
    }
    list->nb_messages = used + 1;
    return 0;
}
/* Release every message's payload and extension buffers, then the
 * message array itself. */
void ff_cbs_sei_free_message_list(SEIRawMessageList *list) {
    int i;
    for(i = 0; i < list->nb_messages; i++) {
        av_buffer_unref(&list->messages[i].payload_ref);
        av_buffer_unref(&list->messages[i].extension_data_ref);
    }
    av_free(list->messages);
}
/* Find (or create) the SEI NAL unit in an access unit that new messages
 * should go into.  prefix selects prefix vs. suffix SEI; H.264 only has
 * prefix SEI.  A newly created unit is placed before the first VCL unit
 * (prefix) or after the last one (suffix) and given a minimal NAL header. */
static int cbs_sei_get_unit(CodedBitstreamContext *ctx,
                            CodedBitstreamFragment *au,
                            int prefix,
                            CodedBitstreamUnit **sei_unit) {
    CodedBitstreamUnit *unit;
    int sei_type, highest_vcl_type, err, i, position;
    switch(ctx->codec->codec_id) {
    case AV_CODEC_ID_H264:
        // (We can ignore auxiliary slices because we only have prefix
        // SEI in H.264 and an auxiliary picture must always follow a
        // primary picture.)
        highest_vcl_type = H264_NAL_IDR_SLICE;
        if(prefix)
            sei_type = H264_NAL_SEI;
        else
            return AVERROR(EINVAL); // no suffix SEI in H.264
        break;
    case AV_CODEC_ID_H265:
        highest_vcl_type = HEVC_NAL_RSV_VCL31;
        if(prefix)
            sei_type = HEVC_NAL_SEI_PREFIX;
        else
            sei_type = HEVC_NAL_SEI_SUFFIX;
        break;
    default:
        return AVERROR(EINVAL);
    }
    // Find an existing SEI NAL unit of the right type.
    unit = NULL;
    for(i = 0; i < au->nb_units; i++) {
        if(au->units[i].type == sei_type) {
            unit = &au->units[i];
            break;
        }
    }
    if(unit) {
        *sei_unit = unit;
        return 0;
    }
    // Need to add a new SEI NAL unit ...
    if(prefix) {
        // ... before the first VCL NAL unit.
        for(i = 0; i < au->nb_units; i++) {
            if(au->units[i].type < highest_vcl_type)
                break;
        }
        position = i;
    }
    else {
        // ... after the last VCL NAL unit.
        for(i = au->nb_units - 1; i >= 0; i--) {
            if(au->units[i].type < highest_vcl_type)
                break;
        }
        if(i < 0) {
            // No VCL units; just put it at the end.
            position = au->nb_units;
        }
        else {
            position = i + 1;
        }
    }
    err = ff_cbs_insert_unit_content(au, position, sei_type,
                                     NULL, NULL);
    if(err < 0)
        return err;
    unit = &au->units[position];
    unit->type = sei_type;
    err = ff_cbs_alloc_unit_content2(ctx, unit);
    if(err < 0)
        return err;
    // Initialise the new unit's content with a minimal NAL unit header.
    switch(ctx->codec->codec_id) {
    case AV_CODEC_ID_H264: {
        H264RawSEI sei = {
            .nal_unit_header = {
                .nal_ref_idc = 0,
                .nal_unit_type = sei_type,
            },
        };
        memcpy(unit->content, &sei, sizeof(sei));
    } break;
    case AV_CODEC_ID_H265: {
        H265RawSEI sei = {
            .nal_unit_header = {
                .nal_unit_type = sei_type,
                .nuh_layer_id = 0,
                .nuh_temporal_id_plus1 = 1,
            },
        };
        memcpy(unit->content, &sei, sizeof(sei));
    } break;
    default:
        av_assert0(0); // codec_id was validated above
    }
    *sei_unit = unit;
    return 0;
}
/**
 * Fetch the SEI message list embedded in a codec-specific SEI unit.
 *
 * Fails with AVERROR(EINVAL) when the unit is not an SEI unit of the
 * context's codec (H.264 SEI, or H.265 prefix/suffix SEI).
 */
static int cbs_sei_get_message_list(CodedBitstreamContext *ctx,
                                    CodedBitstreamUnit *unit,
                                    SEIRawMessageList **list) {
    if(ctx->codec->codec_id == AV_CODEC_ID_H264) {
        H264RawSEI *sei = unit->content;
        if(unit->type != H264_NAL_SEI)
            return AVERROR(EINVAL);
        *list = &sei->message_list;
        return 0;
    }

    if(ctx->codec->codec_id == AV_CODEC_ID_H265) {
        H265RawSEI *sei = unit->content;
        if(unit->type != HEVC_NAL_SEI_PREFIX &&
           unit->type != HEVC_NAL_SEI_SUFFIX)
            return AVERROR(EINVAL);
        *list = &sei->message_list;
        return 0;
    }

    // Unsupported codec.
    return AVERROR(EINVAL);
}
/**
 * Append a new SEI message to an access unit.
 *
 * The payload type must be known to the CBS SEI machinery
 * (ff_cbs_sei_find_type), otherwise AVERROR(EINVAL) is returned.  A
 * suitable SEI unit is found or created, and the message is appended
 * to its message list.  If payload_buf is supplied, a new reference to
 * it is stored with the message.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_cbs_sei_add_message(CodedBitstreamContext *ctx,
                           CodedBitstreamFragment *au,
                           int prefix,
                           uint32_t payload_type,
                           void *payload_data,
                           AVBufferRef *payload_buf) {
    const SEIMessageTypeDescriptor *type_desc;
    CodedBitstreamUnit *sei_unit;
    SEIRawMessageList *msg_list;
    SEIRawMessage *new_msg;
    AVBufferRef *buf_ref;
    int ret;

    // Reject payload types we do not know how to read/write.
    type_desc = ff_cbs_sei_find_type(ctx, payload_type);
    if(!type_desc)
        return AVERROR(EINVAL);

    // Find an existing SEI unit or make a new one to add to.
    ret = cbs_sei_get_unit(ctx, au, prefix, &sei_unit);
    if(ret < 0)
        return ret;

    // Find the message list inside the codec-dependent unit.
    ret = cbs_sei_get_message_list(ctx, sei_unit, &msg_list);
    if(ret < 0)
        return ret;

    // Grow the message list by one entry.
    ret = ff_cbs_sei_list_add(msg_list);
    if(ret < 0)
        return ret;

    // Take our own reference on the payload buffer, if any.
    buf_ref = payload_buf ? av_buffer_ref(payload_buf) : NULL;
    if(payload_buf && !buf_ref)
        return AVERROR(ENOMEM);

    // Fill in the freshly appended message (last slot in the list).
    new_msg = &msg_list->messages[msg_list->nb_messages - 1];
    new_msg->payload_type = payload_type;
    new_msg->payload = payload_data;
    new_msg->payload_ref = buf_ref;

    return 0;
}
/**
 * Iterate over all SEI messages of a given payload type in an access
 * unit.
 *
 * *iter must be NULL on the first call; on success it is updated to
 * point at the next matching message.  Returns AVERROR(ENOENT) once no
 * further matching message exists.
 */
int ff_cbs_sei_find_message(CodedBitstreamContext *ctx,
                            CodedBitstreamFragment *au,
                            uint32_t payload_type,
                            SEIRawMessage **iter) {
    int u, m, passed_iter = 0;

    for(u = 0; u < au->nb_units; u++) {
        SEIRawMessageList *msgs;

        // Skip units that are not SEI units of this codec.
        if(cbs_sei_get_message_list(ctx, &au->units[u], &msgs) < 0)
            continue;

        for(m = 0; m < msgs->nb_messages; m++) {
            SEIRawMessage *cur = &msgs->messages[m];

            if(cur->payload_type != payload_type)
                continue;

            // First call (*iter == NULL), or we already walked past the
            // previously returned message: this is the next result.
            if(!*iter || passed_iter) {
                *iter = cur;
                return 0;
            }

            // Found the previously returned message; the next match
            // (if any) is the one to hand back.
            if(cur == *iter)
                passed_iter = 1;
        }
    }

    return AVERROR(ENOENT);
}
/**
 * Remove the message at the given index from an SEI message list,
 * releasing its payload and extension-data buffer references and
 * closing the gap in the array.
 */
static void cbs_sei_delete_message(SEIRawMessageList *list,
                                   int position) {
    SEIRawMessage *doomed;
    int tail;

    av_assert0(position >= 0 && position < list->nb_messages);

    doomed = &list->messages[position];
    av_buffer_unref(&doomed->payload_ref);
    av_buffer_unref(&doomed->extension_data_ref);

    // Shift the remaining messages down over the deleted slot.
    list->nb_messages--;
    tail = list->nb_messages - position;
    if(tail > 0) {
        memmove(&list->messages[position],
                &list->messages[position + 1],
                tail * sizeof(*list->messages));
    }
}
/**
 * Delete every SEI message with the given payload type from all SEI
 * units of an access unit.
 */
void ff_cbs_sei_delete_message_type(CodedBitstreamContext *ctx,
                                    CodedBitstreamFragment *au,
                                    uint32_t payload_type) {
    int u, m;

    for(u = 0; u < au->nb_units; u++) {
        SEIRawMessageList *msgs;

        // Non-SEI units have no message list; skip them.
        if(cbs_sei_get_message_list(ctx, &au->units[u], &msgs) < 0)
            continue;

        // Walk backwards so deletions do not disturb unvisited indices.
        for(m = msgs->nb_messages - 1; m >= 0; m--) {
            if(msgs->messages[m].payload_type == payload_type)
                cbs_sei_delete_message(msgs, m);
        }
    }
}

View File

@ -1,310 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
// Filler payload SEI: payload_size bytes, each fixed at 0xff.
static int FUNC(filler_payload)(CodedBitstreamContext *ctx, RWContext *rw,
                                SEIRawFillerPayload *current, SEIMessageState *state) {
    int err, i;

    HEADER("Filler Payload");

#ifdef READ
    // When reading, the byte count comes from the SEI message header.
    current->payload_size = state->payload_size;
#endif

    for(i = 0; i < current->payload_size; i++)
        fixed(8, ff_byte, 0xff);

    return 0;
}
// ITU-T T.35 registered user data SEI: a one- or two-byte country code
// followed by opaque payload bytes.
static int FUNC(user_data_registered)(CodedBitstreamContext *ctx, RWContext *rw,
                                      SEIRawUserDataRegistered *current, SEIMessageState *state) {
    int err, i, j;

    HEADER("User Data Registered ITU-T T.35");

    u(8, itu_t_t35_country_code, 0x00, 0xff);
    // i counts the country-code bytes consumed: 0xff signals that an
    // extension byte follows.
    if(current->itu_t_t35_country_code != 0xff)
        i = 1;
    else {
        u(8, itu_t_t35_country_code_extension_byte, 0x00, 0xff);
        i = 2;
    }

#ifdef READ
    // The remaining payload bytes after the country code are the data.
    if(state->payload_size < i) {
        av_log(ctx->log_ctx, AV_LOG_ERROR,
               "Invalid SEI user data registered payload.\n");
        return AVERROR_INVALIDDATA;
    }
    current->data_length = state->payload_size - i;
#endif

    allocate(current->data, current->data_length);
    for(j = 0; j < current->data_length; j++)
        xu(8, itu_t_t35_payload_byte[], current->data[j], 0x00, 0xff, 1, i + j);

    return 0;
}
// Unregistered user data SEI: a 16-byte UUID followed by opaque data.
static int FUNC(user_data_unregistered)(CodedBitstreamContext *ctx, RWContext *rw,
                                        SEIRawUserDataUnregistered *current, SEIMessageState *state) {
    int err, i;

    HEADER("User Data Unregistered");

#ifdef READ
    // The payload must at least hold the UUID.
    if(state->payload_size < 16) {
        av_log(ctx->log_ctx, AV_LOG_ERROR,
               "Invalid SEI user data unregistered payload.\n");
        return AVERROR_INVALIDDATA;
    }
    current->data_length = state->payload_size - 16;
#endif

    for(i = 0; i < 16; i++)
        us(8, uuid_iso_iec_11578[i], 0x00, 0xff, 1, i);

    allocate(current->data, current->data_length);
    for(i = 0; i < current->data_length; i++)
        xu(8, user_data_payload_byte[i], current->data[i], 0x00, 0xff, 1, i);

    return 0;
}
// Mastering display colour volume SEI: three display primaries plus
// white point and luminance range.
static int FUNC(mastering_display_colour_volume)(CodedBitstreamContext *ctx, RWContext *rw,
                                                 SEIRawMasteringDisplayColourVolume *current, SEIMessageState *state) {
    int err, c;

    HEADER("Mastering Display Colour Volume");

    for(c = 0; c < 3; c++) {
        ubs(16, display_primaries_x[c], 1, c);
        ubs(16, display_primaries_y[c], 1, c);
    }

    ub(16, white_point_x);
    ub(16, white_point_y);

    ub(32, max_display_mastering_luminance);
    ub(32, min_display_mastering_luminance);

    return 0;
}
// Content light level SEI: maximum and average picture light levels.
static int FUNC(content_light_level_info)(CodedBitstreamContext *ctx, RWContext *rw,
                                          SEIRawContentLightLevelInfo *current, SEIMessageState *state) {
    int err;

    HEADER("Content Light Level Information");

    ub(16, max_content_light_level);
    ub(16, max_pic_average_light_level);

    return 0;
}
// Alternative transfer characteristics SEI: a single one-byte
// preferred transfer-characteristics value.
static int FUNC(alternative_transfer_characteristics)(CodedBitstreamContext *ctx, RWContext *rw,
                                                      SEIRawAlternativeTransferCharacteristics *current,
                                                      SEIMessageState *state) {
    int err;

    HEADER("Alternative Transfer Characteristics");

    ub(8, preferred_transfer_characteristics);

    return 0;
}
// Read or write a single SEI message body.  Known payload types go
// through their type descriptor's read/write handler and may carry
// trailing extension data; unknown types are passed through as raw
// bytes.  (This file is compiled twice, once with READ and once with
// WRITE defined, to produce both directions.)
static int FUNC(message)(CodedBitstreamContext *ctx, RWContext *rw,
                         SEIRawMessage *current) {
    const SEIMessageTypeDescriptor *desc;
    int err, i;

    desc = ff_cbs_sei_find_type(ctx, current->payload_type);
    if(desc) {
        SEIMessageState state = {
            .payload_type = current->payload_type,
            .payload_size = current->payload_size,
            .extension_present = current->extension_bit_length > 0,
        };
        int start_position, current_position, bits_written;

#ifdef READ
        CHECK(ff_cbs_sei_alloc_message_payload(current, desc));
#endif

        start_position = bit_position(rw);

        CHECK(desc->READWRITE(ctx, rw, current->payload, &state));

        current_position = bit_position(rw);
        bits_written = current_position - start_position;

        // If the handler did not consume the whole declared payload, or
        // ended unaligned, or extension data is present, the payload is
        // finished with extension bits plus byte-alignment padding
        // (bit_equal_to_one then bit_equal_to_zero bits).
        if(byte_alignment(rw) || state.extension_present ||
           bits_written < 8 * current->payload_size) {
            size_t bits_left;

#ifdef READ
            GetBitContext tmp = *rw;
            int trailing_bits, trailing_zero_bits;

            bits_left = 8 * current->payload_size - bits_written;
            // Peek the final byte of the payload to locate the
            // bit_equal_to_one terminator from the end.
            if(bits_left > 8)
                skip_bits_long(&tmp, bits_left - 8);
            trailing_bits = get_bits(&tmp, FFMIN(bits_left, 8));
            if(trailing_bits == 0) {
                // The trailing bits must contain a bit_equal_to_one, so
                // they can't all be zero.
                return AVERROR_INVALIDDATA;
            }
            trailing_zero_bits = ff_ctz(trailing_bits);
            // Everything before the terminator is extension data.
            current->extension_bit_length =
                bits_left - 1 - trailing_zero_bits;
#endif

            if(current->extension_bit_length > 0) {
                allocate(current->extension_data,
                         (current->extension_bit_length + 7) / 8);

                bits_left = current->extension_bit_length;
                for(i = 0; bits_left > 0; i++) {
                    int length = FFMIN(bits_left, 8);
                    xu(length, reserved_payload_extension_data,
                       current->extension_data[i],
                       0, MAX_UINT_BITS(length), 0);
                    bits_left -= length;
                }
            }

            fixed(1, bit_equal_to_one, 1);
            while(byte_alignment(rw))
                fixed(1, bit_equal_to_zero, 0);
        }

#ifdef WRITE
        // Record the actual encoded size for the message header.
        current->payload_size = (put_bits_count(rw) - start_position) / 8;
#endif
    }
    else {
        // Unknown payload type: copy the payload as opaque bytes.
        uint8_t *data;

        allocate(current->payload, current->payload_size);
        data = current->payload;
        for(i = 0; i < current->payload_size; i++)
            xu(8, payload_byte[i], data[i], 0, 255, 1, i);
    }

    return 0;
}
// Read or write a whole SEI message list.  payload_type and
// payload_size use the H.26x escape coding: a run of 0xff bytes (each
// adding 255) followed by a final byte in [0, 254].
static int FUNC(message_list)(CodedBitstreamContext *ctx, RWContext *rw,
                              SEIRawMessageList *current, int prefix) {
    SEIRawMessage *message;
    int err, k;

#ifdef READ
    for(k = 0;; k++) {
        uint32_t payload_type = 0;
        uint32_t payload_size = 0;
        uint32_t tmp;
        GetBitContext payload_gbc;

        // Decode the escape-coded payload type.
        while(show_bits(rw, 8) == 0xff) {
            fixed(8, ff_byte, 0xff);
            payload_type += 255;
        }
        xu(8, last_payload_type_byte, tmp, 0, 254, 0);
        payload_type += tmp;

        // Decode the escape-coded payload size.
        while(show_bits(rw, 8) == 0xff) {
            fixed(8, ff_byte, 0xff);
            payload_size += 255;
        }
        xu(8, last_payload_size_byte, tmp, 0, 254, 0);
        payload_size += tmp;

        // There must be space remaining for both the payload and
        // the trailing bits on the SEI NAL unit.
        if(payload_size + 1 > get_bits_left(rw) / 8) {
            av_log(ctx->log_ctx, AV_LOG_ERROR,
                   "Invalid SEI message: payload_size too large "
                   "(%" PRIu32 " bytes).\n",
                   payload_size);
            return AVERROR_INVALIDDATA;
        }

        // Read the message body from a bounded sub-reader so it cannot
        // overrun its declared size, then skip the payload in rw.
        CHECK(init_get_bits(&payload_gbc, rw->buffer,
                            get_bits_count(rw) + 8 * payload_size));
        skip_bits_long(&payload_gbc, get_bits_count(rw));

        CHECK(ff_cbs_sei_list_add(current));
        message = &current->messages[k];

        message->payload_type = payload_type;
        message->payload_size = payload_size;

        CHECK(FUNC(message)(ctx, &payload_gbc, message));

        skip_bits_long(rw, 8 * payload_size);

        if(!cbs_h2645_read_more_rbsp_data(rw))
            break;
    }
#else
    for(k = 0; k < current->nb_messages; k++) {
        PutBitContext start_state;
        uint32_t tmp;
        int trace, i;

        message = &current->messages[k];

        // We write the payload twice in order to find the size. Trace
        // output is switched off for the first write.
        trace = ctx->trace_enable;
        ctx->trace_enable = 0;

        start_state = *rw;
        for(i = 0; i < 2; i++) {
            // Rewind and re-emit: the second pass uses the payload_size
            // computed by the first.
            *rw = start_state;

            tmp = message->payload_type;
            while(tmp >= 255) {
                fixed(8, ff_byte, 0xff);
                tmp -= 255;
            }
            xu(8, last_payload_type_byte, tmp, 0, 254, 0);

            tmp = message->payload_size;
            while(tmp >= 255) {
                fixed(8, ff_byte, 0xff);
                tmp -= 255;
            }
            xu(8, last_payload_size_byte, tmp, 0, 254, 0);

            err = FUNC(message)(ctx, rw, message);
            // Re-enable tracing for the second pass (and restore it on
            // early failure).
            ctx->trace_enable = trace;
            if(err < 0)
                return err;
        }
    }
#endif

    return 0;
}

View File

@ -1,675 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <libavutil/avassert.h>
#include "cbs/cbs.h"
#include "cbs/cbs_vp9.h"
#include "cbs_internal.h"
// Read a VP9 signed value coded as 'width' magnitude bits followed by
// one sign bit (1 = negative).  Traces the raw bits when enabled.
static int cbs_vp9_read_s(CodedBitstreamContext *ctx, GetBitContext *gbc,
                          int width, const char *name,
                          const int *subscripts, int32_t *write_to) {
    uint32_t magnitude;
    int position, sign;
    int32_t value;

    if(ctx->trace_enable)
        position = get_bits_count(gbc);

    // Need width magnitude bits plus the sign bit.
    if(get_bits_left(gbc) < width + 1) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid signed value at "
                                           "%s: bitstream ended.\n",
               name);
        return AVERROR_INVALIDDATA;
    }

    magnitude = get_bits(gbc, width);
    sign = get_bits1(gbc);
    value = sign ? -(int32_t)magnitude : magnitude;

    if(ctx->trace_enable) {
        // Render magnitude bits MSB-first, then the sign bit.
        char bits[33];
        int i;
        for(i = 0; i < width; i++)
            bits[i] = magnitude >> (width - i - 1) & 1 ? '1' : '0';
        bits[i] = sign ? '1' : '0';
        bits[i + 1] = 0;
        ff_cbs_trace_syntax_element(ctx, position, name, subscripts,
                                    bits, value);
    }

    *write_to = value;
    return 0;
}
// Write a VP9 signed value: 'width' magnitude bits then one sign bit.
// Mirrors cbs_vp9_read_s.
static int cbs_vp9_write_s(CodedBitstreamContext *ctx, PutBitContext *pbc,
                           int width, const char *name,
                           const int *subscripts, int32_t value) {
    uint32_t magnitude;
    int sign;

    if(put_bits_left(pbc) < width + 1)
        return AVERROR(ENOSPC);

    sign = value < 0;
    magnitude = sign ? -value : value;

    if(ctx->trace_enable) {
        // Render magnitude bits MSB-first, then the sign bit.
        char bits[33];
        int i;
        for(i = 0; i < width; i++)
            bits[i] = magnitude >> (width - i - 1) & 1 ? '1' : '0';
        bits[i] = sign ? '1' : '0';
        bits[i + 1] = 0;
        ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc),
                                    name, subscripts, bits, value);
    }

    put_bits(pbc, width, magnitude);
    put_bits(pbc, 1, sign);

    return 0;
}
// Read a VP9 increment-coded value in [range_min, range_max]: each '1'
// bit adds one starting from range_min; a '0' bit (or reaching
// range_max) terminates.
static int cbs_vp9_read_increment(CodedBitstreamContext *ctx, GetBitContext *gbc,
                                  uint32_t range_min, uint32_t range_max,
                                  const char *name, uint32_t *write_to) {
    uint32_t value;
    int position, i;
    char bits[8];

    // The trace buffer bounds the representable range span.
    av_assert0(range_min <= range_max && range_max - range_min < sizeof(bits) - 1);
    if(ctx->trace_enable)
        position = get_bits_count(gbc);

    for(i = 0, value = range_min; value < range_max;) {
        if(get_bits_left(gbc) < 1) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid increment value at "
                                               "%s: bitstream ended.\n",
                   name);
            return AVERROR_INVALIDDATA;
        }
        if(get_bits1(gbc)) {
            bits[i++] = '1';
            ++value;
        }
        else {
            bits[i++] = '0';
            break;
        }
    }

    if(ctx->trace_enable) {
        bits[i] = 0;
        ff_cbs_trace_syntax_element(ctx, position, name, NULL, bits, value);
    }

    *write_to = value;
    return 0;
}
// Write a VP9 increment-coded value: (value - range_min) '1' bits, then
// a terminating '0' bit unless value == range_max (which is implied by
// exhausting the range and needs no terminator).
static int cbs_vp9_write_increment(CodedBitstreamContext *ctx, PutBitContext *pbc,
                                   uint32_t range_min, uint32_t range_max,
                                   const char *name, uint32_t value) {
    int len;

    av_assert0(range_min <= range_max && range_max - range_min < 8);
    if(value < range_min || value > range_max) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "%s out of range: "
                                           "%" PRIu32 ", but must be in [%" PRIu32 ",%" PRIu32 "].\n",
               name, value, range_min, range_max);
        return AVERROR_INVALIDDATA;
    }

    // Total coded length: max value needs no terminating zero.
    if(value == range_max)
        len = range_max - range_min;
    else
        len = value - range_min + 1;
    if(put_bits_left(pbc) < len)
        return AVERROR(ENOSPC);

    if(ctx->trace_enable) {
        char bits[8];
        int i;
        for(i = 0; i < len; i++) {
            if(range_min + i == value)
                bits[i] = '0';
            else
                bits[i] = '1';
        }
        bits[i] = 0;
        ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc),
                                    name, NULL, bits, value);
    }

    // len ones minus a trailing zero when a terminator is needed:
    // (1 << len) - 1 is len one-bits; subtracting 1 clears the lowest
    // bit, turning the final bit into the terminating zero.
    if(len > 0)
        put_bits(pbc, len, (1 << len) - 1 - (value != range_max));

    return 0;
}
// Read a little-endian value of 'width' bits (a whole number of bytes),
// least-significant byte first.
static int cbs_vp9_read_le(CodedBitstreamContext *ctx, GetBitContext *gbc,
                           int width, const char *name,
                           const int *subscripts, uint32_t *write_to) {
    uint32_t value;
    int position, b;

    av_assert0(width % 8 == 0);

    if(ctx->trace_enable)
        position = get_bits_count(gbc);

    if(get_bits_left(gbc) < width) {
        av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid le value at "
                                           "%s: bitstream ended.\n",
               name);
        return AVERROR_INVALIDDATA;
    }

    // Assemble bytes LSB-first.
    value = 0;
    for(b = 0; b < width; b += 8)
        value |= get_bits(gbc, 8) << b;

    if(ctx->trace_enable) {
        char bits[33];
        int i;
        for(b = 0; b < width; b += 8)
            for(i = 0; i < 8; i++)
                bits[b + i] = value >> (b + i) & 1 ? '1' : '0';
        bits[b] = 0;
        ff_cbs_trace_syntax_element(ctx, position, name, subscripts,
                                    bits, value);
    }

    *write_to = value;
    return 0;
}
// Write a little-endian value of 'width' bits (a whole number of
// bytes), least-significant byte first.  Mirrors cbs_vp9_read_le.
static int cbs_vp9_write_le(CodedBitstreamContext *ctx, PutBitContext *pbc,
                            int width, const char *name,
                            const int *subscripts, uint32_t value) {
    int b;

    av_assert0(width % 8 == 0);

    if(put_bits_left(pbc) < width)
        return AVERROR(ENOSPC);

    if(ctx->trace_enable) {
        char bits[33];
        int i;
        for(b = 0; b < width; b += 8)
            for(i = 0; i < 8; i++)
                bits[b + i] = value >> (b + i) & 1 ? '1' : '0';
        bits[b] = 0;
        ff_cbs_trace_syntax_element(ctx, put_bits_count(pbc),
                                    name, subscripts, bits, value);
    }

    for(b = 0; b < width; b += 8)
        put_bits(pbc, 8, value >> b & 0xff);

    return 0;
}
// Shared read/write macro machinery: the syntax template below is
// included twice, first with READ defined (decoding via GetBitContext)
// and then with WRITE defined (encoding via PutBitContext).

#define HEADER(name) \
    do { \
        ff_cbs_trace_header(ctx, name); \
    } while(0)

#define CHECK(call) \
    do { \
        err = (call); \
        if(err < 0) \
            return err; \
    } while(0)

#define FUNC_NAME(rw, codec, name) cbs_##codec##_##rw##_##name
#define FUNC_VP9(rw, name) FUNC_NAME(rw, vp9, name)
#define FUNC(name) FUNC_VP9(READWRITE, name)

// Build a subscript array literal for trace output, or NULL when there
// are no subscripts.
#define SUBSCRIPTS(subs, ...) (subs > 0 ? ((int[subs + 1]) { subs, __VA_ARGS__ }) : NULL)

// f/s: unsigned/signed field stored in current->name; fs/ss: the same
// with trace subscripts.
#define f(width, name) \
    xf(width, name, current->name, 0, )
#define s(width, name) \
    xs(width, name, current->name, 0, )
#define fs(width, name, subs, ...) \
    xf(width, name, current->name, subs, __VA_ARGS__)
#define ss(width, name, subs, ...) \
    xs(width, name, current->name, subs, __VA_ARGS__)

// ---- READ direction ----
#define READ
#define READWRITE read
#define RWContext GetBitContext

#define xf(width, name, var, subs, ...) \
    do { \
        uint32_t value; \
        CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \
                                   SUBSCRIPTS(subs, __VA_ARGS__), \
                                   &value, 0, (1 << width) - 1)); \
        var = value; \
    } while(0)
#define xs(width, name, var, subs, ...) \
    do { \
        int32_t value; \
        CHECK(cbs_vp9_read_s(ctx, rw, width, #name, \
                             SUBSCRIPTS(subs, __VA_ARGS__), &value)); \
        var = value; \
    } while(0)

#define increment(name, min, max) \
    do { \
        uint32_t value; \
        CHECK(cbs_vp9_read_increment(ctx, rw, min, max, #name, &value)); \
        current->name = value; \
    } while(0)

#define fle(width, name, subs, ...) \
    do { \
        CHECK(cbs_vp9_read_le(ctx, rw, width, #name, \
                              SUBSCRIPTS(subs, __VA_ARGS__), &current->name)); \
    } while(0)

// delta_q: optional signed 4-bit delta, 0 when the coded flag is unset.
#define delta_q(name) \
    do { \
        uint8_t delta_coded; \
        int8_t delta_q; \
        xf(1, name.delta_coded, delta_coded, 0, ); \
        if(delta_coded) \
            xs(4, name.delta_q, delta_q, 0, ); \
        else \
            delta_q = 0; \
        current->name = delta_q; \
    } while(0)

// prob: optional 8-bit probability, 255 when the coded flag is unset.
#define prob(name, subs, ...) \
    do { \
        uint8_t prob_coded; \
        uint8_t prob; \
        xf(1, name.prob_coded, prob_coded, subs, __VA_ARGS__); \
        if(prob_coded) \
            xf(8, name.prob, prob, subs, __VA_ARGS__); \
        else \
            prob = 255; \
        current->name = prob; \
    } while(0)

#define fixed(width, name, value) \
    do { \
        av_unused uint32_t fixed_value; \
        CHECK(ff_cbs_read_unsigned(ctx, rw, width, #name, \
                                   0, &fixed_value, value, value)); \
    } while(0)

#define infer(name, value) \
    do { \
        current->name = value; \
    } while(0)

#define byte_alignment(rw) (get_bits_count(rw) % 8)

#include "cbs_vp9_syntax_template.c"

#undef READ
#undef READWRITE
#undef RWContext
#undef xf
#undef xs
#undef increment
#undef fle
#undef delta_q
#undef prob
#undef fixed
#undef infer
#undef byte_alignment

// ---- WRITE direction ----
#define WRITE
#define READWRITE write
#define RWContext PutBitContext

#define xf(width, name, var, subs, ...) \
    do { \
        CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \
                                    SUBSCRIPTS(subs, __VA_ARGS__), \
                                    var, 0, (1 << width) - 1)); \
    } while(0)
#define xs(width, name, var, subs, ...) \
    do { \
        CHECK(cbs_vp9_write_s(ctx, rw, width, #name, \
                              SUBSCRIPTS(subs, __VA_ARGS__), var)); \
    } while(0)

#define increment(name, min, max) \
    do { \
        CHECK(cbs_vp9_write_increment(ctx, rw, min, max, #name, current->name)); \
    } while(0)

#define fle(width, name, subs, ...) \
    do { \
        CHECK(cbs_vp9_write_le(ctx, rw, width, #name, \
                               SUBSCRIPTS(subs, __VA_ARGS__), current->name)); \
    } while(0)

#define delta_q(name) \
    do { \
        xf(1, name.delta_coded, !!current->name, 0, ); \
        if(current->name) \
            xs(4, name.delta_q, current->name, 0, ); \
    } while(0)

#define prob(name, subs, ...) \
    do { \
        xf(1, name.prob_coded, current->name != 255, subs, __VA_ARGS__); \
        if(current->name != 255) \
            xf(8, name.prob, current->name, subs, __VA_ARGS__); \
    } while(0)

#define fixed(width, name, value) \
    do { \
        CHECK(ff_cbs_write_unsigned(ctx, rw, width, #name, \
                                    0, value, value, value)); \
    } while(0)

// When writing, inferred values are checked rather than set.
#define infer(name, value) \
    do { \
        if(current->name != (value)) { \
            av_log(ctx->log_ctx, AV_LOG_WARNING, "Warning: " \
                                                 "%s does not match inferred value: " \
                                                 "%" PRId64 ", but should be %" PRId64 ".\n", \
                   #name, (int64_t)current->name, (int64_t)(value)); \
        } \
    } while(0)

#define byte_alignment(rw) (put_bits_count(rw) % 8)

#include "cbs_vp9_syntax_template.c"

#undef WRITE
#undef READWRITE
#undef RWContext
#undef xf
#undef xs
#undef increment
#undef fle
#undef delta_q
#undef prob
#undef fixed
#undef infer
#undef byte_alignment
// Split a packet into coded-frame units.  A VP9 superframe carries a
// trailing index whose final byte starts with the marker bits 110
// ((byte & 0xe0) == 0xc0); otherwise the packet is a single frame.
static int cbs_vp9_split_fragment(CodedBitstreamContext *ctx,
                                  CodedBitstreamFragment *frag,
                                  int header) {
    uint8_t superframe_header;
    int err;

    if(frag->data_size == 0)
        return AVERROR_INVALIDDATA;

    // Last byte in the packet.
    superframe_header = frag->data[frag->data_size - 1];

    if((superframe_header & 0xe0) == 0xc0) {
        VP9RawSuperframeIndex sfi;
        GetBitContext gbc;
        size_t index_size, pos;
        int i;

        // Index layout: marker byte, per-frame sizes, marker byte.
        // Bits 4-3 give bytes-per-size minus one, bits 2-0 the frame
        // count minus one.
        index_size = 2 + (((superframe_header & 0x18) >> 3) + 1) *
                             ((superframe_header & 0x07) + 1);

        if(index_size > frag->data_size)
            return AVERROR_INVALIDDATA;

        err = init_get_bits(&gbc, frag->data + frag->data_size - index_size,
                            8 * index_size);
        if(err < 0)
            return err;

        err = cbs_vp9_read_superframe_index(ctx, &gbc, &sfi);
        if(err < 0)
            return err;

        // Slice the packet into one unit per frame, validating that the
        // declared sizes stay within the packet (minus the index).
        pos = 0;
        for(i = 0; i <= sfi.frames_in_superframe_minus_1; i++) {
            if(pos + sfi.frame_sizes[i] + index_size > frag->data_size) {
                av_log(ctx->log_ctx, AV_LOG_ERROR, "Frame %d too large "
                                                   "in superframe: %" PRIu32 " bytes.\n",
                       i, sfi.frame_sizes[i]);
                return AVERROR_INVALIDDATA;
            }

            err = ff_cbs_insert_unit_data(frag, -1, 0,
                                          frag->data + pos,
                                          sfi.frame_sizes[i],
                                          frag->data_ref);
            if(err < 0)
                return err;

            pos += sfi.frame_sizes[i];
        }
        if(pos + index_size != frag->data_size) {
            av_log(ctx->log_ctx, AV_LOG_WARNING, "Extra padding at "
                                                 "end of superframe: %zu bytes.\n",
                   frag->data_size - (pos + index_size));
        }

        return 0;
    }
    else {
        // Not a superframe: the whole packet is one frame unit.
        err = ff_cbs_insert_unit_data(frag, -1, 0,
                                      frag->data, frag->data_size,
                                      frag->data_ref);
        if(err < 0)
            return err;
    }

    return 0;
}
// Parse one coded-frame unit: read the frame header, then reference the
// remaining bytes of the unit as the frame's compressed data.
static int cbs_vp9_read_unit(CodedBitstreamContext *ctx,
                             CodedBitstreamUnit *unit) {
    VP9RawFrame *frame;
    GetBitContext gbc;
    int err, pos;

    err = init_get_bits(&gbc, unit->data, 8 * unit->data_size);
    if(err < 0)
        return err;

    err = ff_cbs_alloc_unit_content2(ctx, unit);
    if(err < 0)
        return err;
    frame = unit->content;

    err = cbs_vp9_read_frame(ctx, &gbc, frame);
    if(err < 0)
        return err;

    // The header parser must leave the read position byte-aligned and
    // within the unit.
    pos = get_bits_count(&gbc);
    av_assert0(pos % 8 == 0);
    pos /= 8;
    av_assert0(pos <= unit->data_size);

    if(pos == unit->data_size) {
        // No data (e.g. a show-existing-frame frame).
    }
    else {
        // Keep a reference into the unit's buffer rather than copying.
        frame->data_ref = av_buffer_ref(unit->data_ref);
        if(!frame->data_ref)
            return AVERROR(ENOMEM);

        frame->data = unit->data + pos;
        frame->data_size = unit->data_size - pos;
    }

    return 0;
}
// Serialise one coded-frame unit: write the frame header bits, then
// append the raw compressed frame data, if any.
static int cbs_vp9_write_unit(CodedBitstreamContext *ctx,
                              CodedBitstreamUnit *unit,
                              PutBitContext *pbc) {
    VP9RawFrame *frame = unit->content;
    int err;

    err = cbs_vp9_write_frame(ctx, pbc, frame);
    if(err < 0)
        return err;

    // Frame must be byte-aligned.
    av_assert0(put_bits_count(pbc) % 8 == 0);

    if(frame->data) {
        if(frame->data_size > put_bits_left(pbc) / 8)
            return AVERROR(ENOSPC);

        // Copy the data bytes directly behind the flushed header bits.
        flush_put_bits(pbc);
        memcpy(put_bits_ptr(pbc), frame->data, frame->data_size);
        skip_put_bytes(pbc, frame->data_size);
    }

    return 0;
}
// Assemble the fragment's units into an output packet.  A single unit
// is passed through by reference; multiple units (up to 8) are packed
// into a superframe with a trailing index.
static int cbs_vp9_assemble_fragment(CodedBitstreamContext *ctx,
                                     CodedBitstreamFragment *frag) {
    int err;

    if(frag->nb_units == 1) {
        // Output is just the content of the single frame.

        CodedBitstreamUnit *frame = &frag->units[0];

        frag->data_ref = av_buffer_ref(frame->data_ref);
        if(!frag->data_ref)
            return AVERROR(ENOMEM);

        frag->data = frame->data;
        frag->data_size = frame->data_size;
    }
    else {
        // Build superframe out of frames.

        VP9RawSuperframeIndex sfi;
        PutBitContext pbc;
        AVBufferRef *ref;
        uint8_t *data;
        size_t size, max, pos;
        int i, size_len;

        if(frag->nb_units > 8) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Too many frames to "
                                               "make superframe: %d.\n",
                   frag->nb_units);
            return AVERROR(EINVAL);
        }

        // Use the smallest byte width that can hold every frame size.
        max = 0;
        for(i = 0; i < frag->nb_units; i++)
            if(max < frag->units[i].data_size)
                max = frag->units[i].data_size;

        if(max < 2)
            size_len = 1;
        else
            size_len = av_log2(max) / 8 + 1;
        av_assert0(size_len <= 4);

        sfi.superframe_marker = VP9_SUPERFRAME_MARKER;
        sfi.bytes_per_framesize_minus_1 = size_len - 1;
        sfi.frames_in_superframe_minus_1 = frag->nb_units - 1;

        // Total size: 2 marker bytes + one size_len entry per frame +
        // the frames themselves.
        size = 2;
        for(i = 0; i < frag->nb_units; i++) {
            size += size_len + frag->units[i].data_size;
            sfi.frame_sizes[i] = frag->units[i].data_size;
        }

        ref = av_buffer_alloc(size + AV_INPUT_BUFFER_PADDING_SIZE);
        if(!ref)
            return AVERROR(ENOMEM);
        data = ref->data;
        memset(data + size, 0, AV_INPUT_BUFFER_PADDING_SIZE);

        // Concatenate the frames, then append the index at the end.
        pos = 0;
        for(i = 0; i < frag->nb_units; i++) {
            av_assert0(size - pos > frag->units[i].data_size);
            memcpy(data + pos, frag->units[i].data,
                   frag->units[i].data_size);
            pos += frag->units[i].data_size;
        }
        av_assert0(size - pos == 2 + frag->nb_units * size_len);

        init_put_bits(&pbc, data + pos, size - pos);

        err = cbs_vp9_write_superframe_index(ctx, &pbc, &sfi);
        if(err < 0) {
            av_log(ctx->log_ctx, AV_LOG_ERROR, "Failed to write "
                                               "superframe index.\n");
            av_buffer_unref(&ref);
            return err;
        }

        av_assert0(put_bits_left(&pbc) == 0);
        flush_put_bits(&pbc);

        frag->data_ref = ref;
        frag->data = data;
        frag->data_size = size;
    }

    return 0;
}
// Reset decoder state: clear all stored reference-frame slots.
static void cbs_vp9_flush(CodedBitstreamContext *ctx) {
    CodedBitstreamVP9Context *vp9 = ctx->priv_data;

    memset(vp9->ref, 0, sizeof(vp9->ref));
}
// VP9 has a single unit type (a coded frame, type 0) whose content
// holds an internal reference to its data buffer.
static const CodedBitstreamUnitTypeDescriptor cbs_vp9_unit_types[] = {
    CBS_UNIT_TYPE_INTERNAL_REF(0, VP9RawFrame, data),
    CBS_UNIT_TYPE_END_OF_LIST
};

// CBS backend descriptor wiring the VP9 callbacks into the generic
// coded-bitstream framework.
const CodedBitstreamType ff_cbs_type_vp9 = {
    .codec_id = AV_CODEC_ID_VP9,

    .priv_data_size = sizeof(CodedBitstreamVP9Context),

    .unit_types = cbs_vp9_unit_types,

    .split_fragment = &cbs_vp9_split_fragment,
    .read_unit = &cbs_vp9_read_unit,
    .write_unit = &cbs_vp9_write_unit,
    .flush = &cbs_vp9_flush,

    .assemble_fragment = &cbs_vp9_assemble_fragment,
};

View File

@ -1,422 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
// The three fixed sync bytes that open key and intra-only frames.
static int FUNC(frame_sync_code)(CodedBitstreamContext *ctx, RWContext *rw,
                                 VP9RawFrameHeader *current) {
    int err;

    fixed(8, frame_sync_byte_0, VP9_FRAME_SYNC_0);
    fixed(8, frame_sync_byte_1, VP9_FRAME_SYNC_1);
    fixed(8, frame_sync_byte_2, VP9_FRAME_SYNC_2);

    return 0;
}
// Colour configuration: bit depth, colour space, range and chroma
// subsampling.  Also mirrors the decoded values into the context so
// later headers can use them.
static int FUNC(color_config)(CodedBitstreamContext *ctx, RWContext *rw,
                              VP9RawFrameHeader *current, int profile) {
    CodedBitstreamVP9Context *vp9 = ctx->priv_data;
    int err;

    // Profiles 2 and 3 support 10/12-bit; otherwise depth is fixed at 8.
    if(profile >= 2) {
        f(1, ten_or_twelve_bit);
        vp9->bit_depth = current->ten_or_twelve_bit ? 12 : 10;
    }
    else
        vp9->bit_depth = 8;

    f(3, color_space);
    if(current->color_space != VP9_CS_RGB) {
        f(1, color_range);
        // Profiles 1 and 3 signal subsampling explicitly; 0 and 2 imply 4:2:0.
        if(profile == 1 || profile == 3) {
            f(1, subsampling_x);
            f(1, subsampling_y);
            fixed(1, reserved_zero, 0);
        }
        else {
            infer(subsampling_x, 1);
            infer(subsampling_y, 1);
        }
    }
    else {
        // RGB: full range, no subsampling.
        infer(color_range, 1);
        if(profile == 1 || profile == 3) {
            infer(subsampling_x, 0);
            infer(subsampling_y, 0);
            fixed(1, reserved_zero, 0);
        }
    }

    vp9->subsampling_x = current->subsampling_x;
    vp9->subsampling_y = current->subsampling_y;

    return 0;
}
// Explicit frame size; also derives the mode-info and 64x64-superblock
// grid dimensions used by later syntax (tile_info).
static int FUNC(frame_size)(CodedBitstreamContext *ctx, RWContext *rw,
                            VP9RawFrameHeader *current) {
    CodedBitstreamVP9Context *vp9 = ctx->priv_data;
    int err;

    f(16, frame_width_minus_1);
    f(16, frame_height_minus_1);

    vp9->frame_width = current->frame_width_minus_1 + 1;
    vp9->frame_height = current->frame_height_minus_1 + 1;

    // 8x8 mode-info units, then 64x64 superblocks.
    vp9->mi_cols = (vp9->frame_width + 7) >> 3;
    vp9->mi_rows = (vp9->frame_height + 7) >> 3;
    vp9->sb64_cols = (vp9->mi_cols + 7) >> 3;
    vp9->sb64_rows = (vp9->mi_rows + 7) >> 3;

    return 0;
}
// Optional render (display) size, present only when it differs from
// the coded frame size.
static int FUNC(render_size)(CodedBitstreamContext *ctx, RWContext *rw,
                             VP9RawFrameHeader *current) {
    int err;

    f(1, render_and_frame_size_different);

    if(current->render_and_frame_size_different) {
        f(16, render_width_minus_1);
        f(16, render_height_minus_1);
    }

    return 0;
}
// Frame size for inter frames: either copied from the first reference
// frame flagged found_ref, or coded explicitly when none matches.
static int FUNC(frame_size_with_refs)(CodedBitstreamContext *ctx, RWContext *rw,
                                      VP9RawFrameHeader *current) {
    CodedBitstreamVP9Context *vp9 = ctx->priv_data;
    int err, i;

    for(i = 0; i < VP9_REFS_PER_FRAME; i++) {
        fs(1, found_ref[i], 1, i);
        if(current->found_ref[i]) {
            // Inherit geometry and format from the stored reference.
            VP9ReferenceFrameState *ref =
                &vp9->ref[current->ref_frame_idx[i]];

            vp9->frame_width = ref->frame_width;
            vp9->frame_height = ref->frame_height;

            vp9->subsampling_x = ref->subsampling_x;
            vp9->subsampling_y = ref->subsampling_y;
            vp9->bit_depth = ref->bit_depth;

            break;
        }
    }
    if(i >= VP9_REFS_PER_FRAME)
        CHECK(FUNC(frame_size)(ctx, rw, current));
    else {
        // Re-derive grid dimensions from the inherited size.
        vp9->mi_cols = (vp9->frame_width + 7) >> 3;
        vp9->mi_rows = (vp9->frame_height + 7) >> 3;
        vp9->sb64_cols = (vp9->mi_cols + 7) >> 3;
        vp9->sb64_rows = (vp9->mi_rows + 7) >> 3;
    }
    CHECK(FUNC(render_size)(ctx, rw, current));

    return 0;
}
// Interpolation filter selection: either switchable per-block, or a
// fixed 2-bit filter type.
static int FUNC(interpolation_filter)(CodedBitstreamContext *ctx, RWContext *rw,
                                      VP9RawFrameHeader *current) {
    int err;

    f(1, is_filter_switchable);
    if(!current->is_filter_switchable)
        f(2, raw_interpolation_filter_type);

    return 0;
}
// Loop filter strength/sharpness plus optional per-reference and
// per-mode delta updates.
static int FUNC(loop_filter_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                    VP9RawFrameHeader *current) {
    int err, i;

    f(6, loop_filter_level);
    f(3, loop_filter_sharpness);

    f(1, loop_filter_delta_enabled);
    if(current->loop_filter_delta_enabled) {
        f(1, loop_filter_delta_update);
        if(current->loop_filter_delta_update) {
            // Each delta is only recoded when its update flag is set.
            for(i = 0; i < VP9_MAX_REF_FRAMES; i++) {
                fs(1, update_ref_delta[i], 1, i);
                if(current->update_ref_delta[i])
                    ss(6, loop_filter_ref_deltas[i], 1, i);
            }
            for(i = 0; i < 2; i++) {
                fs(1, update_mode_delta[i], 1, i);
                if(current->update_mode_delta[i])
                    ss(6, loop_filter_mode_deltas[i], 1, i);
            }
        }
    }

    return 0;
}
// Quantiser: base index plus optional DC/AC deltas for luma and chroma.
static int FUNC(quantization_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                     VP9RawFrameHeader *current) {
    int err;

    f(8, base_q_idx);

    delta_q(delta_q_y_dc);
    delta_q(delta_q_uv_dc);
    delta_q(delta_q_uv_ac);

    return 0;
}
// Segmentation: tree/prediction probabilities for the segment map, and
// optional per-segment feature values (alt-Q, alt-LF, ref, skip).
static int FUNC(segmentation_params)(CodedBitstreamContext *ctx, RWContext *rw,
                                     VP9RawFrameHeader *current) {
    // Bit width and signedness of each feature's coded value; zero-width
    // features (skip) carry no value bits.
    static const uint8_t segmentation_feature_bits[VP9_SEG_LVL_MAX] = { 8, 6, 2, 0 };
    static const uint8_t segmentation_feature_signed[VP9_SEG_LVL_MAX] = { 1, 1, 0, 0 };
    int err, i, j;

    f(1, segmentation_enabled);

    if(current->segmentation_enabled) {
        f(1, segmentation_update_map);
        if(current->segmentation_update_map) {
            for(i = 0; i < 7; i++)
                prob(segmentation_tree_probs[i], 1, i);
            f(1, segmentation_temporal_update);
            // Prediction probs default to 255 when temporal update is off.
            for(i = 0; i < 3; i++) {
                if(current->segmentation_temporal_update)
                    prob(segmentation_pred_prob[i], 1, i);
                else
                    infer(segmentation_pred_prob[i], 255);
            }
        }

        f(1, segmentation_update_data);
        if(current->segmentation_update_data) {
            f(1, segmentation_abs_or_delta_update);
            for(i = 0; i < VP9_MAX_SEGMENTS; i++) {
                for(j = 0; j < VP9_SEG_LVL_MAX; j++) {
                    fs(1, feature_enabled[i][j], 2, i, j);
                    if(current->feature_enabled[i][j] &&
                       segmentation_feature_bits[j]) {
                        fs(segmentation_feature_bits[j],
                           feature_value[i][j], 2, i, j);
                        if(segmentation_feature_signed[j])
                            fs(1, feature_sign[i][j], 2, i, j);
                        else
                            infer(feature_sign[i][j], 0);
                    }
                    else {
                        infer(feature_value[i][j], 0);
                        infer(feature_sign[i][j], 0);
                    }
                }
            }
        }
    }

    return 0;
}
// Tile layout: log2 of the tile column count is increment-coded within
// limits derived from the superblock width; up to 4 tile rows.
static int FUNC(tile_info)(CodedBitstreamContext *ctx, RWContext *rw,
                           VP9RawFrameHeader *current) {
    CodedBitstreamVP9Context *vp9 = ctx->priv_data;
    int min_log2_tile_cols, max_log2_tile_cols;
    int err;

    // Smallest column count keeping each tile within the max tile width.
    min_log2_tile_cols = 0;
    while((VP9_MAX_TILE_WIDTH_B64 << min_log2_tile_cols) < vp9->sb64_cols)
        ++min_log2_tile_cols;
    // Largest column count keeping each tile at the minimum tile width.
    max_log2_tile_cols = 0;
    while((vp9->sb64_cols >> (max_log2_tile_cols + 1)) >= VP9_MIN_TILE_WIDTH_B64)
        ++max_log2_tile_cols;

    increment(tile_cols_log2, min_log2_tile_cols, max_log2_tile_cols);

    increment(tile_rows_log2, 0, 2);

    return 0;
}
/*
 * Parse/write uncompressed_header(): the fixed-layout portion of a VP9 frame
 * header.  Also records the geometry of refreshed reference slots in the
 * codec-private context and logs a per-frame summary.
 */
static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
                                     VP9RawFrameHeader *current) {
    CodedBitstreamVP9Context *vp9 = ctx->priv_data;
    int err, i;
    f(2, frame_marker);
    // The 2-bit profile is coded as two separate bits, low bit first.
    f(1, profile_low_bit);
    f(1, profile_high_bit);
    vp9->profile = (current->profile_high_bit << 1) + current->profile_low_bit;
    if(vp9->profile == 3)
        fixed(1, reserved_zero, 0);
    f(1, show_existing_frame);
    if(current->show_existing_frame) {
        // "Show existing frame" headers only carry a reference index; the
        // remaining fields take inferred defaults and parsing stops here.
        f(3, frame_to_show_map_idx);
        infer(header_size_in_bytes, 0);
        infer(refresh_frame_flags, 0x00);
        infer(loop_filter_level, 0);
        return 0;
    }
    f(1, frame_type);
    f(1, show_frame);
    f(1, error_resilient_mode);
    if(current->frame_type == VP9_KEY_FRAME) {
        CHECK(FUNC(frame_sync_code)(ctx, rw, current));
        CHECK(FUNC(color_config)(ctx, rw, current, vp9->profile));
        CHECK(FUNC(frame_size)(ctx, rw, current));
        CHECK(FUNC(render_size)(ctx, rw, current));
        // Key frames implicitly refresh all eight reference slots.
        infer(refresh_frame_flags, 0xff);
    }
    else {
        if(current->show_frame == 0)
            f(1, intra_only);
        else
            infer(intra_only, 0);
        if(current->error_resilient_mode == 0)
            f(2, reset_frame_context);
        else
            infer(reset_frame_context, 0);
        if(current->intra_only == 1) {
            CHECK(FUNC(frame_sync_code)(ctx, rw, current));
            if(vp9->profile > 0) {
                CHECK(FUNC(color_config)(ctx, rw, current, vp9->profile));
            }
            else {
                // Profile-0 intra-only frames have no color_config; they are
                // implicitly 8-bit with 4:2:0 subsampling.
                infer(color_space, 1);
                infer(subsampling_x, 1);
                infer(subsampling_y, 1);
                vp9->bit_depth = 8;
                vp9->subsampling_x = current->subsampling_x;
                vp9->subsampling_y = current->subsampling_y;
            }
            f(8, refresh_frame_flags);
            CHECK(FUNC(frame_size)(ctx, rw, current));
            CHECK(FUNC(render_size)(ctx, rw, current));
        }
        else {
            // Regular inter frame: three reference indices plus a sign-bias
            // flag for each.
            f(8, refresh_frame_flags);
            for(i = 0; i < VP9_REFS_PER_FRAME; i++) {
                fs(3, ref_frame_idx[i], 1, i);
                fs(1, ref_frame_sign_bias[VP9_LAST_FRAME + i],
                   1, VP9_LAST_FRAME + i);
            }
            CHECK(FUNC(frame_size_with_refs)(ctx, rw, current));
            f(1, allow_high_precision_mv);
            CHECK(FUNC(interpolation_filter)(ctx, rw, current));
        }
    }
    if(current->error_resilient_mode == 0) {
        f(1, refresh_frame_context);
        f(1, frame_parallel_decoding_mode);
    }
    else {
        infer(refresh_frame_context, 0);
        infer(frame_parallel_decoding_mode, 1);
    }
    f(2, frame_context_idx);
    CHECK(FUNC(loop_filter_params)(ctx, rw, current));
    CHECK(FUNC(quantization_params)(ctx, rw, current));
    CHECK(FUNC(segmentation_params)(ctx, rw, current));
    CHECK(FUNC(tile_info)(ctx, rw, current));
    f(16, header_size_in_bytes);
    // Propagate this frame's geometry into every reference slot it refreshes.
    for(i = 0; i < VP9_NUM_REF_FRAMES; i++) {
        if(current->refresh_frame_flags & (1 << i)) {
            vp9->ref[i] = (VP9ReferenceFrameState) {
                .frame_width = vp9->frame_width,
                .frame_height = vp9->frame_height,
                .subsampling_x = vp9->subsampling_x,
                .subsampling_y = vp9->subsampling_y,
                .bit_depth = vp9->bit_depth,
            };
        }
    }
    av_log(ctx->log_ctx, AV_LOG_DEBUG, "Frame: size %dx%d "
           "subsample %dx%d bit_depth %d tiles %dx%d.\n",
           vp9->frame_width, vp9->frame_height,
           vp9->subsampling_x, vp9->subsampling_y,
           vp9->bit_depth, 1 << current->tile_cols_log2,
           1 << current->tile_rows_log2);
    return 0;
}
/* Consume/emit zero padding bits up to the next byte boundary. */
static int FUNC(trailing_bits)(CodedBitstreamContext *ctx, RWContext *rw) {
    int err;
    while(byte_alignment(rw) != 0)
        fixed(1, zero_bit, 0);
    return 0;
}
/* Parse/write one VP9 frame's uncompressed header followed by the
 * byte-alignment padding. */
static int FUNC(frame)(CodedBitstreamContext *ctx, RWContext *rw,
                       VP9RawFrame *current) {
    int err;
    HEADER("Frame");
    CHECK(FUNC(uncompressed_header)(ctx, rw, &current->header));
    CHECK(FUNC(trailing_bits)(ctx, rw));
    return 0;
}
/*
 * Parse/write a VP9 superframe index.  Note the marker/size/count fields
 * appear twice: once before and once (repeated identically) after the list
 * of frame sizes, as the superframe syntax requires.
 */
static int FUNC(superframe_index)(CodedBitstreamContext *ctx, RWContext *rw,
                                  VP9RawSuperframeIndex *current) {
    int err, i;
    HEADER("Superframe Index");
    f(3, superframe_marker);
    f(2, bytes_per_framesize_minus_1);
    f(3, frames_in_superframe_minus_1);
    for(i = 0; i <= current->frames_in_superframe_minus_1; i++) {
        // Surprise little-endian!
        fle(8 * (current->bytes_per_framesize_minus_1 + 1),
            frame_sizes[i], 1, i);
    }
    // Trailing copy of the header fields closes the index.
    f(3, superframe_marker);
    f(2, bytes_per_framesize_minus_1);
    f(3, frames_in_superframe_minus_1);
    return 0;
}

View File

@ -1,27 +0,0 @@
#ifndef CBS_CONFIG_H
#define CBS_CONFIG_H
/*
 * Standalone replacement for FFmpeg's generated config.h: derive
 * AV_HAVE_BIGENDIAN from predefined compiler macros instead of running
 * configure.
 */
#if defined(__BYTE_ORDER) && __BYTE_ORDER == __BIG_ENDIAN || \
  defined(__BIG_ENDIAN__) || \
  defined(__ARMEB__) || \
  defined(__THUMBEB__) || \
  defined(__AARCH64EB__) || \
  defined(_MIBSEB) || defined(__MIBSEB) || defined(__MIBSEB__)
// It's a big-endian target architecture
#define AV_HAVE_BIGENDIAN 1
#elif defined(__BYTE_ORDER) && __BYTE_ORDER == __LITTLE_ENDIAN || \
  defined(__LITTLE_ENDIAN__) || \
  defined(__ARMEL__) || \
  defined(__THUMBEL__) || \
  defined(__AARCH64EL__) || \
  defined(_MIPSEL) || defined(__MIPSEL) || defined(__MIPSEL__) || \
  defined(_WIN32)
// It's a little-endian target architecture
#define AV_HAVE_BIGENDIAN 0
#else
#error "Unknown Endianness"
#endif
#endif

View File

@ -1,51 +0,0 @@
/*
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_DEFS_H
#define AVCODEC_DEFS_H
/**
 * @file
 * @ingroup libavc
 * Misc types and constants that do not belong anywhere else.
 */
#include <stdint.h>
#include <stdlib.h>
/**
 * @ingroup lavc_decoding
 * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
 * This is mainly needed because some optimized bitstream readers read
 * 32 or 64 bit at once and could read over the end.<br>
 * Note: If the first 23 bits of the additional bytes are not 0, then damaged
 * MPEG bitstreams could cause overread and segfault.
 */
#define AV_INPUT_BUFFER_PADDING_SIZE 64
/**
 * Encode extradata length to a buffer. Used by xiph codecs.
 *
 * @param s buffer to write to; must be at least (v/255+1) bytes long
 * @param v size of extradata in bytes
 * @return number of bytes written to the buffer.
 */
unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
#endif // AVCODEC_DEFS_H

View File

@ -1,831 +0,0 @@
/*
* Copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
* Copyright (c) 2016 Alexandra Hájková
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* bitstream reader API header.
*/
#ifndef AVCODEC_GET_BITS_H
#define AVCODEC_GET_BITS_H
#include <stdint.h>
#include <libavutil/avassert.h>
#include <libavutil/common.h>
#include <libavutil/intreadwrite.h>
#include <libavutil/log.h>
#include "defs.h"
#include "mathops.h"
#include "vlc.h"
/*
* Safe bitstream reading:
* optionally, the get_bits API can check to ensure that we
* don't read past input buffer boundaries. This is protected
* with CONFIG_SAFE_BITSTREAM_READER at the global level, and
* then below that with UNCHECKED_BITSTREAM_READER at the per-
* decoder level. This means that decoders that check internally
* can "#define UNCHECKED_BITSTREAM_READER 1" to disable
* overread checks.
* Boundary checking causes a minor performance penalty so for
* applications that won't want/need this, it can be disabled
* globally using "#define CONFIG_SAFE_BITSTREAM_READER 0".
*/
#ifndef UNCHECKED_BITSTREAM_READER
#define UNCHECKED_BITSTREAM_READER 0
#endif
#ifndef CACHED_BITSTREAM_READER
#define CACHED_BITSTREAM_READER 0
#endif
#include "cbs/h2645_parse.h"
static inline unsigned int get_bits(GetBitContext *s, int n);
static inline void skip_bits(GetBitContext *s, int n);
static inline unsigned int show_bits(GetBitContext *s, int n);
/* Bitstream reader API docs:
* name
* arbitrary name which is used as prefix for the internal variables
*
* gb
* getbitcontext
*
* OPEN_READER(name, gb)
* load gb into local variables
*
* CLOSE_READER(name, gb)
* store local vars in gb
*
* UPDATE_CACHE(name, gb)
* Refill the internal cache from the bitstream.
* After this call at least MIN_CACHE_BITS will be available.
*
* GET_CACHE(name, gb)
* Will output the contents of the internal cache,
* next bit is MSB of 32 or 64 bits (FIXME 64 bits).
*
* SHOW_UBITS(name, gb, num)
* Will return the next num bits.
*
* SHOW_SBITS(name, gb, num)
* Will return the next num bits and do sign extension.
*
* SKIP_BITS(name, gb, num)
* Will skip over the next num bits.
* Note, this is equivalent to SKIP_CACHE; SKIP_COUNTER.
*
* SKIP_CACHE(name, gb, num)
* Will remove the next num bits from the cache (note SKIP_COUNTER
* MUST be called before UPDATE_CACHE / CLOSE_READER).
*
* SKIP_COUNTER(name, gb, num)
* Will increment the internal bit counter (see SKIP_CACHE & SKIP_BITS).
*
* LAST_SKIP_BITS(name, gb, num)
* Like SKIP_BITS, to be used if next call is UPDATE_CACHE or CLOSE_READER.
*
* BITS_LEFT(name, gb)
* Return the number of bits left
*
* For examples see get_bits, show_bits, skip_bits, get_vlc.
*/
/* Cache width available after UPDATE_CACHE, per reader configuration. */
#if CACHED_BITSTREAM_READER
#define MIN_CACHE_BITS 64
#elif defined LONG_BITSTREAM_READER
#define MIN_CACHE_BITS 32
#else
#define MIN_CACHE_BITS 25
#endif
#if !CACHED_BITSTREAM_READER
/* Local-variable based reader: OPEN_READER declares the locals,
 * CLOSE_READER writes the position back into the context. */
#define OPEN_READER_NOSIZE(name, gb) \
    unsigned int name##_index = (gb)->index; \
    unsigned int av_unused name##_cache
#if UNCHECKED_BITSTREAM_READER
#define OPEN_READER(name, gb) OPEN_READER_NOSIZE(name, gb)
#define BITS_AVAILABLE(name, gb) 1
#else
#define OPEN_READER(name, gb) \
    OPEN_READER_NOSIZE(name, gb); \
    unsigned int name##_size_plus8 = (gb)->size_in_bits_plus8
#define BITS_AVAILABLE(name, gb) name##_index < name##_size_plus8
#endif
#define CLOSE_READER(name, gb) (gb)->index = name##_index
/* Cache refill: 64-bit loads for the LONG reader, 32-bit otherwise. */
#ifdef LONG_BITSTREAM_READER
#define UPDATE_CACHE_LE(name, gb) name##_cache = \
    AV_RL64((gb)->buffer + (name##_index >> 3)) >> (name##_index & 7)
#define UPDATE_CACHE_BE(name, gb) name##_cache = \
    AV_RB64((gb)->buffer + (name##_index >> 3)) >> (32 - (name##_index & 7))
#else
#define UPDATE_CACHE_LE(name, gb) name##_cache = \
    AV_RL32((gb)->buffer + (name##_index >> 3)) >> (name##_index & 7)
#define UPDATE_CACHE_BE(name, gb) name##_cache = \
    AV_RB32((gb)->buffer + (name##_index >> 3)) << (name##_index & 7)
#endif
#ifdef BITSTREAM_READER_LE
#define UPDATE_CACHE(name, gb) UPDATE_CACHE_LE(name, gb)
#define SKIP_CACHE(name, gb, num) name##_cache >>= (num)
#else
#define UPDATE_CACHE(name, gb) UPDATE_CACHE_BE(name, gb)
#define SKIP_CACHE(name, gb, num) name##_cache <<= (num)
#endif
/* The checked counter clamps at size_in_bits_plus8 to stop overreads. */
#if UNCHECKED_BITSTREAM_READER
#define SKIP_COUNTER(name, gb, num) name##_index += (num)
#else
#define SKIP_COUNTER(name, gb, num) \
    name##_index = FFMIN(name##_size_plus8, name##_index + (num))
#endif
#define BITS_LEFT(name, gb) ((int)((gb)->size_in_bits - name##_index))
#define SKIP_BITS(name, gb, num) \
    do { \
        SKIP_CACHE(name, gb, num); \
        SKIP_COUNTER(name, gb, num); \
    } while(0)
#define LAST_SKIP_BITS(name, gb, num) SKIP_COUNTER(name, gb, num)
/* Extract num bits from the top (BE) or bottom (LE) of the cache. */
#define SHOW_UBITS_LE(name, gb, num) zero_extend(name##_cache, num)
#define SHOW_SBITS_LE(name, gb, num) sign_extend(name##_cache, num)
#define SHOW_UBITS_BE(name, gb, num) NEG_USR32(name##_cache, num)
#define SHOW_SBITS_BE(name, gb, num) NEG_SSR32(name##_cache, num)
#ifdef BITSTREAM_READER_LE
#define SHOW_UBITS(name, gb, num) SHOW_UBITS_LE(name, gb, num)
#define SHOW_SBITS(name, gb, num) SHOW_SBITS_LE(name, gb, num)
#else
#define SHOW_UBITS(name, gb, num) SHOW_UBITS_BE(name, gb, num)
#define SHOW_SBITS(name, gb, num) SHOW_SBITS_BE(name, gb, num)
#endif
#define GET_CACHE(name, gb) ((uint32_t)name##_cache)
#endif
/* Number of bits consumed so far from the start of the buffer. */
static inline int get_bits_count(const GetBitContext *s) {
#if CACHED_BITSTREAM_READER
    // Cached reader: index counts refilled bits, so subtract what is still
    // unconsumed in the cache.
    return s->index - s->bits_left;
#else
    return s->index;
#endif
}
#if CACHED_BITSTREAM_READER
/* Top up the 64-bit cache with 32 more bits (no-op past the buffer end
 * for the checked reader). */
static inline void refill_32(GetBitContext *s, int is_le) {
#if !UNCHECKED_BITSTREAM_READER
    if(s->index >> 3 >= s->buffer_end - s->buffer)
        return;
#endif
    if(is_le)
        s->cache = (uint64_t)AV_RL32(s->buffer + (s->index >> 3)) << s->bits_left | s->cache;
    else
        s->cache = s->cache | (uint64_t)AV_RB32(s->buffer + (s->index >> 3)) << (32 - s->bits_left);
    s->index += 32;
    s->bits_left += 32;
}
/* Replace the cache wholesale with the next 64 bits. */
static inline void refill_64(GetBitContext *s, int is_le) {
#if !UNCHECKED_BITSTREAM_READER
    if(s->index >> 3 >= s->buffer_end - s->buffer)
        return;
#endif
    if(is_le)
        s->cache = AV_RL64(s->buffer + (s->index >> 3));
    else
        s->cache = AV_RB64(s->buffer + (s->index >> 3));
    s->index += 64;
    s->bits_left = 64;
}
/* Pop n bits (1..63) from the cache; caller guarantees availability. */
static inline uint64_t get_val(GetBitContext *s, unsigned n, int is_le) {
    uint64_t ret;
    av_assert2(n > 0 && n <= 63);
    if(is_le) {
        ret = s->cache & ((UINT64_C(1) << n) - 1);
        s->cache >>= n;
    }
    else {
        ret = s->cache >> (64 - n);
        s->cache <<= n;
    }
    s->bits_left -= n;
    return ret;
}
/* Peek n bits from the cache without consuming them. */
static inline unsigned show_val(const GetBitContext *s, unsigned n) {
#ifdef BITSTREAM_READER_LE
    return s->cache & ((UINT64_C(1) << n) - 1);
#else
    return s->cache >> (64 - n);
#endif
}
#endif
/**
 * Skips the specified number of bits.
 * @param n the number of bits to skip,
 *          For the UNCHECKED_BITSTREAM_READER this must not cause the distance
 *          from the start to overflow int32_t. Staying within the bitstream + padding
 *          is sufficient, too.
 */
static inline void skip_bits_long(GetBitContext *s, int n) {
#if CACHED_BITSTREAM_READER
    skip_bits(s, n);
#else
#if UNCHECKED_BITSTREAM_READER
    s->index += n;
#else
    // Clamp so the index stays inside [0, size_in_bits_plus8].
    s->index += av_clip(n, -s->index, s->size_in_bits_plus8 - s->index);
#endif
#endif
}
#if CACHED_BITSTREAM_READER
/* Discard n bits that are already present in the cache. */
static inline void skip_remaining(GetBitContext *s, unsigned n) {
#ifdef BITSTREAM_READER_LE
    s->cache >>= n;
#else
    s->cache <<= n;
#endif
    s->bits_left -= n;
}
#endif
/**
 * Read MPEG-1 dc-style VLC (sign bit + mantissa with no MSB).
 * if MSB not set it is negative
 * @param n length in bits
 */
static inline int get_xbits(GetBitContext *s, int n) {
#if CACHED_BITSTREAM_READER
    int32_t cache = show_bits(s, 32);
    int sign = ~cache >> 31;
    skip_remaining(s, n);
    return ((((uint32_t)(sign ^ cache)) >> (32 - n)) ^ sign) - sign;
#else
    register int sign;
    register int32_t cache;
    OPEN_READER(re, s);
    av_assert2(n > 0 && n <= 25);
    UPDATE_CACHE(re, s);
    cache = GET_CACHE(re, s);
    // sign is 0 for non-negative, -1 for negative; the xor/subtract pair
    // applies it branchlessly.
    sign = ~cache >> 31;
    LAST_SKIP_BITS(re, s, n);
    CLOSE_READER(re, s);
    return (NEG_USR32(sign ^ cache, n) ^ sign) - sign;
#endif
}
#if !CACHED_BITSTREAM_READER
/* Little-endian variant of get_xbits(). */
static inline int get_xbits_le(GetBitContext *s, int n) {
    register int sign;
    register int32_t cache;
    OPEN_READER(re, s);
    av_assert2(n > 0 && n <= 25);
    UPDATE_CACHE_LE(re, s);
    cache = GET_CACHE(re, s);
    sign = sign_extend(~cache, n) >> 31;
    LAST_SKIP_BITS(re, s, n);
    CLOSE_READER(re, s);
    return (zero_extend(sign ^ cache, n) ^ sign) - sign;
}
#endif
/* Read 1-25 bits and sign-extend the result. */
static inline int get_sbits(GetBitContext *s, int n) {
    register int tmp;
#if CACHED_BITSTREAM_READER
    av_assert2(n > 0 && n <= 25);
    tmp = sign_extend(get_bits(s, n), n);
#else
    OPEN_READER(re, s);
    av_assert2(n > 0 && n <= 25);
    UPDATE_CACHE(re, s);
    tmp = SHOW_SBITS(re, s, n);
    LAST_SKIP_BITS(re, s, n);
    CLOSE_READER(re, s);
#endif
    return tmp;
}
/**
 * Read 1-25 bits.
 * (The cached reader accepts up to 32 bits.)
 */
static inline unsigned int get_bits(GetBitContext *s, int n) {
    register unsigned int tmp;
#if CACHED_BITSTREAM_READER
    av_assert2(n > 0 && n <= 32);
    if(n > s->bits_left) {
#ifdef BITSTREAM_READER_LE
        refill_32(s, 1);
#else
        refill_32(s, 0);
#endif
        // Refill failed near the end of the buffer: pretend exactly n bits
        // remain so the read below still completes.
        if(s->bits_left < 32)
            s->bits_left = n;
    }
#ifdef BITSTREAM_READER_LE
    tmp = get_val(s, n, 1);
#else
    tmp = get_val(s, n, 0);
#endif
#else
    OPEN_READER(re, s);
    av_assert2(n > 0 && n <= 25);
    UPDATE_CACHE(re, s);
    tmp = SHOW_UBITS(re, s, n);
    LAST_SKIP_BITS(re, s, n);
    CLOSE_READER(re, s);
#endif
    av_assert2(tmp < UINT64_C(1) << n);
    return tmp;
}
/**
 * Read 0-25 bits; a width of zero returns 0 without touching the reader.
 */
static av_always_inline int get_bitsz(GetBitContext *s, int n) {
    if (!n)
        return 0;
    return get_bits(s, n);
}
/* Read 1-25 bits (1-32 for the cached reader) in little-endian bit order. */
static inline unsigned int get_bits_le(GetBitContext *s, int n) {
#if CACHED_BITSTREAM_READER
    av_assert2(n > 0 && n <= 32);
    if(n > s->bits_left) {
        refill_32(s, 1);
        if(s->bits_left < 32)
            s->bits_left = n;
    }
    return get_val(s, n, 1);
#else
    register int tmp;
    OPEN_READER(re, s);
    av_assert2(n > 0 && n <= 25);
    UPDATE_CACHE_LE(re, s);
    tmp = SHOW_UBITS_LE(re, s, n);
    LAST_SKIP_BITS(re, s, n);
    CLOSE_READER(re, s);
    return tmp;
#endif
}
/**
 * Show 1-25 bits.
 * Peeks without consuming; the context is left unchanged except for a
 * possible cache refill in the cached reader.
 */
static inline unsigned int show_bits(GetBitContext *s, int n) {
    register unsigned int tmp;
#if CACHED_BITSTREAM_READER
    if(n > s->bits_left)
#ifdef BITSTREAM_READER_LE
        refill_32(s, 1);
#else
        refill_32(s, 0);
#endif
    tmp = show_val(s, n);
#else
    OPEN_READER_NOSIZE(re, s);
    av_assert2(n > 0 && n <= 25);
    UPDATE_CACHE(re, s);
    tmp = SHOW_UBITS(re, s, n);
#endif
    return tmp;
}
/* Discard n bits; for the cached reader this may span cache refills and
 * whole-byte jumps of the index. */
static inline void skip_bits(GetBitContext *s, int n) {
#if CACHED_BITSTREAM_READER
    if(n < s->bits_left)
        skip_remaining(s, n);
    else {
        // Drop whatever is cached, advance by whole bytes for large skips,
        // then refill and drop the remainder.
        n -= s->bits_left;
        s->cache = 0;
        s->bits_left = 0;
        if(n >= 64) {
            unsigned skip = (n / 8) * 8;
            n -= skip;
            s->index += skip;
        }
#ifdef BITSTREAM_READER_LE
        refill_64(s, 1);
#else
        refill_64(s, 0);
#endif
        if(n)
            skip_remaining(s, n);
    }
#else
    OPEN_READER(re, s);
    LAST_SKIP_BITS(re, s, n);
    CLOSE_READER(re, s);
#endif
}
/* Read a single bit. */
static inline unsigned int get_bits1(GetBitContext *s) {
#if CACHED_BITSTREAM_READER
    if(!s->bits_left)
#ifdef BITSTREAM_READER_LE
        refill_64(s, 1);
#else
        refill_64(s, 0);
#endif
#ifdef BITSTREAM_READER_LE
    return get_val(s, 1, 1);
#else
    return get_val(s, 1, 0);
#endif
#else
    // Non-cached path reads the containing byte directly.
    unsigned int index = s->index;
    uint8_t result = s->buffer[index >> 3];
#ifdef BITSTREAM_READER_LE
    result >>= index & 7;
    result &= 1;
#else
    result <<= index & 7;
    result >>= 8 - 1;
#endif
    // Checked reader: only advance while still inside buffer + padding.
#if !UNCHECKED_BITSTREAM_READER
    if(s->index < s->size_in_bits_plus8)
#endif
        index++;
    s->index = index;
    return result;
#endif
}
/** Peek at the next bit without consuming it. */
static inline unsigned int show_bits1(GetBitContext *s) {
    unsigned int bit = show_bits(s, 1);
    return bit;
}
/** Discard exactly one bit. */
static inline void skip_bits1(GetBitContext *s) {
    const int one_bit = 1;
    skip_bits(s, one_bit);
}
/**
 * Read 0-32 bits.
 * Widths beyond the cache size are assembled from two get_bits() calls.
 */
static inline unsigned int get_bits_long(GetBitContext *s, int n) {
    av_assert2(n >= 0 && n <= 32);
    if(!n) {
        return 0;
#if CACHED_BITSTREAM_READER
    }
    return get_bits(s, n);
#else
    }
    else if(n <= MIN_CACHE_BITS) {
        return get_bits(s, n);
    }
    else {
        // Split into 16 + (n-16); the two halves are combined in the order
        // dictated by the reader's bit direction.
#ifdef BITSTREAM_READER_LE
        unsigned ret = get_bits(s, 16);
        return ret | (get_bits(s, n - 16) << 16);
#else
        unsigned ret = get_bits(s, 16) << (n - 16);
        return ret | get_bits(s, n - 16);
#endif
    }
#endif
}
/**
 * Read 0-64 bits.
 * Assembled from at most two 32-bit reads.
 */
static inline uint64_t get_bits64(GetBitContext *s, int n) {
    if(n <= 32) {
        return get_bits_long(s, n);
    }
    else {
#ifdef BITSTREAM_READER_LE
        uint64_t ret = get_bits_long(s, 32);
        return ret | (uint64_t)get_bits_long(s, n - 32) << 32;
#else
        uint64_t ret = (uint64_t)get_bits_long(s, n - 32) << 32;
        return ret | get_bits_long(s, 32);
#endif
    }
}
/**
 * Read 0-32 bits as a signed integer (two's-complement sign extension).
 */
static inline int get_sbits_long(GetBitContext *s, int n) {
    int raw;
    // sign_extend(x, 0) is undefined, so a zero width short-circuits to 0.
    if (n == 0)
        return 0;
    raw = get_bits_long(s, n);
    return sign_extend(raw, n);
}
/**
 * Show 0-32 bits without consuming them.
 * Wide peeks are performed on a throwaway copy of the context.
 */
static inline unsigned int show_bits_long(GetBitContext *s, int n) {
    if (n > MIN_CACHE_BITS) {
        GetBitContext scratch = *s;
        return get_bits_long(&scratch, n);
    }
    return show_bits(s, n);
}
/* Consume one marker bit; log (at INFO level) if it is not set.
 * Returns the bit that was read. */
static inline int check_marker(void *logctx, GetBitContext *s, const char *msg) {
    int marker = get_bits1(s);
    if (marker == 0)
        av_log(logctx, AV_LOG_INFO, "Marker bit missing at %d of %d %s\n",
               get_bits_count(s) - 1, s->size_in_bits, msg);
    return marker;
}
/* Shared initializer for init_get_bits()/init_get_bits8_le().
 * On invalid input the context is set up as an empty (zero-length) stream
 * and AVERROR_INVALIDDATA is returned. */
static inline int init_get_bits_xe(GetBitContext *s, const uint8_t *buffer,
                                   int bit_size, int is_le) {
    int buffer_size;
    int ret = 0;
    // Reject sizes that would overflow index arithmetic (which adds up to
    // max(7, padding*8) to bit positions), negative sizes and NULL buffers.
    if(bit_size >= INT_MAX - FFMAX(7, AV_INPUT_BUFFER_PADDING_SIZE * 8) || bit_size < 0 || !buffer) {
        bit_size = 0;
        buffer = NULL;
        ret = AVERROR_INVALIDDATA;
    }
    buffer_size = (bit_size + 7) >> 3;
    s->buffer = buffer;
    s->size_in_bits = bit_size;
    s->size_in_bits_plus8 = bit_size + 8;
    s->buffer_end = buffer + buffer_size;
    s->index = 0;
#if CACHED_BITSTREAM_READER
    s->cache = 0;
    s->bits_left = 0;
    refill_64(s, is_le);
#endif
    return ret;
}
/**
 * Initialize GetBitContext.
 * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
 *        larger than the actual read bits because some optimized bitstream
 *        readers read 32 or 64 bit at once and could read over the end
 * @param bit_size the size of the buffer in bits
 * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
 */
static inline int init_get_bits(GetBitContext *s, const uint8_t *buffer,
                                int bit_size) {
#ifdef BITSTREAM_READER_LE
    return init_get_bits_xe(s, buffer, bit_size, 1);
#else
    return init_get_bits_xe(s, buffer, bit_size, 0);
#endif
}
/**
 * Initialize GetBitContext from a byte count.
 * @param buffer bitstream buffer, must be AV_INPUT_BUFFER_PADDING_SIZE bytes
 *        larger than the actual read bits because some optimized bitstream
 *        readers read 32 or 64 bit at once and could read over the end
 * @param byte_size the size of the buffer in bytes
 * @return 0 on success, AVERROR_INVALIDDATA if the buffer_size would overflow.
 */
static inline int init_get_bits8(GetBitContext *s, const uint8_t *buffer,
                                 int byte_size) {
    int checked_size = byte_size;
    // Sizes whose bit count would overflow int are mapped to -1, which
    // init_get_bits() rejects as invalid.
    if (checked_size < 0 || checked_size > INT_MAX / 8)
        checked_size = -1;
    return init_get_bits(s, buffer, checked_size * 8);
}
/* Little-endian counterpart of init_get_bits8(). */
static inline int init_get_bits8_le(GetBitContext *s, const uint8_t *buffer,
                                    int byte_size) {
    int checked_size = byte_size;
    if (checked_size < 0 || checked_size > INT_MAX / 8)
        checked_size = -1;
    return init_get_bits_xe(s, buffer, checked_size * 8, 1);
}
/* Advance to the next byte boundary and return a pointer to it. */
static inline const uint8_t *align_get_bits(GetBitContext *s) {
    int pad = -get_bits_count(s) & 7;
    if (pad != 0)
        skip_bits(s, pad);
    return &s->buffer[s->index >> 3];
}
/**
 * If the vlc code is invalid and max_depth=1, then no bits will be removed.
 * If the vlc code is invalid and max_depth>1, then the number of bits removed
 * is undefined.
 * Multi-level table walk: a negative length entry means "descend"; its
 * magnitude is the bit width of the next-level index.
 */
#define GET_VLC(code, name, gb, table, bits, max_depth) \
    do { \
        int n, nb_bits; \
        unsigned int index; \
        \
        index = SHOW_UBITS(name, gb, bits); \
        code = table[index][0]; \
        n = table[index][1]; \
        \
        if(max_depth > 1 && n < 0) { \
            LAST_SKIP_BITS(name, gb, bits); \
            UPDATE_CACHE(name, gb); \
            \
            nb_bits = -n; \
            \
            index = SHOW_UBITS(name, gb, nb_bits) + code; \
            code = table[index][0]; \
            n = table[index][1]; \
            if(max_depth > 2 && n < 0) { \
                LAST_SKIP_BITS(name, gb, nb_bits); \
                UPDATE_CACHE(name, gb); \
                \
                nb_bits = -n; \
                \
                index = SHOW_UBITS(name, gb, nb_bits) + code; \
                code = table[index][0]; \
                n = table[index][1]; \
            } \
        } \
        SKIP_BITS(name, gb, n); \
    } while(0)
/* Run/level variant of GET_VLC: entries carry {level, run, len} and the
 * multi-level descent uses the same negative-length convention. */
#define GET_RL_VLC(level, run, name, gb, table, bits, \
                   max_depth, need_update) \
    do { \
        int n, nb_bits; \
        unsigned int index; \
        \
        index = SHOW_UBITS(name, gb, bits); \
        level = table[index].level; \
        n = table[index].len; \
        \
        if(max_depth > 1 && n < 0) { \
            SKIP_BITS(name, gb, bits); \
            if(need_update) { \
                UPDATE_CACHE(name, gb); \
            } \
            \
            nb_bits = -n; \
            \
            index = SHOW_UBITS(name, gb, nb_bits) + level; \
            level = table[index].level; \
            n = table[index].len; \
            if(max_depth > 2 && n < 0) { \
                LAST_SKIP_BITS(name, gb, nb_bits); \
                if(need_update) { \
                    UPDATE_CACHE(name, gb); \
                } \
                nb_bits = -n; \
                \
                index = SHOW_UBITS(name, gb, nb_bits) + level; \
                level = table[index].level; \
                n = table[index].len; \
            } \
        } \
        run = table[index].run; \
        SKIP_BITS(name, gb, n); \
    } while(0)
/* Return the LUT element for the given bitstream configuration.
 * Descends one level in a multi-level VLC table: writes the next-level bit
 * width into *nb_bits, updates *n with the entry's length and returns its
 * code/offset. */
static inline int set_idx(GetBitContext *s, int code, int *n, int *nb_bits,
                          VLC_TYPE (*table)[2]) {
    unsigned entry;
    *nb_bits = -*n;
    entry = show_bits(s, *nb_bits) + code;
    *n = table[entry][1];
    return table[entry][0];
}
/**
 * Parse a vlc code.
 * @param bits is the number of bits which will be read at once, must be
 *             identical to nb_bits in init_vlc()
 * @param max_depth is the number of times bits bits must be read to completely
 *                  read the longest vlc code
 *                  = (max_vlc_length + bits - 1) / bits
 * @returns the code parsed or -1 if no vlc matches
 */
static av_always_inline int get_vlc2(GetBitContext *s, VLC_TYPE (*table)[2],
                                     int bits, int max_depth) {
#if CACHED_BITSTREAM_READER
    int nb_bits;
    unsigned idx = show_bits(s, bits);
    int code = table[idx][0];
    int n = table[idx][1];
    // Negative length means the entry points at a sub-table; descend.
    if(max_depth > 1 && n < 0) {
        skip_remaining(s, bits);
        code = set_idx(s, code, &n, &nb_bits, table);
        if(max_depth > 2 && n < 0) {
            skip_remaining(s, nb_bits);
            code = set_idx(s, code, &n, &nb_bits, table);
        }
    }
    skip_remaining(s, n);
    return code;
#else
    int code;
    OPEN_READER(re, s);
    UPDATE_CACHE(re, s);
    GET_VLC(code, re, s, table, bits, max_depth);
    CLOSE_READER(re, s);
    return code;
#endif
}
/* Decode the variable-length code {0 -> 0, 10 -> 1, 11 -> 2}. */
static inline int decode012(GetBitContext *gb) {
    if (!get_bits1(gb))
        return 0;
    return 1 + get_bits1(gb);
}
/* Decode the variable-length code {1 -> 0, 01 -> 1, 00 -> 2}. */
static inline int decode210(GetBitContext *gb) {
    int first = get_bits1(gb);
    return first ? 0 : 2 - get_bits1(gb);
}
/* Number of bits remaining between the read position and the buffer end
 * (may go negative after overreads with the checked reader). */
static inline int get_bits_left(GetBitContext *gb) {
    int consumed = get_bits_count(gb);
    return gb->size_in_bits - consumed;
}
/* Skip a sequence of (1 stop bit + 8 data bits) groups terminated by a 0
 * bit.  Returns 0 on success, AVERROR_INVALIDDATA if the stream runs out. */
static inline int skip_1stop_8data_bits(GetBitContext *gb) {
    if (get_bits_left(gb) <= 0)
        return AVERROR_INVALIDDATA;
    for (;;) {
        if (!get_bits1(gb))
            return 0;
        skip_bits(gb, 8);
        if (get_bits_left(gb) <= 0)
            return AVERROR_INVALIDDATA;
    }
}
#endif /* AVCODEC_GET_BITS_H */

View File

@ -1,535 +0,0 @@
/*
* H.264/HEVC common parsing code
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <string.h>
#include <libavutil/intreadwrite.h>
#include <libavutil/mem.h>
#include "cbs/h264.h"
#include "cbs/h2645_parse.h"
#include "cbs/hevc.h"
#include "bytestream.h"
#include "config.h"
#include "get_bits.h"
#include "intmath.h"
/*
 * Extract the RBSP from one NAL unit: copy src into the shared rbsp buffer
 * while removing 00 00 03 emulation-prevention escapes, recording the
 * position of each removed byte in nal->skipped_bytes_pos (when present).
 * With small_padding set and no escapes found, the source is referenced
 * in place instead of copied.  Returns the number of consumed src bytes
 * or a negative AVERROR code.
 */
int ff_h2645_extract_rbsp(const uint8_t *src, int length,
                          H2645RBSP *rbsp, H2645NAL *nal, int small_padding) {
    int i, si, di;
    uint8_t *dst;
    nal->skipped_bytes = 0;
#define STARTCODE_TEST \
    if(i + 2 < length && src[i + 1] == 0 && src[i + 2] <= 3) { \
        if(src[i + 2] != 3 && src[i + 2] != 0) { \
            /* startcode, so we must be past the end */ \
            length = i; \
        } \
        break; \
    }
#if HAVE_FAST_UNALIGNED
#define FIND_FIRST_ZERO \
    if(i > 0 && !src[i]) \
        i--; \
    while(src[i]) \
        i++
    // Fast scan: test 8 (or 4) bytes at a time for any zero byte before
    // falling back to the exact per-byte start-code test.
#if HAVE_FAST_64BIT
    for(i = 0; i + 1 < length; i += 9) {
        if(!((~AV_RN64(src + i) &
              (AV_RN64(src + i) - 0x0100010001000101ULL)) &
             0x8000800080008080ULL))
            continue;
        FIND_FIRST_ZERO;
        STARTCODE_TEST;
        i -= 7;
    }
#else
    for(i = 0; i + 1 < length; i += 5) {
        if(!((~AV_RN32(src + i) &
              (AV_RN32(src + i) - 0x01000101U)) &
             0x80008080U))
            continue;
        FIND_FIRST_ZERO;
        STARTCODE_TEST;
        i -= 3;
    }
#endif /* HAVE_FAST_64BIT */
#else
    for(i = 0; i + 1 < length; i += 2) {
        if(src[i])
            continue;
        if(i > 0 && src[i - 1] == 0)
            i--;
        STARTCODE_TEST;
    }
#endif /* HAVE_FAST_UNALIGNED */
    if(i >= length - 1 && small_padding) { // no escaped 0
        nal->data =
        nal->raw_data = src;
        nal->size =
        nal->raw_size = length;
        return length;
    }
    else if(i > length)
        i = length;
    // Copy the unescaped prefix verbatim, then de-escape the remainder.
    nal->rbsp_buffer = &rbsp->rbsp_buffer[rbsp->rbsp_buffer_size];
    dst = nal->rbsp_buffer;
    memcpy(dst, src, i);
    si = di = i;
    while(si + 2 < length) {
        // remove escapes (very rare 1:2^22)
        if(src[si + 2] > 3) {
            dst[di++] = src[si++];
            dst[di++] = src[si++];
        }
        else if(src[si] == 0 && src[si + 1] == 0 && src[si + 2] != 0) {
            if(src[si + 2] == 3) { // escape
                dst[di++] = 0;
                dst[di++] = 0;
                si += 3;
                if(nal->skipped_bytes_pos) {
                    nal->skipped_bytes++;
                    // Grow the position array geometrically when full.
                    if(nal->skipped_bytes_pos_size < nal->skipped_bytes) {
                        nal->skipped_bytes_pos_size *= 2;
                        av_assert0(nal->skipped_bytes_pos_size >= nal->skipped_bytes);
                        av_reallocp_array(&nal->skipped_bytes_pos,
                                          nal->skipped_bytes_pos_size,
                                          sizeof(*nal->skipped_bytes_pos));
                        if(!nal->skipped_bytes_pos) {
                            nal->skipped_bytes_pos_size = 0;
                            return AVERROR(ENOMEM);
                        }
                    }
                    if(nal->skipped_bytes_pos)
                        nal->skipped_bytes_pos[nal->skipped_bytes - 1] = di - 1;
                }
                continue;
            }
            else // next start code
                goto nsc;
        }
        dst[di++] = src[si++];
    }
    while(si < length)
        dst[di++] = src[si++];
nsc:
    memset(dst + di, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    nal->data = dst;
    nal->size = di;
    nal->raw_data = src;
    nal->raw_size = si;
    rbsp->rbsp_buffer_size += si;
    return si;
}
/* Printable names for all 64 HEVC nal_unit_type values, indexed by type. */
static const char *const hevc_nal_type_name[64] = {
    "TRAIL_N", // HEVC_NAL_TRAIL_N
    "TRAIL_R", // HEVC_NAL_TRAIL_R
    "TSA_N", // HEVC_NAL_TSA_N
    "TSA_R", // HEVC_NAL_TSA_R
    "STSA_N", // HEVC_NAL_STSA_N
    "STSA_R", // HEVC_NAL_STSA_R
    "RADL_N", // HEVC_NAL_RADL_N
    "RADL_R", // HEVC_NAL_RADL_R
    "RASL_N", // HEVC_NAL_RASL_N
    "RASL_R", // HEVC_NAL_RASL_R
    "RSV_VCL_N10", // HEVC_NAL_VCL_N10
    "RSV_VCL_R11", // HEVC_NAL_VCL_R11
    "RSV_VCL_N12", // HEVC_NAL_VCL_N12
    "RSV_VLC_R13", // HEVC_NAL_VCL_R13
    "RSV_VCL_N14", // HEVC_NAL_VCL_N14
    "RSV_VCL_R15", // HEVC_NAL_VCL_R15
    "BLA_W_LP", // HEVC_NAL_BLA_W_LP
    "BLA_W_RADL", // HEVC_NAL_BLA_W_RADL
    "BLA_N_LP", // HEVC_NAL_BLA_N_LP
    "IDR_W_RADL", // HEVC_NAL_IDR_W_RADL
    "IDR_N_LP", // HEVC_NAL_IDR_N_LP
    "CRA_NUT", // HEVC_NAL_CRA_NUT
    "RSV_IRAP_VCL22", // HEVC_NAL_RSV_IRAP_VCL22
    "RSV_IRAP_VCL23", // HEVC_NAL_RSV_IRAP_VCL23
    "RSV_VCL24", // HEVC_NAL_RSV_VCL24
    "RSV_VCL25", // HEVC_NAL_RSV_VCL25
    "RSV_VCL26", // HEVC_NAL_RSV_VCL26
    "RSV_VCL27", // HEVC_NAL_RSV_VCL27
    "RSV_VCL28", // HEVC_NAL_RSV_VCL28
    "RSV_VCL29", // HEVC_NAL_RSV_VCL29
    "RSV_VCL30", // HEVC_NAL_RSV_VCL30
    "RSV_VCL31", // HEVC_NAL_RSV_VCL31
    "VPS", // HEVC_NAL_VPS
    "SPS", // HEVC_NAL_SPS
    "PPS", // HEVC_NAL_PPS
    "AUD", // HEVC_NAL_AUD
    "EOS_NUT", // HEVC_NAL_EOS_NUT
    "EOB_NUT", // HEVC_NAL_EOB_NUT
    "FD_NUT", // HEVC_NAL_FD_NUT
    "SEI_PREFIX", // HEVC_NAL_SEI_PREFIX
    "SEI_SUFFIX", // HEVC_NAL_SEI_SUFFIX
    "RSV_NVCL41", // HEVC_NAL_RSV_NVCL41
    "RSV_NVCL42", // HEVC_NAL_RSV_NVCL42
    "RSV_NVCL43", // HEVC_NAL_RSV_NVCL43
    "RSV_NVCL44", // HEVC_NAL_RSV_NVCL44
    "RSV_NVCL45", // HEVC_NAL_RSV_NVCL45
    "RSV_NVCL46", // HEVC_NAL_RSV_NVCL46
    "RSV_NVCL47", // HEVC_NAL_RSV_NVCL47
    "UNSPEC48", // HEVC_NAL_UNSPEC48
    "UNSPEC49", // HEVC_NAL_UNSPEC49
    "UNSPEC50", // HEVC_NAL_UNSPEC50
    "UNSPEC51", // HEVC_NAL_UNSPEC51
    "UNSPEC52", // HEVC_NAL_UNSPEC52
    "UNSPEC53", // HEVC_NAL_UNSPEC53
    "UNSPEC54", // HEVC_NAL_UNSPEC54
    "UNSPEC55", // HEVC_NAL_UNSPEC55
    "UNSPEC56", // HEVC_NAL_UNSPEC56
    "UNSPEC57", // HEVC_NAL_UNSPEC57
    "UNSPEC58", // HEVC_NAL_UNSPEC58
    "UNSPEC59", // HEVC_NAL_UNSPEC59
    "UNSPEC60", // HEVC_NAL_UNSPEC60
    "UNSPEC61", // HEVC_NAL_UNSPEC61
    "UNSPEC62", // HEVC_NAL_UNSPEC62
    "UNSPEC63", // HEVC_NAL_UNSPEC63
};
/* Map an HEVC nal_unit_type (asserted to be in 0..63) to its name. */
static const char *hevc_nal_unit_name(int nal_type) {
    av_assert0(nal_type >= 0 && nal_type < 64);
    const char *const name = hevc_nal_type_name[nal_type];
    return name;
}
/* Printable names for all 32 H.264 nal_unit_type values, indexed by type. */
static const char *const h264_nal_type_name[32] = {
    "Unspecified 0", //H264_NAL_UNSPECIFIED
    "Coded slice of a non-IDR picture", // H264_NAL_SLICE
    "Coded slice data partition A", // H264_NAL_DPA
    "Coded slice data partition B", // H264_NAL_DPB
    "Coded slice data partition C", // H264_NAL_DPC
    "IDR", // H264_NAL_IDR_SLICE
    "SEI", // H264_NAL_SEI
    "SPS", // H264_NAL_SPS
    "PPS", // H264_NAL_PPS
    "AUD", // H264_NAL_AUD
    "End of sequence", // H264_NAL_END_SEQUENCE
    "End of stream", // H264_NAL_END_STREAM
    "Filler data", // H264_NAL_FILLER_DATA
    "SPS extension", // H264_NAL_SPS_EXT
    "Prefix", // H264_NAL_PREFIX
    "Subset SPS", // H264_NAL_SUB_SPS
    "Depth parameter set", // H264_NAL_DPS
    "Reserved 17", // H264_NAL_RESERVED17
    "Reserved 18", // H264_NAL_RESERVED18
    "Auxiliary coded picture without partitioning", // H264_NAL_AUXILIARY_SLICE
    "Slice extension", // H264_NAL_EXTEN_SLICE
    "Slice extension for a depth view or a 3D-AVC texture view", // H264_NAL_DEPTH_EXTEN_SLICE
    "Reserved 22", // H264_NAL_RESERVED22
    "Reserved 23", // H264_NAL_RESERVED23
    "Unspecified 24", // H264_NAL_UNSPECIFIED24
    "Unspecified 25", // H264_NAL_UNSPECIFIED25
    "Unspecified 26", // H264_NAL_UNSPECIFIED26
    "Unspecified 27", // H264_NAL_UNSPECIFIED27
    "Unspecified 28", // H264_NAL_UNSPECIFIED28
    "Unspecified 29", // H264_NAL_UNSPECIFIED29
    "Unspecified 30", // H264_NAL_UNSPECIFIED30
    "Unspecified 31", // H264_NAL_UNSPECIFIED31
};
/* Map an H.264 nal_unit_type value (0..31) to its human-readable name. */
static const char *h264_nal_unit_name(int nal_type) {
    av_assert0(0 <= nal_type && nal_type < 32);
    const char *name = h264_nal_type_name[nal_type];
    return name;
}
/*
 * Length of the NAL payload in bits with the stop bit and any trailing
 * zero bits removed (optionally dropping trailing zero *bytes* first).
 * Returns 0 for an empty payload, AVERROR(ERANGE) if the bit count would
 * overflow an int.
 */
static int get_bit_length(H2645NAL *nal, int skip_trailing_zeros) {
    int nb_bytes = nal->size;

    if (skip_trailing_zeros) {
        while (nb_bytes > 0 && !nal->data[nb_bytes - 1])
            nb_bytes--;
    }
    if (nb_bytes == 0)
        return 0;

    const int last_byte = nal->data[nb_bytes - 1];

    /* guard the bytes -> bits conversion against int overflow */
    if (nb_bytes > INT_MAX / 8)
        return AVERROR(ERANGE);

    int nb_bits = nb_bytes * 8;
    /* remove the stop bit and following trailing zeros,
     * or nothing for damaged bitstreams (last byte all zero) */
    if (last_byte)
        nb_bits -= ff_ctz(last_byte) + 1;
    return nb_bits;
}
/**
 * Parse the two-byte HEVC NAL unit header into nal->type, nal->nuh_layer_id
 * and nal->temporal_id.
 *
 * @return AVERROR_INVALIDDATA if the packet is not a valid NAL unit,
 *         0 otherwise
 */
static int hevc_parse_nal_header(H2645NAL *nal, void *logctx) {
    GetBitContext *gb = &nal->gb;

    /* forbidden_zero_bit must be 0 */
    if (get_bits1(gb))
        return AVERROR_INVALIDDATA;

    nal->type         = get_bits(gb, 6);
    nal->nuh_layer_id = get_bits(gb, 6);
    /* nuh_temporal_id_plus1 must be >= 1, so temporal_id must be >= 0 */
    nal->temporal_id  = get_bits(gb, 3) - 1;
    if (nal->temporal_id < 0)
        return AVERROR_INVALIDDATA;

    av_log(logctx, AV_LOG_DEBUG,
           "nal_unit_type: %d(%s), nuh_layer_id: %d, temporal_id: %d\n",
           nal->type, hevc_nal_unit_name(nal->type), nal->nuh_layer_id, nal->temporal_id);
    return 0;
}
/**
 * Parse the one-byte H.264 NAL unit header into nal->ref_idc and nal->type.
 *
 * @return AVERROR_INVALIDDATA if the packet is not a valid NAL unit,
 *         0 otherwise
 */
static int h264_parse_nal_header(H2645NAL *nal, void *logctx) {
    GetBitContext *gb = &nal->gb;

    /* forbidden_zero_bit must be 0 */
    if (get_bits1(gb))
        return AVERROR_INVALIDDATA;

    nal->ref_idc = get_bits(gb, 2);
    nal->type    = get_bits(gb, 5);

    av_log(logctx, AV_LOG_DEBUG,
           "nal_unit_type: %d(%s), nal_ref_idc: %d\n",
           nal->type, h264_nal_unit_name(nal->type), nal->ref_idc);
    return 0;
}
/*
 * Scan [buf, next_avc) for an Annex B 00 00 01 start-code prefix.
 * Returns the offset just past the matched prefix (match offset + 3);
 * if fewer than 4 bytes remain, returns the remaining byte count instead.
 */
static int find_next_start_code(const uint8_t *buf, const uint8_t *next_avc) {
    const ptrdiff_t remaining = next_avc - buf;
    int pos;

    if (remaining <= 3)
        return (int)remaining;

    for (pos = 0; buf + pos + 3 < next_avc; pos++) {
        if (!buf[pos] && !buf[pos + 1] && buf[pos + 2] == 1)
            break;
    }
    return pos + 3;
}
/*
 * Ensure rbsp->rbsp_buffer can hold at least `size` bytes plus
 * AV_INPUT_BUFFER_PADDING_SIZE, reallocating with slack when needed.
 * When use_ref is non-zero the buffer is wrapped in an AVBufferRef so that
 * references to it can be handed out.  On failure the buffer is freed and
 * rbsp->rbsp_buffer is left NULL (callers must check it).
 */
static void alloc_rbsp_buffer(H2645RBSP *rbsp, unsigned int size, int use_ref) {
int min_size = size;
/* reject sizes whose padded form would overflow */
if(size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE)
goto fail;
size += AV_INPUT_BUFFER_PADDING_SIZE;
/* existing buffer is large enough and (if refcounted) still writable: reuse */
if(rbsp->rbsp_buffer_alloc_size >= size &&
(!rbsp->rbsp_buffer_ref || av_buffer_is_writable(rbsp->rbsp_buffer_ref))) {
av_assert0(rbsp->rbsp_buffer);
/* re-zero the padding region beyond the requested payload */
memset(rbsp->rbsp_buffer + min_size, 0, AV_INPUT_BUFFER_PADDING_SIZE);
return;
}
/* over-allocate (~size/16 + 32 bytes) to amortize future growth */
size = FFMIN(size + size / 16 + 32, INT_MAX);
if(rbsp->rbsp_buffer_ref)
av_buffer_unref(&rbsp->rbsp_buffer_ref);
else
av_free(rbsp->rbsp_buffer);
rbsp->rbsp_buffer = av_mallocz(size);
if(!rbsp->rbsp_buffer)
goto fail;
rbsp->rbsp_buffer_alloc_size = size;
if(use_ref) {
/* NULL free/opaque: av_buffer_create's default free callback owns the data */
rbsp->rbsp_buffer_ref = av_buffer_create(rbsp->rbsp_buffer, size,
NULL, NULL, 0);
if(!rbsp->rbsp_buffer_ref)
goto fail;
}
return;
fail:
rbsp->rbsp_buffer_alloc_size = 0;
if(rbsp->rbsp_buffer_ref) {
/* unref also frees the underlying data buffer */
av_buffer_unref(&rbsp->rbsp_buffer_ref);
rbsp->rbsp_buffer = NULL;
}
else
av_freep(&rbsp->rbsp_buffer);
return;
}
/*
 * Split a buffer of H.264/HEVC bitstream data into NAL units stored in pkt.
 * Handles both Annex B streams (start-code delimited) and NALFF/AVCC-style
 * streams (length-prefixed, when is_nalff is set).  For each NAL the RBSP is
 * extracted (emulation-prevention bytes removed), the header parsed, and a
 * GetBitContext initialized.  Returns 0 on success or a negative AVERROR.
 */
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length,
void *logctx, int is_nalff, int nal_length_size,
enum AVCodecID codec_id, int small_padding, int use_ref) {
GetByteContext bc;
int consumed, ret = 0;
/* in NALFF mode the first length prefix is at offset 0 */
int next_avc = is_nalff ? 0 : length;
int64_t padding = small_padding ? 0 : MAX_MBPAIR_SIZE;
bytestream2_init(&bc, buf, length);
alloc_rbsp_buffer(&pkt->rbsp, length + padding, use_ref);
if(!pkt->rbsp.rbsp_buffer)
return AVERROR(ENOMEM);
pkt->rbsp.rbsp_buffer_size = 0;
pkt->nb_nals = 0;
while(bytestream2_get_bytes_left(&bc) >= 4) {
H2645NAL *nal;
int extract_length = 0;
int skip_trailing_zeros = 1;
if(bytestream2_tell(&bc) == next_avc) {
/* NALFF: read the length prefix of the next NAL */
int i = 0;
extract_length = get_nalsize(nal_length_size,
bc.buffer, bytestream2_get_bytes_left(&bc), &i, logctx);
if(extract_length < 0)
return extract_length;
bytestream2_skip(&bc, nal_length_size);
next_avc = bytestream2_tell(&bc) + extract_length;
}
else {
int buf_index;
if(bytestream2_tell(&bc) > next_avc)
av_log(logctx, AV_LOG_WARNING, "Exceeded next NALFF position, re-syncing.\n");
/* search start code */
buf_index = find_next_start_code(bc.buffer, buf + next_avc);
bytestream2_skip(&bc, buf_index);
if(!bytestream2_get_bytes_left(&bc)) {
if(pkt->nb_nals > 0) {
// No more start codes: we discarded some irrelevant
// bytes at the end of the packet.
return 0;
}
else {
av_log(logctx, AV_LOG_ERROR, "No start code is found.\n");
return AVERROR_INVALIDDATA;
}
}
extract_length = FFMIN(bytestream2_get_bytes_left(&bc), next_avc - bytestream2_tell(&bc));
if(bytestream2_tell(&bc) >= next_avc) {
/* skip to the start of the next NAL */
bytestream2_skip(&bc, next_avc - bytestream2_tell(&bc));
continue;
}
}
/* grow the NAL array by one entry; entries keep their skipped_bytes_pos
 * allocation across calls, so only the new slot is initialized */
if(pkt->nals_allocated < pkt->nb_nals + 1) {
int new_size = pkt->nals_allocated + 1;
void *tmp;
if(new_size >= INT_MAX / sizeof(*pkt->nals))
return AVERROR(ENOMEM);
tmp = av_fast_realloc(pkt->nals, &pkt->nal_buffer_size, new_size * sizeof(*pkt->nals));
if(!tmp)
return AVERROR(ENOMEM);
pkt->nals = tmp;
memset(pkt->nals + pkt->nals_allocated, 0, sizeof(*pkt->nals));
nal = &pkt->nals[pkt->nb_nals];
nal->skipped_bytes_pos_size = FFMIN(1024, extract_length / 3 + 1); // initial buffer size
nal->skipped_bytes_pos = av_malloc_array(nal->skipped_bytes_pos_size, sizeof(*nal->skipped_bytes_pos));
if(!nal->skipped_bytes_pos)
return AVERROR(ENOMEM);
pkt->nals_allocated = new_size;
}
nal = &pkt->nals[pkt->nb_nals];
/* unescape the NAL payload into the shared RBSP buffer */
consumed = ff_h2645_extract_rbsp(bc.buffer, extract_length, &pkt->rbsp, nal, small_padding);
if(consumed < 0)
return consumed;
if(is_nalff && (extract_length != consumed) && extract_length)
av_log(logctx, AV_LOG_DEBUG,
"NALFF: Consumed only %d bytes instead of %d\n",
consumed, extract_length);
bytestream2_skip(&bc, consumed);
/* see commit 3566042a0 */
if(bytestream2_get_bytes_left(&bc) >= 4 &&
bytestream2_peek_be32(&bc) == 0x000001E0)
skip_trailing_zeros = 0;
nal->size_bits = get_bit_length(nal, skip_trailing_zeros);
/* empty NALs are silently dropped */
if(nal->size <= 0 || nal->size_bits <= 0)
continue;
ret = init_get_bits(&nal->gb, nal->data, nal->size_bits);
if(ret < 0)
return ret;
/* Reset type in case it contains a stale value from a previously parsed NAL */
nal->type = 0;
if(codec_id == AV_CODEC_ID_HEVC)
ret = hevc_parse_nal_header(nal, logctx);
else
ret = h264_parse_nal_header(nal, logctx);
if(ret < 0) {
av_log(logctx, AV_LOG_WARNING, "Invalid NAL unit %d, skipping.\n",
nal->type);
continue;
}
pkt->nb_nals++;
}
return 0;
}
/* Release every allocation owned by an H2645Packet and reset its counters. */
void ff_h2645_packet_uninit(H2645Packet *pkt) {
    int idx;

    /* per-NAL bookkeeping arrays first, then the NAL array itself */
    for (idx = 0; idx < pkt->nals_allocated; idx++)
        av_freep(&pkt->nals[idx].skipped_bytes_pos);
    av_freep(&pkt->nals);
    pkt->nals_allocated = 0;
    pkt->nal_buffer_size = 0;

    /* the RBSP buffer is either refcounted or plainly allocated */
    if (pkt->rbsp.rbsp_buffer_ref) {
        av_buffer_unref(&pkt->rbsp.rbsp_buffer_ref);
        pkt->rbsp.rbsp_buffer = NULL;
    } else {
        av_freep(&pkt->rbsp.rbsp_buffer);
    }
    pkt->rbsp.rbsp_buffer_alloc_size = 0;
    pkt->rbsp.rbsp_buffer_size = 0;
}

View File

@ -1,173 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* H.264 parameter set handling
*/
#ifndef AVCODEC_H264_PS_H
#define AVCODEC_H264_PS_H
#include <stdint.h>
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
#include <libavutil/pixfmt.h>
#include <libavutil/rational.h>
#include "cbs/h264.h"
#include "get_bits.h"
/* Array bounds for the parameter-set lists in H264ParamSets below. */
#define MAX_SPS_COUNT 32
#define MAX_PPS_COUNT 256
#define MAX_LOG2_MAX_FRAME_NUM (12 + 4)
/**
 * Sequence parameter set
 */
typedef struct SPS {
unsigned int sps_id; ///< seq_parameter_set_id
int profile_idc;
int level_idc;
int chroma_format_idc;
int transform_bypass; ///< qpprime_y_zero_transform_bypass_flag
int log2_max_frame_num; ///< log2_max_frame_num_minus4 + 4
int poc_type; ///< pic_order_cnt_type
int log2_max_poc_lsb; ///< log2_max_pic_order_cnt_lsb_minus4
int delta_pic_order_always_zero_flag;
int offset_for_non_ref_pic;
int offset_for_top_to_bottom_field;
int poc_cycle_length; ///< num_ref_frames_in_pic_order_cnt_cycle
int ref_frame_count; ///< num_ref_frames
int gaps_in_frame_num_allowed_flag;
int mb_width; ///< pic_width_in_mbs_minus1 + 1
///< (pic_height_in_map_units_minus1 + 1) * (2 - frame_mbs_only_flag)
int mb_height;
int frame_mbs_only_flag;
int mb_aff; ///< mb_adaptive_frame_field_flag
int direct_8x8_inference_flag;
int crop; ///< frame_cropping_flag
/* those 4 are already in luma samples */
unsigned int crop_left; ///< frame_cropping_rect_left_offset
unsigned int crop_right; ///< frame_cropping_rect_right_offset
unsigned int crop_top; ///< frame_cropping_rect_top_offset
unsigned int crop_bottom; ///< frame_cropping_rect_bottom_offset
int vui_parameters_present_flag;
AVRational sar; ///< sample aspect ratio from VUI
int video_signal_type_present_flag;
int full_range;
int colour_description_present_flag;
enum AVColorPrimaries color_primaries;
enum AVColorTransferCharacteristic color_trc;
enum AVColorSpace colorspace;
enum AVChromaLocation chroma_location;
int timing_info_present_flag;
uint32_t num_units_in_tick;
uint32_t time_scale;
int fixed_frame_rate_flag;
int32_t offset_for_ref_frame[256];
int bitstream_restriction_flag;
int num_reorder_frames;
int scaling_matrix_present;
uint8_t scaling_matrix4[6][16];
uint8_t scaling_matrix8[6][64];
int nal_hrd_parameters_present_flag;
int vcl_hrd_parameters_present_flag;
int pic_struct_present_flag;
int time_offset_length;
int cpb_cnt; ///< See H.264 E.1.2
int initial_cpb_removal_delay_length; ///< initial_cpb_removal_delay_length_minus1 + 1
int cpb_removal_delay_length; ///< cpb_removal_delay_length_minus1 + 1
int dpb_output_delay_length; ///< dpb_output_delay_length_minus1 + 1
int bit_depth_luma; ///< bit_depth_luma_minus8 + 8
int bit_depth_chroma; ///< bit_depth_chroma_minus8 + 8
int residual_color_transform_flag; ///< residual_colour_transform_flag
int constraint_set_flags; ///< constraint_set[0-3]_flag
uint8_t data[4096]; ///< raw SPS bitstream copy (NOTE(review): fixed cap assumed sufficient — confirm)
size_t data_size; ///< number of valid bytes in data
} SPS;
/**
 * Picture parameter set
 */
typedef struct PPS {
unsigned int sps_id; ///< id of the SPS this PPS refers to
int cabac; ///< entropy_coding_mode_flag
int pic_order_present; ///< pic_order_present_flag
int slice_group_count; ///< num_slice_groups_minus1 + 1
int mb_slice_group_map_type;
unsigned int ref_count[2]; ///< num_ref_idx_l0/1_active_minus1 + 1
int weighted_pred; ///< weighted_pred_flag
int weighted_bipred_idc;
int init_qp; ///< pic_init_qp_minus26 + 26
int init_qs; ///< pic_init_qs_minus26 + 26
int chroma_qp_index_offset[2];
int deblocking_filter_parameters_present; ///< deblocking_filter_parameters_present_flag
int constrained_intra_pred; ///< constrained_intra_pred_flag
int redundant_pic_cnt_present; ///< redundant_pic_cnt_present_flag
int transform_8x8_mode; ///< transform_8x8_mode_flag
uint8_t scaling_matrix4[6][16];
uint8_t scaling_matrix8[6][64];
uint8_t chroma_qp_table[2][QP_MAX_NUM + 1]; ///< pre-scaled (with chroma_qp_index_offset) version of qp_table
int chroma_qp_diff;
uint8_t data[4096]; ///< raw PPS bitstream copy (NOTE(review): fixed cap assumed sufficient — confirm)
size_t data_size; ///< number of valid bytes in data
/* derived dequantization tables (not bitstream syntax) */
uint32_t dequant4_buffer[6][QP_MAX_NUM + 1][16];
uint32_t dequant8_buffer[6][QP_MAX_NUM + 1][64];
uint32_t (*dequant4_coeff[6])[16];
uint32_t (*dequant8_coeff[6])[64];
AVBufferRef *sps_ref; ///< keeps the referenced SPS alive
const SPS *sps;
} PPS;
/* All parameter sets seen so far, indexed by their IDs, plus the active pair. */
typedef struct H264ParamSets {
AVBufferRef *sps_list[MAX_SPS_COUNT]; ///< refcounted SPS entries, indexed by sps_id
AVBufferRef *pps_list[MAX_PPS_COUNT]; ///< refcounted PPS entries, indexed by pps_id
AVBufferRef *pps_ref; ///< reference backing the active pps pointer
/* currently active parameters sets */
const PPS *pps;
const SPS *sps;
int overread_warning_printed[2];
} H264ParamSets;
/**
 * Decode SPS
 *
 * Parses a sequence parameter set from gb and stores it in ps.
 */
int ff_h264_decode_seq_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
H264ParamSets *ps, int ignore_truncation);
/**
 * Decode PPS
 *
 * Parses a picture parameter set of bit_length bits from gb and stores it in ps.
 */
int ff_h264_decode_picture_parameter_set(GetBitContext *gb, AVCodecContext *avctx,
H264ParamSets *ps, int bit_length);
/**
 * Uninit H264 param sets structure.
 */
void ff_h264_ps_uninit(H264ParamSets *ps);
#endif /* AVCODEC_H264_PS_H */

View File

@ -1,202 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_H264_SEI_H
#define AVCODEC_H264_SEI_H
#include "cbs/sei.h"
#include "get_bits.h"
#include "h264_ps.h"
/**
 * pic_struct in picture timing SEI message
 */
typedef enum {
H264_SEI_PIC_STRUCT_FRAME = 0, ///< 0: %frame
H264_SEI_PIC_STRUCT_TOP_FIELD = 1, ///< 1: top field
H264_SEI_PIC_STRUCT_BOTTOM_FIELD = 2, ///< 2: bottom field
H264_SEI_PIC_STRUCT_TOP_BOTTOM = 3, ///< 3: top field, bottom field, in that order
H264_SEI_PIC_STRUCT_BOTTOM_TOP = 4, ///< 4: bottom field, top field, in that order
H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP = 5, ///< 5: top field, bottom field, top field repeated, in that order
H264_SEI_PIC_STRUCT_BOTTOM_TOP_BOTTOM = 6, ///< 6: bottom field, top field, bottom field repeated, in that order
H264_SEI_PIC_STRUCT_FRAME_DOUBLING = 7, ///< 7: %frame doubling
H264_SEI_PIC_STRUCT_FRAME_TRIPLING = 8 ///< 8: %frame tripling
} H264_SEI_PicStructType;
/**
 * frame_packing_arrangement types, as carried in the frame packing SEI
 */
typedef enum {
H264_SEI_FPA_TYPE_CHECKERBOARD = 0,
H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN = 1,
H264_SEI_FPA_TYPE_INTERLEAVE_ROW = 2,
H264_SEI_FPA_TYPE_SIDE_BY_SIDE = 3,
H264_SEI_FPA_TYPE_TOP_BOTTOM = 4,
H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL = 5,
H264_SEI_FPA_TYPE_2D = 6,
} H264_SEI_FpaType;
/* A single timecode entry from a pic_timing SEI. */
typedef struct H264SEITimeCode {
/* When not continuously receiving full timecodes, we have to reference
the previous timecode received */
int full;
int frame;
int seconds;
int minutes;
int hours;
int dropframe;
} H264SEITimeCode;
typedef struct H264SEIPictureTiming {
// maximum size of pic_timing according to the spec should be 274 bits
uint8_t payload[40];
int payload_size_bits;
int present;
H264_SEI_PicStructType pic_struct;
/**
 * Bit set of clock types for fields/frames in picture timing SEI message.
 * For each found ct_type, appropriate bit is set (e.g., bit 1 for
 * interlaced).
 */
int ct_type;
/**
 * dpb_output_delay in picture timing SEI message, see H.264 C.2.2
 */
int dpb_output_delay;
/**
 * cpb_removal_delay in picture timing SEI message, see H.264 C.1.2
 */
int cpb_removal_delay;
/**
 * Maximum three timecodes in a pic_timing SEI.
 */
H264SEITimeCode timecode[3];
/**
 * Number of timecode in use
 */
int timecode_cnt;
} H264SEIPictureTiming;
/* Active format description SEI payload. */
typedef struct H264SEIAFD {
int present;
uint8_t active_format_description;
} H264SEIAFD;
/* A53 closed-caption data carried in user-data SEI. */
typedef struct H264SEIA53Caption {
AVBufferRef *buf_ref;
} H264SEIA53Caption;
/* Unregistered user-data SEI payloads (one buffer ref per message). */
typedef struct H264SEIUnregistered {
int x264_build;
AVBufferRef **buf_ref;
int nb_buf_ref;
} H264SEIUnregistered;
typedef struct H264SEIRecoveryPoint {
/**
 * recovery_frame_cnt
 *
 * Set to -1 if no recovery point SEI message found or to number of frames
 * before playback synchronizes. Frames having recovery point are key
 * frames.
 */
int recovery_frame_cnt;
} H264SEIRecoveryPoint;
typedef struct H264SEIBufferingPeriod {
int present; ///< Buffering period SEI flag
int initial_cpb_removal_delay[32]; ///< Initial timestamps for CPBs
} H264SEIBufferingPeriod;
/* frame_packing_arrangement SEI payload (stereo 3D signalling). */
typedef struct H264SEIFramePacking {
int present;
int arrangement_id;
int arrangement_cancel_flag; ///< is previous arrangement canceled, -1 if never received
H264_SEI_FpaType arrangement_type;
int arrangement_repetition_period;
int content_interpretation_type;
int quincunx_sampling_flag;
int current_frame_is_frame0_flag;
} H264SEIFramePacking;
/* display_orientation SEI payload. */
typedef struct H264SEIDisplayOrientation {
int present;
int anticlockwise_rotation;
int hflip, vflip;
} H264SEIDisplayOrientation;
/* Green metadata SEI payload (power-saving hints). */
typedef struct H264SEIGreenMetaData {
uint8_t green_metadata_type;
uint8_t period_type;
uint16_t num_seconds;
uint16_t num_pictures;
uint8_t percent_non_zero_macroblocks;
uint8_t percent_intra_coded_macroblocks;
uint8_t percent_six_tap_filtering;
uint8_t percent_alpha_point_deblocking_instance;
uint8_t xsd_metric_type;
uint16_t xsd_metric_value;
} H264SEIGreenMetaData;
/* alternative_transfer_characteristics SEI payload. */
typedef struct H264SEIAlternativeTransfer {
int present;
int preferred_transfer_characteristics;
} H264SEIAlternativeTransfer;
/* Aggregate of all SEI state tracked by the H.264 parser/decoder. */
typedef struct H264SEIContext {
H264SEIPictureTiming picture_timing;
H264SEIAFD afd;
H264SEIA53Caption a53_caption;
H264SEIUnregistered unregistered;
H264SEIRecoveryPoint recovery_point;
H264SEIBufferingPeriod buffering_period;
H264SEIFramePacking frame_packing;
H264SEIDisplayOrientation display_orientation;
H264SEIGreenMetaData green_metadata;
H264SEIAlternativeTransfer alternative_transfer;
} H264SEIContext;
struct H264ParamSets;
/* Decode the SEI NAL currently positioned in gb into h. */
int ff_h264_sei_decode(H264SEIContext *h, GetBitContext *gb,
const struct H264ParamSets *ps, void *logctx);
/**
 * Reset SEI values at the beginning of the frame.
 */
void ff_h264_sei_uninit(H264SEIContext *h);
/**
 * Get stereo_mode string from the h264 frame_packing_arrangement
 */
const char *ff_h264_sei_stereo_mode(const H264SEIFramePacking *h);
/**
 * Parse the contents of a picture timing message given an active SPS.
 */
int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps,
void *logctx);
#endif /* AVCODEC_H264_SEI_H */

View File

@ -1,142 +0,0 @@
/*
* HEVC Supplementary Enhancement Information messages
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_HEVC_SEI_H
#define AVCODEC_HEVC_SEI_H
#include <stdint.h>
#include <libavutil/buffer.h>
#include "cbs/sei.h"
#include "get_bits.h"
/* pic_struct values used from the HEVC picture timing SEI. */
typedef enum {
HEVC_SEI_PIC_STRUCT_FRAME_DOUBLING = 7,
HEVC_SEI_PIC_STRUCT_FRAME_TRIPLING = 8
} HEVC_SEI_PicStructType;
/* decoded_picture_hash SEI payload. */
typedef struct HEVCSEIPictureHash {
uint8_t md5[3][16]; ///< one hash per colour plane
uint8_t is_md5;
} HEVCSEIPictureHash;
/* frame_packing_arrangement SEI payload. */
typedef struct HEVCSEIFramePacking {
int present;
int arrangement_type;
int content_interpretation_type;
int quincunx_subsampling;
int current_frame_is_frame0_flag;
} HEVCSEIFramePacking;
/* display_orientation SEI payload. */
typedef struct HEVCSEIDisplayOrientation {
int present;
int anticlockwise_rotation;
int hflip, vflip;
} HEVCSEIDisplayOrientation;
typedef struct HEVCSEIPictureTiming {
int picture_struct;
} HEVCSEIPictureTiming;
/* A53 closed-caption data carried in user-data SEI. */
typedef struct HEVCSEIA53Caption {
AVBufferRef *buf_ref;
} HEVCSEIA53Caption;
/* Unregistered user-data SEI payloads (one buffer ref per message). */
typedef struct HEVCSEIUnregistered {
AVBufferRef **buf_ref;
int nb_buf_ref;
} HEVCSEIUnregistered;
/* mastering_display_colour_volume SEI payload. */
typedef struct HEVCSEIMasteringDisplay {
int present;
uint16_t display_primaries[3][2];
uint16_t white_point[2];
uint32_t max_luminance;
uint32_t min_luminance;
} HEVCSEIMasteringDisplay;
/* HDR10+ dynamic metadata carried in ITU-T T.35 SEI. */
typedef struct HEVCSEIDynamicHDRPlus {
AVBufferRef *info;
} HEVCSEIDynamicHDRPlus;
/* content_light_level SEI payload. */
typedef struct HEVCSEIContentLight {
int present;
uint16_t max_content_light_level;
uint16_t max_pic_average_light_level;
} HEVCSEIContentLight;
/* alternative_transfer_characteristics SEI payload. */
typedef struct HEVCSEIAlternativeTransfer {
int present;
int preferred_transfer_characteristics;
} HEVCSEIAlternativeTransfer;
/* time_code SEI payload; arrays hold up to num_clock_ts (max 3) entries. */
typedef struct HEVCSEITimeCode {
int present;
uint8_t num_clock_ts;
uint8_t clock_timestamp_flag[3];
uint8_t units_field_based_flag[3];
uint8_t counting_type[3];
uint8_t full_timestamp_flag[3];
uint8_t discontinuity_flag[3];
uint8_t cnt_dropped_flag[3];
uint16_t n_frames[3];
uint8_t seconds_value[3];
uint8_t minutes_value[3];
uint8_t hours_value[3];
uint8_t seconds_flag[3];
uint8_t minutes_flag[3];
uint8_t hours_flag[3];
uint8_t time_offset_length[3];
int32_t time_offset_value[3];
} HEVCSEITimeCode;
/* Aggregate of all SEI state tracked by the HEVC parser/decoder. */
typedef struct HEVCSEI {
HEVCSEIPictureHash picture_hash;
HEVCSEIFramePacking frame_packing;
HEVCSEIDisplayOrientation display_orientation;
HEVCSEIPictureTiming picture_timing;
HEVCSEIA53Caption a53_caption;
HEVCSEIUnregistered unregistered;
HEVCSEIMasteringDisplay mastering_display;
HEVCSEIDynamicHDRPlus dynamic_hdr_plus;
HEVCSEIContentLight content_light;
int active_seq_parameter_set_id;
HEVCSEIAlternativeTransfer alternative_transfer;
HEVCSEITimeCode timecode;
} HEVCSEI;
struct HEVCParamSets;
/* Decode one SEI NAL of the given type from gb into s. */
int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s,
const struct HEVCParamSets *ps, int type);
/**
 * Reset SEI values that are stored on the Context.
 * e.g. Caption data that was extracted during NAL
 * parsing.
 *
 * @param s HEVCContext.
 */
void ff_hevc_reset_sei(HEVCSEI *s);
#endif /* AVCODEC_HEVC_SEI_H */

View File

@ -1,171 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* AV1 common definitions
*/
#ifndef AVCODEC_AV1_H
#define AVCODEC_AV1_H
// OBU types (section 6.2.2).
// OBU types (section 6.2.2).
typedef enum {
// 0 reserved.
AV1_OBU_SEQUENCE_HEADER = 1,
AV1_OBU_TEMPORAL_DELIMITER = 2,
AV1_OBU_FRAME_HEADER = 3,
AV1_OBU_TILE_GROUP = 4,
AV1_OBU_METADATA = 5,
AV1_OBU_FRAME = 6,
AV1_OBU_REDUNDANT_FRAME_HEADER = 7,
AV1_OBU_TILE_LIST = 8,
// 9-14 reserved.
AV1_OBU_PADDING = 15,
} AV1_OBU_Type;
// Metadata types (section 6.7.1).
enum {
AV1_METADATA_TYPE_HDR_CLL = 1,
AV1_METADATA_TYPE_HDR_MDCV = 2,
AV1_METADATA_TYPE_SCALABILITY = 3,
AV1_METADATA_TYPE_ITUT_T35 = 4,
AV1_METADATA_TYPE_TIMECODE = 5,
};
// Frame types (section 6.8.2).
enum {
AV1_FRAME_KEY = 0,
AV1_FRAME_INTER = 1,
AV1_FRAME_INTRA_ONLY = 2,
AV1_FRAME_SWITCH = 3,
};
// Reference frames (section 6.10.24).
enum {
AV1_REF_FRAME_INTRA = 0,
AV1_REF_FRAME_LAST = 1,
AV1_REF_FRAME_LAST2 = 2,
AV1_REF_FRAME_LAST3 = 3,
AV1_REF_FRAME_GOLDEN = 4,
AV1_REF_FRAME_BWDREF = 5,
AV1_REF_FRAME_ALTREF2 = 6,
AV1_REF_FRAME_ALTREF = 7,
};
// Constants (section 3). Values are fixed by the AV1 specification.
enum {
AV1_MAX_OPERATING_POINTS = 32,
AV1_MAX_SB_SIZE = 128,
AV1_MI_SIZE = 4,
AV1_MAX_TILE_WIDTH = 4096,
AV1_MAX_TILE_AREA = 4096 * 2304,
AV1_MAX_TILE_ROWS = 64,
AV1_MAX_TILE_COLS = 64,
AV1_NUM_REF_FRAMES = 8,
AV1_REFS_PER_FRAME = 7,
AV1_TOTAL_REFS_PER_FRAME = 8,
AV1_PRIMARY_REF_NONE = 7,
AV1_MAX_SEGMENTS = 8,
AV1_SEG_LVL_MAX = 8,
AV1_SEG_LVL_ALT_Q = 0,
AV1_SEG_LVL_ALT_LF_Y_V = 1,
AV1_SEG_LVL_REF_FRAME = 5,
AV1_SEG_LVL_SKIP = 6,
AV1_SEG_LVL_GLOBAL_MV = 7,
AV1_SELECT_SCREEN_CONTENT_TOOLS = 2,
AV1_SELECT_INTEGER_MV = 2,
AV1_SUPERRES_NUM = 8,
AV1_SUPERRES_DENOM_MIN = 9,
AV1_INTERPOLATION_FILTER_SWITCHABLE = 4,
AV1_GM_ABS_ALPHA_BITS = 12,
AV1_GM_ALPHA_PREC_BITS = 15,
AV1_GM_ABS_TRANS_ONLY_BITS = 9,
AV1_GM_TRANS_ONLY_PREC_BITS = 3,
AV1_GM_ABS_TRANS_BITS = 12,
AV1_GM_TRANS_PREC_BITS = 6,
AV1_WARPEDMODEL_PREC_BITS = 16,
AV1_WARP_MODEL_IDENTITY = 0,
AV1_WARP_MODEL_TRANSLATION = 1,
AV1_WARP_MODEL_ROTZOOM = 2,
AV1_WARP_MODEL_AFFINE = 3,
};
// The main colour configuration information uses the same ISO/IEC 23001-8
// (H.273) enums as FFmpeg does, so separate definitions are not required.
// Chroma sample position.
enum {
AV1_CSP_UNKNOWN = 0,
AV1_CSP_VERTICAL = 1, // -> AVCHROMA_LOC_LEFT.
AV1_CSP_COLOCATED = 2, // -> AVCHROMA_LOC_TOPLEFT.
};
// Scalability modes (section 6.7.5)
enum {
AV1_SCALABILITY_L1T2 = 0,
AV1_SCALABILITY_L1T3 = 1,
AV1_SCALABILITY_L2T1 = 2,
AV1_SCALABILITY_L2T2 = 3,
AV1_SCALABILITY_L2T3 = 4,
AV1_SCALABILITY_S2T1 = 5,
AV1_SCALABILITY_S2T2 = 6,
AV1_SCALABILITY_S2T3 = 7,
AV1_SCALABILITY_L2T1h = 8,
AV1_SCALABILITY_L2T2h = 9,
AV1_SCALABILITY_L2T3h = 10,
AV1_SCALABILITY_S2T1h = 11,
AV1_SCALABILITY_S2T2h = 12,
AV1_SCALABILITY_S2T3h = 13,
AV1_SCALABILITY_SS = 14,
AV1_SCALABILITY_L3T1 = 15,
AV1_SCALABILITY_L3T2 = 16,
AV1_SCALABILITY_L3T3 = 17,
AV1_SCALABILITY_S3T1 = 18,
AV1_SCALABILITY_S3T2 = 19,
AV1_SCALABILITY_S3T3 = 20,
AV1_SCALABILITY_L3T2_KEY = 21,
AV1_SCALABILITY_L3T3_KEY = 22,
AV1_SCALABILITY_L4T5_KEY = 23,
AV1_SCALABILITY_L4T7_KEY = 24,
AV1_SCALABILITY_L3T2_KEY_SHIFT = 25,
AV1_SCALABILITY_L3T3_KEY_SHIFT = 26,
AV1_SCALABILITY_L4T5_KEY_SHIFT = 27,
AV1_SCALABILITY_L4T7_KEY_SHIFT = 28,
};
// Frame Restoration types (section 6.10.15)
enum {
AV1_RESTORE_NONE = 0,
AV1_RESTORE_WIENER = 1,
AV1_RESTORE_SGRPROJ = 2,
AV1_RESTORE_SWITCHABLE = 3,
};
#endif /* AVCODEC_AV1_H */

View File

@ -1,448 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_H
#define AVCODEC_CBS_H
#include <stddef.h>
#include <stdint.h>
#include <libavutil/buffer.h>
#include <libavcodec/avcodec.h>
/*
* This defines a framework for converting between a coded bitstream
* and structures defining all individual syntax elements found in
* such a stream.
*
* Conversion in both directions is possible. Given a coded bitstream
* (any meaningful fragment), it can be parsed and decomposed into
* syntax elements stored in a set of codec-specific structures.
* Similarly, given a set of those same codec-specific structures the
* syntax elements can be serialised and combined to create a coded
* bitstream.
*/
struct CodedBitstreamType;
/**
 * The codec-specific type of a bitstream unit.
 *
 * AV1: obu_type
 * H.264 / AVC: nal_unit_type
 * H.265 / HEVC: nal_unit_type
 * JPEG: marker value (without 0xff prefix)
 * MPEG-2: start code value (without prefix)
 * VP9: unused, set to zero (every unit is a frame)
 */
typedef uint32_t CodedBitstreamUnitType;
/**
 * Coded bitstream unit structure.
 *
 * A bitstream unit is the smallest element of a bitstream which
 * is meaningful on its own. For example, an H.264 NAL unit.
 *
 * A unit may exist in bitstream form (data), decomposed form (content),
 * or both at once.
 *
 * See the codec-specific header for the meaning of this for any
 * particular codec.
 */
typedef struct CodedBitstreamUnit {
/**
 * Codec-specific type of this unit.
 */
CodedBitstreamUnitType type;
/**
 * Pointer to the directly-parsable bitstream form of this unit.
 *
 * May be NULL if the unit currently only exists in decomposed form.
 */
uint8_t *data;
/**
 * The number of bytes in the bitstream (including any padding bits
 * in the final byte).
 */
size_t data_size;
/**
 * The number of bits which should be ignored in the final byte.
 *
 * This supports non-byte-aligned bitstreams.
 */
size_t data_bit_padding;
/**
 * A reference to the buffer containing data.
 *
 * Must be set if data is not NULL.
 */
AVBufferRef *data_ref;
/**
 * Pointer to the decomposed form of this unit.
 *
 * The type of this structure depends on both the codec and the
 * type of this unit. May be NULL if the unit only exists in
 * bitstream form.
 */
void *content;
/**
 * If content is reference counted, a reference to the buffer containing
 * content. Null if content is not reference counted.
 */
AVBufferRef *content_ref;
} CodedBitstreamUnit;
/**
 * Coded bitstream fragment structure, combining one or more units.
 *
 * This is any sequence of units. It need not form some greater whole,
 * though in many cases it will. For example, an H.264 access unit,
 * which is composed of a sequence of H.264 NAL units.
 */
typedef struct CodedBitstreamFragment {
/**
 * Pointer to the bitstream form of this fragment.
 *
 * May be NULL if the fragment only exists as component units.
 */
uint8_t *data;
/**
 * The number of bytes in the bitstream.
 *
 * The number of bytes in the bitstream (including any padding bits
 * in the final byte).
 */
size_t data_size;
/**
 * The number of bits which should be ignored in the final byte.
 */
size_t data_bit_padding;
/**
 * A reference to the buffer containing data.
 *
 * Must be set if data is not NULL.
 */
AVBufferRef *data_ref;
/**
 * Number of units in this fragment.
 *
 * This may be zero if the fragment only exists in bitstream form
 * and has not been decomposed.
 */
int nb_units;
/**
 * Number of allocated units.
 *
 * Must always be >= nb_units; designed for internal use by cbs.
 */
int nb_units_allocated;
/**
 * Pointer to an array of units of length nb_units_allocated.
 * Only the first nb_units are valid.
 *
 * Must be NULL if nb_units_allocated is zero.
 */
CodedBitstreamUnit *units;
} CodedBitstreamFragment;
/**
* Context structure for coded bitstream operations.
*/
typedef struct CodedBitstreamContext {
/**
* Logging context to be passed to all av_log() calls associated
* with this context.
*/
void *log_ctx;
/**
* Internal codec-specific hooks.
*/
const struct CodedBitstreamType *codec;
/**
* Internal codec-specific data.
*
* This contains any information needed when reading/writing
* bitsteams which will not necessarily be present in a fragment.
* For example, for H.264 it contains all currently visible
* parameter sets - they are required to determine the bitstream
* syntax but need not be present in every access unit.
*/
void *priv_data;
/**
* Array of unit types which should be decomposed when reading.
*
* Types not in this list will be available in bitstream form only.
* If NULL, all supported types will be decomposed.
*/
const CodedBitstreamUnitType *decompose_unit_types;
/**
* Length of the decompose_unit_types array.
*/
int nb_decompose_unit_types;
/**
* Enable trace output during read/write operations.
*/
int trace_enable;
/**
* Log level to use for trace output.
*
* From AV_LOG_*; defaults to AV_LOG_TRACE.
*/
int trace_level;
/**
* Write buffer. Used as intermediate buffer when writing units.
* For internal use of cbs only.
*/
uint8_t *write_buffer;
size_t write_buffer_size;
} CodedBitstreamContext;
/**
* Table of all supported codec IDs.
*
* Terminated by AV_CODEC_ID_NONE.
*/
extern const enum AVCodecID ff_cbs_all_codec_ids[];
/**
* Create and initialise a new context for the given codec.
*/
int ff_cbs_init(CodedBitstreamContext **ctx,
enum AVCodecID codec_id, void *log_ctx);
/**
* Reset all internal state in a context.
*/
void ff_cbs_flush(CodedBitstreamContext *ctx);
/**
* Close a context and free all internal state.
*/
void ff_cbs_close(CodedBitstreamContext **ctx);
/**
 * Read the extradata bitstream found in codec parameters into a
 * fragment, then split into units and decompose.
 *
 * This also updates the internal state, so will need to be called for
 * codecs with extradata to read parameter sets necessary for further
 * parsing even if the fragment itself is not desired.
 *
 * The fragment must have been zeroed or reset via ff_cbs_fragment_reset
 * before use.
 */
int ff_cbs_read_extradata(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const AVCodecParameters *par);
/**
 * Read the extradata bitstream found in a codec context into a
 * fragment, then split into units and decompose.
 *
 * This acts identical to ff_cbs_read_extradata() for the case where
 * you already have a codec context.
 */
int ff_cbs_read_extradata_from_codec(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const AVCodecContext *avctx);
/**
 * Read the data bitstream from a packet into a fragment, then
 * split into units and decompose.
 *
 * This also updates the internal state of the coded bitstream context
 * with any persistent data from the fragment which may be required to
 * read following fragments (e.g. parameter sets).
 *
 * The fragment must have been zeroed or reset via ff_cbs_fragment_reset
 * before use.
 */
int ff_cbs_read_packet(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const AVPacket *pkt);
/**
 * Read a bitstream from a memory region into a fragment, then
 * split into units and decompose.
 *
 * This also updates the internal state of the coded bitstream context
 * with any persistent data from the fragment which may be required to
 * read following fragments (e.g. parameter sets).
 *
 * The fragment must have been zeroed or reset via ff_cbs_fragment_reset
 * before use.
 *
 * NOTE(review): data/size describe the raw input buffer; padding
 * requirements (if any) are not stated here - confirm whether the
 * buffer must carry AV_INPUT_BUFFER_PADDING_SIZE padding.
 */
int ff_cbs_read(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag,
const uint8_t *data, size_t size);
/**
 * Write the content of the fragment to its own internal buffer.
 *
 * Writes the content of all units and then assembles them into a new
 * data buffer. When modifying the content of decomposed units, this
 * can be used to regenerate the bitstream form of units or the whole
 * fragment so that it can be extracted for other use.
 *
 * This also updates the internal state of the coded bitstream context
 * with any persistent data from the fragment which may be required to
 * write following fragments (e.g. parameter sets).
 */
int ff_cbs_write_fragment_data(CodedBitstreamContext *ctx,
CodedBitstreamFragment *frag);
/**
 * Write the bitstream of a fragment to the extradata in codec parameters.
 *
 * Modifies context and fragment as ff_cbs_write_fragment_data does and
 * replaces any existing extradata in the structure.
 */
int ff_cbs_write_extradata(CodedBitstreamContext *ctx,
AVCodecParameters *par,
CodedBitstreamFragment *frag);
/**
 * Write the bitstream of a fragment to a packet.
 *
 * Modifies context and fragment as ff_cbs_write_fragment_data does.
 *
 * On success, the packet's buf is unreferenced and its buf, data and
 * size fields are set to the corresponding values from the newly updated
 * fragment; other fields are not touched. On failure, the packet is not
 * touched at all.
 */
int ff_cbs_write_packet(CodedBitstreamContext *ctx,
AVPacket *pkt,
CodedBitstreamFragment *frag);
/**
 * Free the units contained in a fragment as well as the fragment's
 * own data buffer, but not the units array itself.
 *
 * Leaves the fragment ready for reuse by the ff_cbs_read*() functions.
 */
void ff_cbs_fragment_reset(CodedBitstreamFragment *frag);
/**
 * Free the units array of a fragment in addition to what
 * ff_cbs_fragment_reset does.
 *
 * Use this for final cleanup of a fragment.
 */
void ff_cbs_fragment_free(CodedBitstreamFragment *frag);
/**
 * Allocate a new internal content buffer of the given size in the unit.
 *
 * The content will be zeroed.
 *
 * The free callback is invoked when the last reference to the content
 * is released.
 */
int ff_cbs_alloc_unit_content(CodedBitstreamUnit *unit,
size_t size,
void (*free)(void *opaque, uint8_t *content));
/**
 * Allocate a new internal content buffer matching the type of the unit.
 *
 * The content will be zeroed.
 *
 * NOTE(review): "2" suffix suggests this supersedes
 * ff_cbs_alloc_unit_content() by deriving size/free from the unit type
 * registered in ctx - confirm against the implementation.
 */
int ff_cbs_alloc_unit_content2(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
/**
 * Allocate a new internal data buffer of the given size in the unit.
 *
 * The data buffer will have input padding.
 */
int ff_cbs_alloc_unit_data(CodedBitstreamUnit *unit,
size_t size);
/**
 * Insert a new unit into a fragment with the given content.
 *
 * The content structure continues to be owned by the caller if
 * content_buf is not supplied.
 */
int ff_cbs_insert_unit_content(CodedBitstreamFragment *frag,
int position,
CodedBitstreamUnitType type,
void *content,
AVBufferRef *content_buf);
/**
 * Insert a new unit into a fragment with the given data bitstream.
 *
 * If data_buf is not supplied then data must have been allocated with
 * av_malloc() and will on success become owned by the unit after this
 * call or freed on error.
 */
int ff_cbs_insert_unit_data(CodedBitstreamFragment *frag,
int position,
CodedBitstreamUnitType type,
uint8_t *data, size_t data_size,
AVBufferRef *data_buf);
/**
 * Delete a unit from a fragment and free all memory it uses.
 *
 * Requires position to be >= 0 and < frag->nb_units.
 */
void ff_cbs_delete_unit(CodedBitstreamFragment *frag,
int position);
/**
 * Make the content of a unit refcounted.
 *
 * If the unit is not refcounted, this will do a deep copy of the unit
 * content to new refcounted buffers.
 *
 * It is not valid to call this function on a unit which does not have
 * decomposed content.
 */
int ff_cbs_make_unit_refcounted(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
/**
 * Make the content of a unit writable so that internal fields can be
 * modified.
 *
 * If it is known that there are no other references to the content of
 * the unit, does nothing and returns success. Otherwise (including the
 * case where the unit content is not refcounted), it does a full clone
 * of the content (including any internal buffers) to make a new copy,
 * and replaces the existing references inside the unit with that.
 *
 * It is not valid to call this function on a unit which does not have
 * decomposed content.
 */
int ff_cbs_make_unit_writable(CodedBitstreamContext *ctx,
CodedBitstreamUnit *unit);
#endif /* AVCODEC_CBS_H */

View File

@ -1,464 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_AV1_H
#define AVCODEC_CBS_AV1_H
#include <stddef.h>
#include <stdint.h>
#include "av1.h"
#include "cbs.h"
// Raw OBU header fields. Field names follow the AV1 specification's
// syntax element names.
typedef struct AV1RawOBUHeader {
uint8_t obu_forbidden_bit;
uint8_t obu_type;
uint8_t obu_extension_flag;
uint8_t obu_has_size_field;
uint8_t obu_reserved_1bit;
// The following are only present when obu_extension_flag is set
// (extension header) - NOTE(review): confirm against the parser.
uint8_t temporal_id;
uint8_t spatial_id;
uint8_t extension_header_reserved_3bits;
} AV1RawOBUHeader;
// Raw color_config() fields from the sequence header.
typedef struct AV1RawColorConfig {
uint8_t high_bitdepth;
uint8_t twelve_bit;
uint8_t mono_chrome;
uint8_t color_description_present_flag;
uint8_t color_primaries;
uint8_t transfer_characteristics;
uint8_t matrix_coefficients;
uint8_t color_range;
uint8_t subsampling_x;
uint8_t subsampling_y;
uint8_t chroma_sample_position;
uint8_t separate_uv_delta_q;
} AV1RawColorConfig;
// Raw timing_info() fields from the sequence header.
typedef struct AV1RawTimingInfo {
uint32_t num_units_in_display_tick;
uint32_t time_scale;
uint8_t equal_picture_interval;
uint32_t num_ticks_per_picture_minus_1;
} AV1RawTimingInfo;
// Raw decoder_model_info() fields from the sequence header.
typedef struct AV1RawDecoderModelInfo {
uint8_t buffer_delay_length_minus_1;
uint32_t num_units_in_decoding_tick;
uint8_t buffer_removal_time_length_minus_1;
uint8_t frame_presentation_time_length_minus_1;
} AV1RawDecoderModelInfo;
// Raw sequence header OBU. Per-operating-point values are indexed up to
// AV1_MAX_OPERATING_POINTS; field names follow the AV1 spec syntax.
typedef struct AV1RawSequenceHeader {
uint8_t seq_profile;
uint8_t still_picture;
uint8_t reduced_still_picture_header;
uint8_t timing_info_present_flag;
uint8_t decoder_model_info_present_flag;
uint8_t initial_display_delay_present_flag;
uint8_t operating_points_cnt_minus_1;
AV1RawTimingInfo timing_info;
AV1RawDecoderModelInfo decoder_model_info;
// Per-operating-point arrays.
uint16_t operating_point_idc[AV1_MAX_OPERATING_POINTS];
uint8_t seq_level_idx[AV1_MAX_OPERATING_POINTS];
uint8_t seq_tier[AV1_MAX_OPERATING_POINTS];
uint8_t decoder_model_present_for_this_op[AV1_MAX_OPERATING_POINTS];
uint32_t decoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
uint32_t encoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
uint8_t low_delay_mode_flag[AV1_MAX_OPERATING_POINTS];
uint8_t initial_display_delay_present_for_this_op[AV1_MAX_OPERATING_POINTS];
uint8_t initial_display_delay_minus_1[AV1_MAX_OPERATING_POINTS];
// Maximum frame geometry.
uint8_t frame_width_bits_minus_1;
uint8_t frame_height_bits_minus_1;
uint16_t max_frame_width_minus_1;
uint16_t max_frame_height_minus_1;
uint8_t frame_id_numbers_present_flag;
uint8_t delta_frame_id_length_minus_2;
uint8_t additional_frame_id_length_minus_1;
// Coding tool enable flags.
uint8_t use_128x128_superblock;
uint8_t enable_filter_intra;
uint8_t enable_intra_edge_filter;
uint8_t enable_interintra_compound;
uint8_t enable_masked_compound;
uint8_t enable_warped_motion;
uint8_t enable_dual_filter;
uint8_t enable_order_hint;
uint8_t enable_jnt_comp;
uint8_t enable_ref_frame_mvs;
uint8_t seq_choose_screen_content_tools;
uint8_t seq_force_screen_content_tools;
uint8_t seq_choose_integer_mv;
uint8_t seq_force_integer_mv;
uint8_t order_hint_bits_minus_1;
uint8_t enable_superres;
uint8_t enable_cdef;
uint8_t enable_restoration;
AV1RawColorConfig color_config;
uint8_t film_grain_params_present;
} AV1RawSequenceHeader;
// Raw film_grain_params() fields; field names follow the AV1 spec syntax.
typedef struct AV1RawFilmGrainParams {
uint8_t apply_grain;
uint16_t grain_seed;
uint8_t update_grain;
uint8_t film_grain_params_ref_idx;
// Piecewise-linear scaling points for Y/Cb/Cr.
uint8_t num_y_points;
uint8_t point_y_value[14];
uint8_t point_y_scaling[14];
uint8_t chroma_scaling_from_luma;
uint8_t num_cb_points;
uint8_t point_cb_value[10];
uint8_t point_cb_scaling[10];
uint8_t num_cr_points;
uint8_t point_cr_value[10];
uint8_t point_cr_scaling[10];
uint8_t grain_scaling_minus_8;
// Auto-regressive coefficient model.
uint8_t ar_coeff_lag;
uint8_t ar_coeffs_y_plus_128[24];
uint8_t ar_coeffs_cb_plus_128[25];
uint8_t ar_coeffs_cr_plus_128[25];
uint8_t ar_coeff_shift_minus_6;
uint8_t grain_scale_shift;
uint8_t cb_mult;
uint8_t cb_luma_mult;
uint16_t cb_offset;
uint8_t cr_mult;
uint8_t cr_luma_mult;
uint16_t cr_offset;
uint8_t overlap_flag;
uint8_t clip_to_restricted_range;
} AV1RawFilmGrainParams;
// Raw uncompressed frame header; field names follow the AV1 spec syntax.
typedef struct AV1RawFrameHeader {
uint8_t show_existing_frame;
uint8_t frame_to_show_map_idx;
uint32_t frame_presentation_time;
uint32_t display_frame_id;
uint8_t frame_type;
uint8_t show_frame;
uint8_t showable_frame;
uint8_t error_resilient_mode;
uint8_t disable_cdf_update;
uint8_t allow_screen_content_tools;
uint8_t force_integer_mv;
uint32_t current_frame_id;
uint8_t frame_size_override_flag;
uint8_t order_hint;
uint8_t buffer_removal_time_present_flag;
uint32_t buffer_removal_time[AV1_MAX_OPERATING_POINTS];
uint8_t primary_ref_frame;
// Frame and render size.
uint16_t frame_width_minus_1;
uint16_t frame_height_minus_1;
uint8_t use_superres;
uint8_t coded_denom;
uint8_t render_and_frame_size_different;
uint16_t render_width_minus_1;
uint16_t render_height_minus_1;
// Reference frame selection.
uint8_t found_ref[AV1_REFS_PER_FRAME];
uint8_t refresh_frame_flags;
uint8_t allow_intrabc;
uint8_t ref_order_hint[AV1_NUM_REF_FRAMES];
uint8_t frame_refs_short_signaling;
uint8_t last_frame_idx;
uint8_t golden_frame_idx;
int8_t ref_frame_idx[AV1_REFS_PER_FRAME];
uint32_t delta_frame_id_minus1[AV1_REFS_PER_FRAME];
uint8_t allow_high_precision_mv;
uint8_t is_filter_switchable;
uint8_t interpolation_filter;
uint8_t is_motion_mode_switchable;
uint8_t use_ref_frame_mvs;
uint8_t disable_frame_end_update_cdf;
// Tile layout.
uint8_t uniform_tile_spacing_flag;
uint8_t tile_cols_log2;
uint8_t tile_rows_log2;
uint8_t width_in_sbs_minus_1[AV1_MAX_TILE_COLS];
uint8_t height_in_sbs_minus_1[AV1_MAX_TILE_ROWS];
uint16_t context_update_tile_id;
uint8_t tile_size_bytes_minus1;
// These are derived values, but it's very unhelpful to have to
// recalculate them all the time so we store them here.
uint16_t tile_cols;
uint16_t tile_rows;
// Quantization parameters.
uint8_t base_q_idx;
int8_t delta_q_y_dc;
uint8_t diff_uv_delta;
int8_t delta_q_u_dc;
int8_t delta_q_u_ac;
int8_t delta_q_v_dc;
int8_t delta_q_v_ac;
uint8_t using_qmatrix;
uint8_t qm_y;
uint8_t qm_u;
uint8_t qm_v;
// Segmentation.
uint8_t segmentation_enabled;
uint8_t segmentation_update_map;
uint8_t segmentation_temporal_update;
uint8_t segmentation_update_data;
uint8_t feature_enabled[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
int16_t feature_value[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
uint8_t delta_q_present;
uint8_t delta_q_res;
uint8_t delta_lf_present;
uint8_t delta_lf_res;
uint8_t delta_lf_multi;
// Loop filter.
uint8_t loop_filter_level[4];
uint8_t loop_filter_sharpness;
uint8_t loop_filter_delta_enabled;
uint8_t loop_filter_delta_update;
uint8_t update_ref_delta[AV1_TOTAL_REFS_PER_FRAME];
int8_t loop_filter_ref_deltas[AV1_TOTAL_REFS_PER_FRAME];
uint8_t update_mode_delta[2];
int8_t loop_filter_mode_deltas[2];
// CDEF.
uint8_t cdef_damping_minus_3;
uint8_t cdef_bits;
uint8_t cdef_y_pri_strength[8];
uint8_t cdef_y_sec_strength[8];
uint8_t cdef_uv_pri_strength[8];
uint8_t cdef_uv_sec_strength[8];
// Loop restoration.
uint8_t lr_type[3];
uint8_t lr_unit_shift;
uint8_t lr_uv_shift;
uint8_t tx_mode;
uint8_t reference_select;
uint8_t skip_mode_present;
uint8_t allow_warped_motion;
uint8_t reduced_tx_set;
// Global motion.
uint8_t is_global[AV1_TOTAL_REFS_PER_FRAME];
uint8_t is_rot_zoom[AV1_TOTAL_REFS_PER_FRAME];
uint8_t is_translation[AV1_TOTAL_REFS_PER_FRAME];
//AV1RawSubexp gm_params[AV1_TOTAL_REFS_PER_FRAME][6];
uint32_t gm_params[AV1_TOTAL_REFS_PER_FRAME][6];
AV1RawFilmGrainParams film_grain;
} AV1RawFrameHeader;
// Opaque tile payload bytes; data_ref owns the buffer when refcounted.
typedef struct AV1RawTileData {
uint8_t *data;
AVBufferRef *data_ref;
size_t data_size;
} AV1RawTileData;
// Raw tile group OBU.
typedef struct AV1RawTileGroup {
uint8_t tile_start_and_end_present_flag;
uint16_t tg_start;
uint16_t tg_end;
AV1RawTileData tile_data;
} AV1RawTileGroup;
// Raw frame OBU: frame header plus its tile group.
typedef struct AV1RawFrame {
AV1RawFrameHeader header;
AV1RawTileGroup tile_group;
} AV1RawFrame;
// Raw tile list OBU.
typedef struct AV1RawTileList {
uint8_t output_frame_width_in_tiles_minus_1;
uint8_t output_frame_height_in_tiles_minus_1;
uint16_t tile_count_minus_1;
AV1RawTileData tile_data;
} AV1RawTileList;
// Metadata: HDR content light level.
typedef struct AV1RawMetadataHDRCLL {
uint16_t max_cll;
uint16_t max_fall;
} AV1RawMetadataHDRCLL;
// Metadata: HDR mastering display colour volume.
typedef struct AV1RawMetadataHDRMDCV {
uint16_t primary_chromaticity_x[3];
uint16_t primary_chromaticity_y[3];
uint16_t white_point_chromaticity_x;
uint16_t white_point_chromaticity_y;
uint32_t luminance_max;
uint32_t luminance_min;
} AV1RawMetadataHDRMDCV;
// Metadata: scalability structure.
typedef struct AV1RawMetadataScalability {
uint8_t scalability_mode_idc;
uint8_t spatial_layers_cnt_minus_1;
uint8_t spatial_layer_dimensions_present_flag;
uint8_t spatial_layer_description_present_flag;
uint8_t temporal_group_description_present_flag;
uint8_t scalability_structure_reserved_3bits;
uint16_t spatial_layer_max_width[4];
uint16_t spatial_layer_max_height[4];
uint8_t spatial_layer_ref_id[4];
uint8_t temporal_group_size;
uint8_t temporal_group_temporal_id[255];
uint8_t temporal_group_temporal_switching_up_point_flag[255];
uint8_t temporal_group_spatial_switching_up_point_flag[255];
uint8_t temporal_group_ref_cnt[255];
uint8_t temporal_group_ref_pic_diff[255][7];
} AV1RawMetadataScalability;
// Metadata: ITU-T T.35 registered user data; payload_ref owns payload.
typedef struct AV1RawMetadataITUTT35 {
uint8_t itu_t_t35_country_code;
uint8_t itu_t_t35_country_code_extension_byte;
uint8_t *payload;
AVBufferRef *payload_ref;
size_t payload_size;
} AV1RawMetadataITUTT35;
// Metadata: timecode.
typedef struct AV1RawMetadataTimecode {
uint8_t counting_type;
uint8_t full_timestamp_flag;
uint8_t discontinuity_flag;
uint8_t cnt_dropped_flag;
uint16_t n_frames;
uint8_t seconds_value;
uint8_t minutes_value;
uint8_t hours_value;
uint8_t seconds_flag;
uint8_t minutes_flag;
uint8_t hours_flag;
uint8_t time_offset_length;
uint32_t time_offset_value;
} AV1RawMetadataTimecode;
// Raw metadata OBU; metadata_type selects the active union member.
typedef struct AV1RawMetadata {
uint64_t metadata_type;
union {
AV1RawMetadataHDRCLL hdr_cll;
AV1RawMetadataHDRMDCV hdr_mdcv;
AV1RawMetadataScalability scalability;
AV1RawMetadataITUTT35 itut_t35;
AV1RawMetadataTimecode timecode;
} metadata;
} AV1RawMetadata;
// Raw padding OBU; payload_ref owns payload.
typedef struct AV1RawPadding {
uint8_t *payload;
AVBufferRef *payload_ref;
size_t payload_size;
} AV1RawPadding;
// A complete decomposed OBU; header.obu_type selects the active union
// member.
typedef struct AV1RawOBU {
AV1RawOBUHeader header;
size_t obu_size;
union {
AV1RawSequenceHeader sequence_header;
AV1RawFrameHeader frame_header;
AV1RawFrame frame;
AV1RawTileGroup tile_group;
AV1RawTileList tile_list;
AV1RawMetadata metadata;
AV1RawPadding padding;
} obu;
} AV1RawOBU;
// Saved per-reference-frame decoder state; comments name the
// corresponding spec-defined variables.
typedef struct AV1ReferenceFrameState {
int valid; // RefValid
int frame_id; // RefFrameId
int upscaled_width; // RefUpscaledWidth
int frame_width; // RefFrameWidth
int frame_height; // RefFrameHeight
int render_width; // RefRenderWidth
int render_height; // RefRenderHeight
int frame_type; // RefFrameType
int subsampling_x; // RefSubsamplingX
int subsampling_y; // RefSubsamplingY
int bit_depth; // RefBitDepth
int order_hint; // RefOrderHint
int8_t loop_filter_ref_deltas[AV1_TOTAL_REFS_PER_FRAME];
int8_t loop_filter_mode_deltas[2];
uint8_t feature_enabled[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
int16_t feature_value[AV1_MAX_SEGMENTS][AV1_SEG_LVL_MAX];
} AV1ReferenceFrameState;
// Codec-specific CBS context for AV1 (stored in
// CodedBitstreamContext.priv_data).
typedef struct CodedBitstreamAV1Context {
const AVClass *class;
// Most recently read sequence header, owned via sequence_header_ref.
AV1RawSequenceHeader *sequence_header;
AVBufferRef *sequence_header_ref;
int seen_frame_header;
AVBufferRef *frame_header_ref;
uint8_t *frame_header;
size_t frame_header_size;
int temporal_id;
int spatial_id;
int operating_point_idc;
// Values derived while parsing the current frame.
int bit_depth;
int order_hint;
int frame_width;
int frame_height;
int upscaled_width;
int render_width;
int render_height;
int num_planes;
int coded_lossless;
int all_lossless;
int tile_cols;
int tile_rows;
int tile_num;
AV1ReferenceFrameState ref[AV1_NUM_REF_FRAMES];
// AVOptions
int operating_point;
} CodedBitstreamAV1Context;
#endif /* AVCODEC_CBS_AV1_H */

View File

@ -1,131 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_BSF_H
#define AVCODEC_CBS_BSF_H
#include "cbs.h"
// Per-codec description of a generic CBS bitstream filter.
typedef struct CBSBSFType {
enum AVCodecID codec_id;
// Name of a frame fragment in this codec (e.g. "access unit",
// "temporal unit").
const char *fragment_name;
// Name of a unit for this BSF, for use in error messages (e.g.
// "NAL unit", "OBU").
const char *unit_name;
// Update the content of a fragment with whatever metadata changes
// are desired. The associated AVPacket is provided so that any side
// data associated with the fragment can be inspected or edited. If
// pkt is NULL, then an extradata header fragment is being updated.
int (*update_fragment)(AVBSFContext *bsf, AVPacket *pkt,
CodedBitstreamFragment *frag);
} CBSBSFType;
// Common structure for all generic CBS BSF users. An instance of this
// structure must be the first member of the BSF private context (to be
// pointed to by AVBSFContext.priv_data).
typedef struct CBSBSFContext {
const AVClass *class;
const CBSBSFType *type;
// Separate read and write CBS instances, plus a reusable fragment.
CodedBitstreamContext *input;
CodedBitstreamContext *output;
CodedBitstreamFragment fragment;
} CBSBSFContext;
/**
 * Initialise generic CBS BSF setup.
 *
 * Creates the input and output CBS instances, and applies the filter to
 * the extradata on the input codecpar if any is present.
 *
 * Since it calls the update_fragment() function immediately to deal with
 * extradata, this should be called after any codec-specific setup is done
 * (probably at the end of the AVBitStreamFilter.init function).
 */
int ff_cbs_bsf_generic_init(AVBSFContext *bsf, const CBSBSFType *type);
/**
 * Close a generic CBS BSF instance.
 *
 * If no other deinitialisation is required then this function can be used
 * directly as AVBitStreamFilter.close.
 */
void ff_cbs_bsf_generic_close(AVBSFContext *bsf);
/**
 * Filter operation for CBS BSF.
 *
 * Reads the input packet into a CBS fragment, calls update_fragment() on
 * it, then writes the result to an output packet. If the input packet
 * has AV_PKT_DATA_NEW_EXTRADATA side-data associated with it then it does
 * the same thing to that new extradata to form the output side-data first.
 *
 * If the BSF does not do anything else then this function can be used
 * directly as AVBitStreamFilter.filter.
 */
int ff_cbs_bsf_generic_filter(AVBSFContext *bsf, AVPacket *pkt);
// Options for element manipulation.
enum {
// Pass this element through unchanged.
BSF_ELEMENT_PASS,
// Insert this element, replacing any existing instances of it.
// Associated values may be provided explicitly (as additional options)
// or implicitly (either as side data or deduced from other parts of
// the stream).
BSF_ELEMENT_INSERT,
// Remove this element if it appears in the stream.
BSF_ELEMENT_REMOVE,
// Extract this element to side data, so that further manipulation
// can happen elsewhere.
BSF_ELEMENT_EXTRACT,
};
// AVOption table entries for a pass/insert/remove element option.
// Expects an OFFSET() macro to be defined by the including file.
#define BSF_ELEMENT_OPTIONS_PIR(name, help, field, opt_flags) \
{ name, help, OFFSET(field), AV_OPT_TYPE_INT, \
{ .i64 = BSF_ELEMENT_PASS }, \
BSF_ELEMENT_PASS, BSF_ELEMENT_REMOVE, opt_flags, name }, \
{ "pass", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_PASS }, .flags = opt_flags, .unit = name }, \
{ "insert", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_INSERT }, .flags = opt_flags, .unit = name }, \
{ "remove", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_REMOVE }, .flags = opt_flags, .unit = name }
// As BSF_ELEMENT_OPTIONS_PIR, but additionally allowing "extract".
#define BSF_ELEMENT_OPTIONS_PIRE(name, help, field, opt_flags) \
{ name, help, OFFSET(field), AV_OPT_TYPE_INT, \
{ .i64 = BSF_ELEMENT_PASS }, \
BSF_ELEMENT_PASS, BSF_ELEMENT_EXTRACT, opt_flags, name }, \
{ "pass", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_PASS }, .flags = opt_flags, .unit = name }, \
{ "insert", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_INSERT }, .flags = opt_flags, .unit = name }, \
{ "remove", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_REMOVE }, .flags = opt_flags, .unit = name }, \
{ "extract", NULL, 0, AV_OPT_TYPE_CONST, \
{ .i64 = BSF_ELEMENT_EXTRACT }, .flags = opt_flags, .unit = name }
#endif /* AVCODEC_CBS_BSF_H */

View File

@ -1,406 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_H264_H
#define AVCODEC_CBS_H264_H
#include <stddef.h>
#include <stdint.h>
#include "cbs.h"
#include "cbs_h2645.h"
#include "cbs_sei.h"
#include "h264.h"
// Raw NAL unit header; field names follow the H.264 spec syntax.
typedef struct H264RawNALUnitHeader {
uint8_t nal_ref_idc;
uint8_t nal_unit_type;
uint8_t svc_extension_flag;
uint8_t avc_3d_extension_flag;
} H264RawNALUnitHeader;
// One scaling list as signalled in SPS/PPS (delta-coded values).
typedef struct H264RawScalingList {
int8_t delta_scale[64];
} H264RawScalingList;
// HRD (hypothetical reference decoder) parameters.
typedef struct H264RawHRD {
uint8_t cpb_cnt_minus1;
uint8_t bit_rate_scale;
uint8_t cpb_size_scale;
uint32_t bit_rate_value_minus1[H264_MAX_CPB_CNT];
uint32_t cpb_size_value_minus1[H264_MAX_CPB_CNT];
uint8_t cbr_flag[H264_MAX_CPB_CNT];
uint8_t initial_cpb_removal_delay_length_minus1;
uint8_t cpb_removal_delay_length_minus1;
uint8_t dpb_output_delay_length_minus1;
uint8_t time_offset_length;
} H264RawHRD;
// VUI (video usability information) parameters from the SPS.
typedef struct H264RawVUI {
// Sample aspect ratio.
uint8_t aspect_ratio_info_present_flag;
uint8_t aspect_ratio_idc;
uint16_t sar_width;
uint16_t sar_height;
uint8_t overscan_info_present_flag;
uint8_t overscan_appropriate_flag;
// Colour description.
uint8_t video_signal_type_present_flag;
uint8_t video_format;
uint8_t video_full_range_flag;
uint8_t colour_description_present_flag;
uint8_t colour_primaries;
uint8_t transfer_characteristics;
uint8_t matrix_coefficients;
uint8_t chroma_loc_info_present_flag;
uint8_t chroma_sample_loc_type_top_field;
uint8_t chroma_sample_loc_type_bottom_field;
// Timing.
uint8_t timing_info_present_flag;
uint32_t num_units_in_tick;
uint32_t time_scale;
uint8_t fixed_frame_rate_flag;
// HRD parameters.
uint8_t nal_hrd_parameters_present_flag;
H264RawHRD nal_hrd_parameters;
uint8_t vcl_hrd_parameters_present_flag;
H264RawHRD vcl_hrd_parameters;
uint8_t low_delay_hrd_flag;
uint8_t pic_struct_present_flag;
// Bitstream restrictions.
uint8_t bitstream_restriction_flag;
uint8_t motion_vectors_over_pic_boundaries_flag;
uint8_t max_bytes_per_pic_denom;
uint8_t max_bits_per_mb_denom;
uint8_t log2_max_mv_length_horizontal;
uint8_t log2_max_mv_length_vertical;
uint8_t max_num_reorder_frames;
uint8_t max_dec_frame_buffering;
} H264RawVUI;
// Raw sequence parameter set; field names follow the H.264 spec syntax.
typedef struct H264RawSPS {
H264RawNALUnitHeader nal_unit_header;
// Profile/level and constraint flags.
uint8_t profile_idc;
uint8_t constraint_set0_flag;
uint8_t constraint_set1_flag;
uint8_t constraint_set2_flag;
uint8_t constraint_set3_flag;
uint8_t constraint_set4_flag;
uint8_t constraint_set5_flag;
uint8_t reserved_zero_2bits;
uint8_t level_idc;
uint8_t seq_parameter_set_id;
// Chroma format and bit depth.
uint8_t chroma_format_idc;
uint8_t separate_colour_plane_flag;
uint8_t bit_depth_luma_minus8;
uint8_t bit_depth_chroma_minus8;
uint8_t qpprime_y_zero_transform_bypass_flag;
// Scaling matrices.
uint8_t seq_scaling_matrix_present_flag;
uint8_t seq_scaling_list_present_flag[12];
H264RawScalingList scaling_list_4x4[6];
H264RawScalingList scaling_list_8x8[6];
// Picture order count.
uint8_t log2_max_frame_num_minus4;
uint8_t pic_order_cnt_type;
uint8_t log2_max_pic_order_cnt_lsb_minus4;
uint8_t delta_pic_order_always_zero_flag;
int32_t offset_for_non_ref_pic;
int32_t offset_for_top_to_bottom_field;
uint8_t num_ref_frames_in_pic_order_cnt_cycle;
int32_t offset_for_ref_frame[256];
uint8_t max_num_ref_frames;
uint8_t gaps_in_frame_num_allowed_flag;
// Picture geometry and cropping.
uint16_t pic_width_in_mbs_minus1;
uint16_t pic_height_in_map_units_minus1;
uint8_t frame_mbs_only_flag;
uint8_t mb_adaptive_frame_field_flag;
uint8_t direct_8x8_inference_flag;
uint8_t frame_cropping_flag;
uint16_t frame_crop_left_offset;
uint16_t frame_crop_right_offset;
uint16_t frame_crop_top_offset;
uint16_t frame_crop_bottom_offset;
uint8_t vui_parameters_present_flag;
H264RawVUI vui;
} H264RawSPS;
// Raw sequence parameter set extension (auxiliary coded picture info).
typedef struct H264RawSPSExtension {
H264RawNALUnitHeader nal_unit_header;
uint8_t seq_parameter_set_id;
uint8_t aux_format_idc;
uint8_t bit_depth_aux_minus8;
uint8_t alpha_incr_flag;
uint16_t alpha_opaque_value;
uint16_t alpha_transparent_value;
uint8_t additional_extension_flag;
} H264RawSPSExtension;
// Raw picture parameter set; field names follow the H.264 spec syntax.
typedef struct H264RawPPS {
H264RawNALUnitHeader nal_unit_header;
uint8_t pic_parameter_set_id;
uint8_t seq_parameter_set_id;
uint8_t entropy_coding_mode_flag;
uint8_t bottom_field_pic_order_in_frame_present_flag;
// Slice groups (FMO).
uint8_t num_slice_groups_minus1;
uint8_t slice_group_map_type;
uint16_t run_length_minus1[H264_MAX_SLICE_GROUPS];
uint16_t top_left[H264_MAX_SLICE_GROUPS];
uint16_t bottom_right[H264_MAX_SLICE_GROUPS];
uint8_t slice_group_change_direction_flag;
uint16_t slice_group_change_rate_minus1;
uint16_t pic_size_in_map_units_minus1;
// Explicit slice group map; owned via slice_group_id_ref.
uint8_t *slice_group_id;
AVBufferRef *slice_group_id_ref;
uint8_t num_ref_idx_l0_default_active_minus1;
uint8_t num_ref_idx_l1_default_active_minus1;
uint8_t weighted_pred_flag;
uint8_t weighted_bipred_idc;
int8_t pic_init_qp_minus26;
int8_t pic_init_qs_minus26;
int8_t chroma_qp_index_offset;
uint8_t deblocking_filter_control_present_flag;
uint8_t constrained_intra_pred_flag;
uint8_t more_rbsp_data;
uint8_t redundant_pic_cnt_present_flag;
// Optional trailing (rbsp) fields.
uint8_t transform_8x8_mode_flag;
uint8_t pic_scaling_matrix_present_flag;
uint8_t pic_scaling_list_present_flag[12];
H264RawScalingList scaling_list_4x4[6];
H264RawScalingList scaling_list_8x8[6];
int8_t second_chroma_qp_index_offset;
} H264RawPPS;
// Raw access unit delimiter.
typedef struct H264RawAUD {
H264RawNALUnitHeader nal_unit_header;
uint8_t primary_pic_type;
} H264RawAUD;
// SEI payload: buffering period.
typedef struct H264RawSEIBufferingPeriod {
uint8_t seq_parameter_set_id;
// Initial CPB removal delays for NAL and VCL HRD respectively.
struct {
uint32_t initial_cpb_removal_delay[H264_MAX_CPB_CNT];
uint32_t initial_cpb_removal_delay_offset[H264_MAX_CPB_CNT];
} nal, vcl;
} H264RawSEIBufferingPeriod;
// One clock timestamp inside a picture timing SEI message.
typedef struct H264RawSEIPicTimestamp {
uint8_t ct_type;
uint8_t nuit_field_based_flag;
uint8_t counting_type;
uint8_t full_timestamp_flag;
uint8_t discontinuity_flag;
uint8_t cnt_dropped_flag;
uint8_t n_frames;
uint8_t seconds_flag;
uint8_t seconds_value;
uint8_t minutes_flag;
uint8_t minutes_value;
uint8_t hours_flag;
uint8_t hours_value;
int32_t time_offset;
} H264RawSEIPicTimestamp;
// SEI payload: picture timing.
typedef struct H264RawSEIPicTiming {
uint32_t cpb_removal_delay;
uint32_t dpb_output_delay;
uint8_t pic_struct;
uint8_t clock_timestamp_flag[3];
H264RawSEIPicTimestamp timestamp[3];
} H264RawSEIPicTiming;
// SEI payload: pan-scan rectangle.
typedef struct H264RawSEIPanScanRect {
uint32_t pan_scan_rect_id;
uint8_t pan_scan_rect_cancel_flag;
uint8_t pan_scan_cnt_minus1;
int32_t pan_scan_rect_left_offset[3];
int32_t pan_scan_rect_right_offset[3];
int32_t pan_scan_rect_top_offset[3];
int32_t pan_scan_rect_bottom_offset[3];
uint16_t pan_scan_rect_repetition_period;
} H264RawSEIPanScanRect;
// SEI payload: recovery point.
typedef struct H264RawSEIRecoveryPoint {
uint16_t recovery_frame_cnt;
uint8_t exact_match_flag;
uint8_t broken_link_flag;
uint8_t changing_slice_group_idc;
} H264RawSEIRecoveryPoint;
// SEI payload: display orientation.
typedef struct H264RawSEIDisplayOrientation {
uint8_t display_orientation_cancel_flag;
uint8_t hor_flip;
uint8_t ver_flip;
uint16_t anticlockwise_rotation;
uint16_t display_orientation_repetition_period;
uint8_t display_orientation_extension_flag;
} H264RawSEIDisplayOrientation;
// Raw SEI NAL unit: a list of decomposed SEI messages.
typedef struct H264RawSEI {
H264RawNALUnitHeader nal_unit_header;
SEIRawMessageList message_list;
} H264RawSEI;
// Raw slice header; field names follow the H.264 spec syntax.
typedef struct H264RawSliceHeader {
H264RawNALUnitHeader nal_unit_header;
uint32_t first_mb_in_slice;
uint8_t slice_type;
uint8_t pic_parameter_set_id;
uint8_t colour_plane_id;
uint16_t frame_num;
uint8_t field_pic_flag;
uint8_t bottom_field_flag;
uint16_t idr_pic_id;
uint16_t pic_order_cnt_lsb;
int32_t delta_pic_order_cnt_bottom;
int32_t delta_pic_order_cnt[2];
uint8_t redundant_pic_cnt;
uint8_t direct_spatial_mv_pred_flag;
// Reference picture list sizes and modifications.
uint8_t num_ref_idx_active_override_flag;
uint8_t num_ref_idx_l0_active_minus1;
uint8_t num_ref_idx_l1_active_minus1;
uint8_t ref_pic_list_modification_flag_l0;
uint8_t ref_pic_list_modification_flag_l1;
struct {
uint8_t modification_of_pic_nums_idc;
int32_t abs_diff_pic_num_minus1;
uint8_t long_term_pic_num;
} rplm_l0[H264_MAX_RPLM_COUNT], rplm_l1[H264_MAX_RPLM_COUNT];
// Prediction weight table.
uint8_t luma_log2_weight_denom;
uint8_t chroma_log2_weight_denom;
uint8_t luma_weight_l0_flag[H264_MAX_REFS];
int8_t luma_weight_l0[H264_MAX_REFS];
int8_t luma_offset_l0[H264_MAX_REFS];
uint8_t chroma_weight_l0_flag[H264_MAX_REFS];
int8_t chroma_weight_l0[H264_MAX_REFS][2];
int8_t chroma_offset_l0[H264_MAX_REFS][2];
uint8_t luma_weight_l1_flag[H264_MAX_REFS];
int8_t luma_weight_l1[H264_MAX_REFS];
int8_t luma_offset_l1[H264_MAX_REFS];
uint8_t chroma_weight_l1_flag[H264_MAX_REFS];
int8_t chroma_weight_l1[H264_MAX_REFS][2];
int8_t chroma_offset_l1[H264_MAX_REFS][2];
// Decoded reference picture marking.
uint8_t no_output_of_prior_pics_flag;
uint8_t long_term_reference_flag;
uint8_t adaptive_ref_pic_marking_mode_flag;
struct {
uint8_t memory_management_control_operation;
int32_t difference_of_pic_nums_minus1;
uint8_t long_term_pic_num;
uint8_t long_term_frame_idx;
uint8_t max_long_term_frame_idx_plus1;
} mmco[H264_MAX_MMCO_COUNT];
uint8_t cabac_init_idc;
int8_t slice_qp_delta;
uint8_t sp_for_switch_flag;
int8_t slice_qs_delta;
// Deblocking filter controls.
uint8_t disable_deblocking_filter_idc;
int8_t slice_alpha_c0_offset_div2;
int8_t slice_beta_offset_div2;
uint16_t slice_group_change_cycle;
} H264RawSliceHeader;
// Raw slice: parsed header plus the (undecomposed) slice data bytes;
// data_ref owns the buffer, data_bit_start is the bit offset of the
// slice data within it.
typedef struct H264RawSlice {
H264RawSliceHeader header;
uint8_t *data;
AVBufferRef *data_ref;
size_t data_size;
int data_bit_start;
} H264RawSlice;
// Raw filler-data NAL unit.
typedef struct H264RawFiller {
H264RawNALUnitHeader nal_unit_header;
uint32_t filler_size;
} H264RawFiller;
// Codec-specific coded-bitstream context for H.264 read/write.
typedef struct CodedBitstreamH264Context {
// Reader/writer context in common with the H.265 implementation.
CodedBitstreamH2645Context common;
// All currently available parameter sets. These are updated when
// any parameter set NAL unit is read/written with this context.
// The *_ref arrays own the storage that the sps/pps pointers alias.
AVBufferRef *sps_ref[H264_MAX_SPS_COUNT];
AVBufferRef *pps_ref[H264_MAX_PPS_COUNT];
H264RawSPS *sps[H264_MAX_SPS_COUNT];
H264RawPPS *pps[H264_MAX_PPS_COUNT];
// The currently active parameter sets. These are updated when any
// NAL unit refers to the relevant parameter set. These pointers
// must also be present in the arrays above.
const H264RawSPS *active_sps;
const H264RawPPS *active_pps;
// The NAL unit type of the most recent normal slice. This is required
// to be able to read/write auxiliary slices, because IdrPicFlag is
// otherwise unknown.
uint8_t last_slice_nal_unit_type;
} CodedBitstreamH264Context;
#endif /* AVCODEC_CBS_H264_H */

View File

@ -1,36 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_H2645_H
#define AVCODEC_CBS_H2645_H
#include "h2645_parse.h"
// State shared by the H.264 and H.265 coded-bitstream implementations.
typedef struct CodedBitstreamH2645Context {
// If set, the stream being read is in MP4 (AVCC/HVCC) format. If not
// set, the stream is assumed to be in annex B format.
int mp4;
// Size in bytes of the NAL length field for MP4 format.
int nal_length_size;
// Packet reader.
H2645Packet read_packet;
} CodedBitstreamH2645Context;
#endif /* AVCODEC_CBS_H2645_H */

View File

@ -1,679 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_H265_H
#define AVCODEC_CBS_H265_H
#include <stddef.h>
#include <stdint.h>
#include "cbs_h2645.h"
#include "cbs_sei.h"
#include "hevc.h"
// H.265 NAL unit header; fields are named after the spec syntax elements.
typedef struct H265RawNALUnitHeader {
uint8_t nal_unit_type;
uint8_t nuh_layer_id;
uint8_t nuh_temporal_id_plus1;
} H265RawNALUnitHeader;
// profile_tier_level() contents: stream-wide ("general_") profile/tier/level
// fields, followed by the per-sub-layer copies of the same information.
typedef struct H265RawProfileTierLevel {
uint8_t general_profile_space;
uint8_t general_tier_flag;
uint8_t general_profile_idc;
uint8_t general_profile_compatibility_flag[32];
uint8_t general_progressive_source_flag;
uint8_t general_interlaced_source_flag;
uint8_t general_non_packed_constraint_flag;
uint8_t general_frame_only_constraint_flag;
uint8_t general_max_12bit_constraint_flag;
uint8_t general_max_10bit_constraint_flag;
uint8_t general_max_8bit_constraint_flag;
uint8_t general_max_422chroma_constraint_flag;
uint8_t general_max_420chroma_constraint_flag;
uint8_t general_max_monochrome_constraint_flag;
uint8_t general_intra_constraint_flag;
uint8_t general_one_picture_only_constraint_flag;
uint8_t general_lower_bit_rate_constraint_flag;
uint8_t general_max_14bit_constraint_flag;
uint8_t general_inbld_flag;
uint8_t general_level_idc;
// Per-sub-layer variants of the fields above, indexed by sub-layer.
uint8_t sub_layer_profile_present_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_level_present_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_profile_space[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_tier_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_profile_idc[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_profile_compatibility_flag[HEVC_MAX_SUB_LAYERS][32];
uint8_t sub_layer_progressive_source_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_interlaced_source_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_non_packed_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_frame_only_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_12bit_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_10bit_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_8bit_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_422chroma_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_420chroma_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_monochrome_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_intra_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_one_picture_only_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_lower_bit_rate_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_max_14bit_constraint_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_inbld_flag[HEVC_MAX_SUB_LAYERS];
uint8_t sub_layer_level_idc[HEVC_MAX_SUB_LAYERS];
} H265RawProfileTierLevel;
// sub_layer_hrd_parameters(): CPB bit-rate and buffer-size values,
// one entry per coded picture buffer.
typedef struct H265RawSubLayerHRDParameters {
uint32_t bit_rate_value_minus1[HEVC_MAX_CPB_CNT];
uint32_t cpb_size_value_minus1[HEVC_MAX_CPB_CNT];
uint32_t cpb_size_du_value_minus1[HEVC_MAX_CPB_CNT];
uint32_t bit_rate_du_value_minus1[HEVC_MAX_CPB_CNT];
uint8_t cbr_flag[HEVC_MAX_CPB_CNT];
} H265RawSubLayerHRDParameters;
// hrd_parameters(): HRD timing/buffering information, with separate
// NAL and VCL sub-layer HRD parameter sets per sub-layer.
typedef struct H265RawHRDParameters {
uint8_t nal_hrd_parameters_present_flag;
uint8_t vcl_hrd_parameters_present_flag;
uint8_t sub_pic_hrd_params_present_flag;
uint8_t tick_divisor_minus2;
uint8_t du_cpb_removal_delay_increment_length_minus1;
uint8_t sub_pic_cpb_params_in_pic_timing_sei_flag;
uint8_t dpb_output_delay_du_length_minus1;
uint8_t bit_rate_scale;
uint8_t cpb_size_scale;
uint8_t cpb_size_du_scale;
uint8_t initial_cpb_removal_delay_length_minus1;
uint8_t au_cpb_removal_delay_length_minus1;
uint8_t dpb_output_delay_length_minus1;
uint8_t fixed_pic_rate_general_flag[HEVC_MAX_SUB_LAYERS];
uint8_t fixed_pic_rate_within_cvs_flag[HEVC_MAX_SUB_LAYERS];
uint16_t elemental_duration_in_tc_minus1[HEVC_MAX_SUB_LAYERS];
uint8_t low_delay_hrd_flag[HEVC_MAX_SUB_LAYERS];
uint8_t cpb_cnt_minus1[HEVC_MAX_SUB_LAYERS];
H265RawSubLayerHRDParameters nal_sub_layer_hrd_parameters[HEVC_MAX_SUB_LAYERS];
H265RawSubLayerHRDParameters vcl_sub_layer_hrd_parameters[HEVC_MAX_SUB_LAYERS];
} H265RawHRDParameters;
// vui_parameters(): video usability information — aspect ratio, colour
// description, display window, timing and bitstream-restriction fields.
typedef struct H265RawVUI {
uint8_t aspect_ratio_info_present_flag;
uint8_t aspect_ratio_idc;
uint16_t sar_width;
uint16_t sar_height;
uint8_t overscan_info_present_flag;
uint8_t overscan_appropriate_flag;
uint8_t video_signal_type_present_flag;
uint8_t video_format;
uint8_t video_full_range_flag;
uint8_t colour_description_present_flag;
uint8_t colour_primaries;
uint8_t transfer_characteristics;
uint8_t matrix_coefficients;
uint8_t chroma_loc_info_present_flag;
uint8_t chroma_sample_loc_type_top_field;
uint8_t chroma_sample_loc_type_bottom_field;
uint8_t neutral_chroma_indication_flag;
uint8_t field_seq_flag;
uint8_t frame_field_info_present_flag;
uint8_t default_display_window_flag;
uint16_t def_disp_win_left_offset;
uint16_t def_disp_win_right_offset;
uint16_t def_disp_win_top_offset;
uint16_t def_disp_win_bottom_offset;
uint8_t vui_timing_info_present_flag;
uint32_t vui_num_units_in_tick;
uint32_t vui_time_scale;
uint8_t vui_poc_proportional_to_timing_flag;
uint32_t vui_num_ticks_poc_diff_one_minus1;
uint8_t vui_hrd_parameters_present_flag;
H265RawHRDParameters hrd_parameters;
uint8_t bitstream_restriction_flag;
uint8_t tiles_fixed_structure_flag;
uint8_t motion_vectors_over_pic_boundaries_flag;
uint8_t restricted_ref_pic_lists_flag;
uint16_t min_spatial_segmentation_idc;
uint8_t max_bytes_per_pic_denom;
uint8_t max_bits_per_min_cu_denom;
uint8_t log2_max_mv_length_horizontal;
uint8_t log2_max_mv_length_vertical;
} H265RawVUI;
// Opaque extension payload kept as raw bits (length is in bits, not bytes).
typedef struct H265RawExtensionData {
uint8_t *data;           // raw extension bits; lifetime managed by data_ref
AVBufferRef *data_ref;
size_t bit_length;
} H265RawExtensionData;
// Video parameter set (VPS) NAL unit contents.
typedef struct H265RawVPS {
H265RawNALUnitHeader nal_unit_header;
uint8_t vps_video_parameter_set_id;
uint8_t vps_base_layer_internal_flag;
uint8_t vps_base_layer_available_flag;
uint8_t vps_max_layers_minus1;
uint8_t vps_max_sub_layers_minus1;
uint8_t vps_temporal_id_nesting_flag;
H265RawProfileTierLevel profile_tier_level;
uint8_t vps_sub_layer_ordering_info_present_flag;
uint8_t vps_max_dec_pic_buffering_minus1[HEVC_MAX_SUB_LAYERS];
uint8_t vps_max_num_reorder_pics[HEVC_MAX_SUB_LAYERS];
uint32_t vps_max_latency_increase_plus1[HEVC_MAX_SUB_LAYERS];
uint8_t vps_max_layer_id;
uint16_t vps_num_layer_sets_minus1;
uint8_t layer_id_included_flag[HEVC_MAX_LAYER_SETS][HEVC_MAX_LAYERS];
uint8_t vps_timing_info_present_flag;
uint32_t vps_num_units_in_tick;
uint32_t vps_time_scale;
uint8_t vps_poc_proportional_to_timing_flag;
uint32_t vps_num_ticks_poc_diff_one_minus1;
uint16_t vps_num_hrd_parameters;
uint16_t hrd_layer_set_idx[HEVC_MAX_LAYER_SETS];
uint8_t cprms_present_flag[HEVC_MAX_LAYER_SETS];
H265RawHRDParameters hrd_parameters[HEVC_MAX_LAYER_SETS];
uint8_t vps_extension_flag;
H265RawExtensionData extension_data;
} H265RawVPS;
// st_ref_pic_set(): one short-term reference picture set, either predicted
// from another set or given explicitly as negative/positive POC deltas.
typedef struct H265RawSTRefPicSet {
uint8_t inter_ref_pic_set_prediction_flag;
uint8_t delta_idx_minus1;
uint8_t delta_rps_sign;
uint16_t abs_delta_rps_minus1;
uint8_t used_by_curr_pic_flag[HEVC_MAX_REFS];
uint8_t use_delta_flag[HEVC_MAX_REFS];
uint8_t num_negative_pics;
uint8_t num_positive_pics;
uint16_t delta_poc_s0_minus1[HEVC_MAX_REFS];
uint8_t used_by_curr_pic_s0_flag[HEVC_MAX_REFS];
uint16_t delta_poc_s1_minus1[HEVC_MAX_REFS];
uint8_t used_by_curr_pic_s1_flag[HEVC_MAX_REFS];
} H265RawScalingListPredecessor_unused_doc_anchor_removed;
typedef struct H265RawScalingList {
uint8_t scaling_list_pred_mode_flag[4][6];
uint8_t scaling_list_pred_matrix_id_delta[4][6];
int16_t scaling_list_dc_coef_minus8[4][6];
int8_t scaling_list_delta_coeff[4][6][64];
} H265RawScalingList;
// Sequence parameter set (SPS) NAL unit contents, including the range and
// screen-content-coding extension fields.
typedef struct H265RawSPS {
H265RawNALUnitHeader nal_unit_header;
uint8_t sps_video_parameter_set_id;
uint8_t sps_max_sub_layers_minus1;
uint8_t sps_temporal_id_nesting_flag;
H265RawProfileTierLevel profile_tier_level;
uint8_t sps_seq_parameter_set_id;
uint8_t chroma_format_idc;
uint8_t separate_colour_plane_flag;
uint16_t pic_width_in_luma_samples;
uint16_t pic_height_in_luma_samples;
uint8_t conformance_window_flag;
uint16_t conf_win_left_offset;
uint16_t conf_win_right_offset;
uint16_t conf_win_top_offset;
uint16_t conf_win_bottom_offset;
uint8_t bit_depth_luma_minus8;
uint8_t bit_depth_chroma_minus8;
uint8_t log2_max_pic_order_cnt_lsb_minus4;
uint8_t sps_sub_layer_ordering_info_present_flag;
uint8_t sps_max_dec_pic_buffering_minus1[HEVC_MAX_SUB_LAYERS];
uint8_t sps_max_num_reorder_pics[HEVC_MAX_SUB_LAYERS];
uint32_t sps_max_latency_increase_plus1[HEVC_MAX_SUB_LAYERS];
uint8_t log2_min_luma_coding_block_size_minus3;
uint8_t log2_diff_max_min_luma_coding_block_size;
uint8_t log2_min_luma_transform_block_size_minus2;
uint8_t log2_diff_max_min_luma_transform_block_size;
uint8_t max_transform_hierarchy_depth_inter;
uint8_t max_transform_hierarchy_depth_intra;
uint8_t scaling_list_enabled_flag;
uint8_t sps_scaling_list_data_present_flag;
H265RawScalingList scaling_list;
uint8_t amp_enabled_flag;
uint8_t sample_adaptive_offset_enabled_flag;
uint8_t pcm_enabled_flag;
uint8_t pcm_sample_bit_depth_luma_minus1;
uint8_t pcm_sample_bit_depth_chroma_minus1;
uint8_t log2_min_pcm_luma_coding_block_size_minus3;
uint8_t log2_diff_max_min_pcm_luma_coding_block_size;
uint8_t pcm_loop_filter_disabled_flag;
uint8_t num_short_term_ref_pic_sets;
H265RawSTRefPicSet st_ref_pic_set[HEVC_MAX_SHORT_TERM_REF_PIC_SETS];
uint8_t long_term_ref_pics_present_flag;
uint8_t num_long_term_ref_pics_sps;
uint16_t lt_ref_pic_poc_lsb_sps[HEVC_MAX_LONG_TERM_REF_PICS];
uint8_t used_by_curr_pic_lt_sps_flag[HEVC_MAX_LONG_TERM_REF_PICS];
uint8_t sps_temporal_mvp_enabled_flag;
uint8_t strong_intra_smoothing_enabled_flag;
uint8_t vui_parameters_present_flag;
H265RawVUI vui;
uint8_t sps_extension_present_flag;
uint8_t sps_range_extension_flag;
uint8_t sps_multilayer_extension_flag;
uint8_t sps_3d_extension_flag;
uint8_t sps_scc_extension_flag;
uint8_t sps_extension_4bits;
H265RawExtensionData extension_data;
// Range extension.
uint8_t transform_skip_rotation_enabled_flag;
uint8_t transform_skip_context_enabled_flag;
uint8_t implicit_rdpcm_enabled_flag;
uint8_t explicit_rdpcm_enabled_flag;
uint8_t extended_precision_processing_flag;
uint8_t intra_smoothing_disabled_flag;
uint8_t high_precision_offsets_enabled_flag;
uint8_t persistent_rice_adaptation_enabled_flag;
uint8_t cabac_bypass_alignment_enabled_flag;
// Screen content coding extension.
uint8_t sps_curr_pic_ref_enabled_flag;
uint8_t palette_mode_enabled_flag;
uint8_t palette_max_size;
uint8_t delta_palette_max_predictor_size;
uint8_t sps_palette_predictor_initializer_present_flag;
uint8_t sps_num_palette_predictor_initializer_minus1;
uint16_t sps_palette_predictor_initializers[3][128];
uint8_t motion_vector_resolution_control_idc;
uint8_t intra_boundary_filtering_disable_flag;
} H265RawSPS;
// Picture parameter set (PPS) NAL unit contents, including the range and
// screen-content-coding extension fields.
typedef struct H265RawPPS {
H265RawNALUnitHeader nal_unit_header;
uint8_t pps_pic_parameter_set_id;
uint8_t pps_seq_parameter_set_id;
uint8_t dependent_slice_segments_enabled_flag;
uint8_t output_flag_present_flag;
uint8_t num_extra_slice_header_bits;
uint8_t sign_data_hiding_enabled_flag;
uint8_t cabac_init_present_flag;
uint8_t num_ref_idx_l0_default_active_minus1;
uint8_t num_ref_idx_l1_default_active_minus1;
int8_t init_qp_minus26;
uint8_t constrained_intra_pred_flag;
uint8_t transform_skip_enabled_flag;
uint8_t cu_qp_delta_enabled_flag;
uint8_t diff_cu_qp_delta_depth;
int8_t pps_cb_qp_offset;
int8_t pps_cr_qp_offset;
uint8_t pps_slice_chroma_qp_offsets_present_flag;
uint8_t weighted_pred_flag;
uint8_t weighted_bipred_flag;
uint8_t transquant_bypass_enabled_flag;
uint8_t tiles_enabled_flag;
uint8_t entropy_coding_sync_enabled_flag;
uint8_t num_tile_columns_minus1;
uint8_t num_tile_rows_minus1;
uint8_t uniform_spacing_flag;
uint16_t column_width_minus1[HEVC_MAX_TILE_COLUMNS];
uint16_t row_height_minus1[HEVC_MAX_TILE_ROWS];
uint8_t loop_filter_across_tiles_enabled_flag;
uint8_t pps_loop_filter_across_slices_enabled_flag;
uint8_t deblocking_filter_control_present_flag;
uint8_t deblocking_filter_override_enabled_flag;
uint8_t pps_deblocking_filter_disabled_flag;
int8_t pps_beta_offset_div2;
int8_t pps_tc_offset_div2;
uint8_t pps_scaling_list_data_present_flag;
H265RawScalingList scaling_list;
uint8_t lists_modification_present_flag;
uint8_t log2_parallel_merge_level_minus2;
uint8_t slice_segment_header_extension_present_flag;
uint8_t pps_extension_present_flag;
uint8_t pps_range_extension_flag;
uint8_t pps_multilayer_extension_flag;
uint8_t pps_3d_extension_flag;
uint8_t pps_scc_extension_flag;
uint8_t pps_extension_4bits;
H265RawExtensionData extension_data;
// Range extension.
uint8_t log2_max_transform_skip_block_size_minus2;
uint8_t cross_component_prediction_enabled_flag;
uint8_t chroma_qp_offset_list_enabled_flag;
uint8_t diff_cu_chroma_qp_offset_depth;
uint8_t chroma_qp_offset_list_len_minus1;
int8_t cb_qp_offset_list[6];
int8_t cr_qp_offset_list[6];
uint8_t log2_sao_offset_scale_luma;
uint8_t log2_sao_offset_scale_chroma;
// Screen content coding extension.
uint8_t pps_curr_pic_ref_enabled_flag;
uint8_t residual_adaptive_colour_transform_enabled_flag;
uint8_t pps_slice_act_qp_offsets_present_flag;
int8_t pps_act_y_qp_offset_plus5;
int8_t pps_act_cb_qp_offset_plus5;
int8_t pps_act_cr_qp_offset_plus3;
uint8_t pps_palette_predictor_initializer_present_flag;
uint8_t pps_num_palette_predictor_initializer;
uint8_t monochrome_palette_flag;
uint8_t luma_bit_depth_entry_minus8;
uint8_t chroma_bit_depth_entry_minus8;
uint16_t pps_palette_predictor_initializers[3][128];
} H265RawPPS;
// Access unit delimiter NAL unit.
typedef struct H265RawAUD {
H265RawNALUnitHeader nal_unit_header;
uint8_t pic_type;
} H265RawAUD;
// slice_segment_header() contents for an H.265 slice segment.
typedef struct H265RawSliceHeader {
H265RawNALUnitHeader nal_unit_header;
uint8_t first_slice_segment_in_pic_flag;
uint8_t no_output_of_prior_pics_flag;
uint8_t slice_pic_parameter_set_id;
uint8_t dependent_slice_segment_flag;
uint16_t slice_segment_address;
uint8_t slice_reserved_flag[8];
uint8_t slice_type;
uint8_t pic_output_flag;
uint8_t colour_plane_id;
uint16_t slice_pic_order_cnt_lsb;
// Short-term and long-term reference picture set signalling.
uint8_t short_term_ref_pic_set_sps_flag;
H265RawSTRefPicSet short_term_ref_pic_set;
uint8_t short_term_ref_pic_set_idx;
uint8_t num_long_term_sps;
uint8_t num_long_term_pics;
uint8_t lt_idx_sps[HEVC_MAX_REFS];
uint8_t poc_lsb_lt[HEVC_MAX_REFS];
uint8_t used_by_curr_pic_lt_flag[HEVC_MAX_REFS];
uint8_t delta_poc_msb_present_flag[HEVC_MAX_REFS];
uint32_t delta_poc_msb_cycle_lt[HEVC_MAX_REFS];
uint8_t slice_temporal_mvp_enabled_flag;
uint8_t slice_sao_luma_flag;
uint8_t slice_sao_chroma_flag;
// Reference picture list sizes and modifications.
uint8_t num_ref_idx_active_override_flag;
uint8_t num_ref_idx_l0_active_minus1;
uint8_t num_ref_idx_l1_active_minus1;
uint8_t ref_pic_list_modification_flag_l0;
uint8_t list_entry_l0[HEVC_MAX_REFS];
uint8_t ref_pic_list_modification_flag_l1;
uint8_t list_entry_l1[HEVC_MAX_REFS];
uint8_t mvd_l1_zero_flag;
uint8_t cabac_init_flag;
uint8_t collocated_from_l0_flag;
uint8_t collocated_ref_idx;
// Weighted-prediction table (pred_weight_table()).
uint8_t luma_log2_weight_denom;
int8_t delta_chroma_log2_weight_denom;
uint8_t luma_weight_l0_flag[HEVC_MAX_REFS];
uint8_t chroma_weight_l0_flag[HEVC_MAX_REFS];
int8_t delta_luma_weight_l0[HEVC_MAX_REFS];
int16_t luma_offset_l0[HEVC_MAX_REFS];
int8_t delta_chroma_weight_l0[HEVC_MAX_REFS][2];
int16_t chroma_offset_l0[HEVC_MAX_REFS][2];
uint8_t luma_weight_l1_flag[HEVC_MAX_REFS];
uint8_t chroma_weight_l1_flag[HEVC_MAX_REFS];
int8_t delta_luma_weight_l1[HEVC_MAX_REFS];
int16_t luma_offset_l1[HEVC_MAX_REFS];
int8_t delta_chroma_weight_l1[HEVC_MAX_REFS][2];
int16_t chroma_offset_l1[HEVC_MAX_REFS][2];
uint8_t five_minus_max_num_merge_cand;
uint8_t use_integer_mv_flag;
// QP offsets and deblocking-filter controls for this slice.
int8_t slice_qp_delta;
int8_t slice_cb_qp_offset;
int8_t slice_cr_qp_offset;
int8_t slice_act_y_qp_offset;
int8_t slice_act_cb_qp_offset;
int8_t slice_act_cr_qp_offset;
uint8_t cu_chroma_qp_offset_enabled_flag;
uint8_t deblocking_filter_override_flag;
uint8_t slice_deblocking_filter_disabled_flag;
int8_t slice_beta_offset_div2;
int8_t slice_tc_offset_div2;
uint8_t slice_loop_filter_across_slices_enabled_flag;
uint16_t num_entry_point_offsets;
uint8_t offset_len_minus1;
uint32_t entry_point_offset_minus1[HEVC_MAX_ENTRY_POINT_OFFSETS];
uint16_t slice_segment_header_extension_length;
uint8_t slice_segment_header_extension_data_byte[256];
} H265RawSliceHeader;
// A parsed H.265 slice: the decoded header plus the coded payload bytes.
typedef struct H265RawSlice {
H265RawSliceHeader header;
uint8_t *data;           // coded slice payload; lifetime managed by data_ref
AVBufferRef *data_ref;
size_t data_size;
int data_bit_start;      // bit offset within data at which the slice data begins
} H265RawSlice;
// Buffering period SEI message: initial CPB removal delays/offsets for the
// NAL and VCL HRD, per coded picture buffer.
typedef struct H265RawSEIBufferingPeriod {
uint8_t bp_seq_parameter_set_id;
uint8_t irap_cpb_params_present_flag;
uint32_t cpb_delay_offset;
uint32_t dpb_delay_offset;
uint8_t concatenation_flag;
uint32_t au_cpb_removal_delay_delta_minus1;
uint32_t nal_initial_cpb_removal_delay[HEVC_MAX_CPB_CNT];
uint32_t nal_initial_cpb_removal_offset[HEVC_MAX_CPB_CNT];
uint32_t nal_initial_alt_cpb_removal_delay[HEVC_MAX_CPB_CNT];
uint32_t nal_initial_alt_cpb_removal_offset[HEVC_MAX_CPB_CNT];
uint32_t vcl_initial_cpb_removal_delay[HEVC_MAX_CPB_CNT];
uint32_t vcl_initial_cpb_removal_offset[HEVC_MAX_CPB_CNT];
uint32_t vcl_initial_alt_cpb_removal_delay[HEVC_MAX_CPB_CNT];
uint32_t vcl_initial_alt_cpb_removal_offset[HEVC_MAX_CPB_CNT];
uint8_t use_alt_cpb_params_flag;
} H265RawSEIBufferingPeriod;
// Picture timing SEI message, including optional decoding-unit information.
typedef struct H265RawSEIPicTiming {
uint8_t pic_struct;
uint8_t source_scan_type;
uint8_t duplicate_flag;
uint32_t au_cpb_removal_delay_minus1;
uint32_t pic_dpb_output_delay;
uint32_t pic_dpb_output_du_delay;
uint16_t num_decoding_units_minus1;
uint8_t du_common_cpb_removal_delay_flag;
uint32_t du_common_cpb_removal_delay_increment_minus1;
uint16_t num_nalus_in_du_minus1[HEVC_MAX_SLICE_SEGMENTS];
uint32_t du_cpb_removal_delay_increment_minus1[HEVC_MAX_SLICE_SEGMENTS];
} H265RawSEIPicTiming;
// Pan-scan rectangle SEI message (up to three rectangles).
typedef struct H265RawSEIPanScanRect {
uint32_t pan_scan_rect_id;
uint8_t pan_scan_rect_cancel_flag;
uint8_t pan_scan_cnt_minus1;
int32_t pan_scan_rect_left_offset[3];
int32_t pan_scan_rect_right_offset[3];
int32_t pan_scan_rect_top_offset[3];
int32_t pan_scan_rect_bottom_offset[3];
uint16_t pan_scan_rect_persistence_flag;
} H265RawSEIPanScanRect;
// Recovery point SEI message.
typedef struct H265RawSEIRecoveryPoint {
int16_t recovery_poc_cnt;
uint8_t exact_match_flag;
uint8_t broken_link_flag;
} H265RawSEIRecoveryPoint;
// Display orientation SEI message (flip/rotation to apply for display).
typedef struct H265RawSEIDisplayOrientation {
uint8_t display_orientation_cancel_flag;
uint8_t hor_flip;
uint8_t ver_flip;
uint16_t anticlockwise_rotation;
uint16_t display_orientation_repetition_period;
uint8_t display_orientation_persistence_flag;
} H265RawSEIDisplayOrientation;
// Active parameter sets SEI message.
typedef struct H265RawSEIActiveParameterSets {
uint8_t active_video_parameter_set_id;
uint8_t self_contained_cvs_flag;
uint8_t no_parameter_set_update_flag;
uint8_t num_sps_ids_minus1;
uint8_t active_seq_parameter_set_id[HEVC_MAX_SPS_COUNT];
uint8_t layer_sps_idx[HEVC_MAX_LAYERS];
} H265RawSEIActiveParameterSets;
// Decoded picture hash SEI message: per-plane MD5/CRC/checksum values.
typedef struct H265RawSEIDecodedPictureHash {
uint8_t hash_type;
uint8_t picture_md5[3][16];
uint16_t picture_crc[3];
uint32_t picture_checksum[3];
} H265RawSEIDecodedPictureHash;
// Time code SEI message (up to three clock timestamps).
typedef struct H265RawSEITimeCode {
uint8_t num_clock_ts;
uint8_t clock_timestamp_flag[3];
uint8_t units_field_based_flag[3];
uint8_t counting_type[3];
uint8_t full_timestamp_flag[3];
uint8_t discontinuity_flag[3];
uint8_t cnt_dropped_flag[3];
uint16_t n_frames[3];
uint8_t seconds_value[3];
uint8_t minutes_value[3];
uint8_t hours_value[3];
uint8_t seconds_flag[3];
uint8_t minutes_flag[3];
uint8_t hours_flag[3];
uint8_t time_offset_length[3];
int32_t time_offset_value[3];
} H265RawSEITimeCode;
// Alpha channel information SEI message.
typedef struct H265RawSEIAlphaChannelInfo {
uint8_t alpha_channel_cancel_flag;
uint8_t alpha_channel_use_idc;
uint8_t alpha_channel_bit_depth_minus8;
uint16_t alpha_transparent_value;
uint16_t alpha_opaque_value;
uint8_t alpha_channel_incr_flag;
uint8_t alpha_channel_clip_flag;
uint8_t alpha_channel_clip_type_flag;
} H265RawSEIAlphaChannelInfo;
// An SEI NAL unit: header plus the list of contained SEI messages.
typedef struct H265RawSEI {
H265RawNALUnitHeader nal_unit_header;
SEIRawMessageList message_list;
} H265RawSEI;
// Codec-specific coded-bitstream context for H.265 read/write.
typedef struct CodedBitstreamH265Context {
// Reader/writer context in common with the H.264 implementation.
CodedBitstreamH2645Context common;
// All currently available parameter sets. These are updated when
// any parameter set NAL unit is read/written with this context.
// The *_ref arrays own the storage that the vps/sps/pps pointers alias.
AVBufferRef *vps_ref[HEVC_MAX_VPS_COUNT];
AVBufferRef *sps_ref[HEVC_MAX_SPS_COUNT];
AVBufferRef *pps_ref[HEVC_MAX_PPS_COUNT];
H265RawVPS *vps[HEVC_MAX_VPS_COUNT];
H265RawSPS *sps[HEVC_MAX_SPS_COUNT];
H265RawPPS *pps[HEVC_MAX_PPS_COUNT];
// The currently active parameter sets. These are updated when any
// NAL unit refers to the relevant parameter set. These pointers
// must also be present in the arrays above.
const H265RawVPS *active_vps;
const H265RawSPS *active_sps;
const H265RawPPS *active_pps;
} CodedBitstreamH265Context;
#endif /* AVCODEC_CBS_H265_H */

View File

@ -1,123 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_JPEG_H
#define AVCODEC_CBS_JPEG_H
#include <stddef.h>
#include <stdint.h>
#include <libavutil/buffer.h>
// JPEG marker codes (the byte following 0xff in the bitstream).
enum {
JPEG_MARKER_SOF0 = 0xc0,
JPEG_MARKER_SOF1 = 0xc1,
JPEG_MARKER_SOF2 = 0xc2,
JPEG_MARKER_SOF3 = 0xc3,
JPEG_MARKER_DHT = 0xc4,
JPEG_MARKER_SOI = 0xd8,
JPEG_MARKER_EOI = 0xd9,
JPEG_MARKER_SOS = 0xda,
JPEG_MARKER_DQT = 0xdb,
JPEG_MARKER_APPN = 0xe0,   // base value; APPn markers span 0xe0-0xef
JPEG_MARKER_JPGN = 0xf0,   // base value; JPGn markers span 0xf0-0xfd
JPEG_MARKER_COM = 0xfe,
};
// Limits implied by the bit widths of the frame-header fields below.
enum {
JPEG_MAX_COMPONENTS = 255,
JPEG_MAX_HEIGHT = 65535,
JPEG_MAX_WIDTH = 65535,
};
// JPEG frame header (SOFn). Field names follow the ITU-T T.81 notation
// (Lf = length, P = precision, Y/X = height/width, Nf = component count).
typedef struct JPEGRawFrameHeader {
uint16_t Lf;
uint8_t P;
uint16_t Y;
uint16_t X;
uint16_t Nf;
uint8_t C[JPEG_MAX_COMPONENTS];
uint8_t H[JPEG_MAX_COMPONENTS];
uint8_t V[JPEG_MAX_COMPONENTS];
uint8_t Tq[JPEG_MAX_COMPONENTS];
} JPEGRawFrameHeader;
// JPEG scan header (SOS): per-component selectors plus spectral/approximation
// parameters.
typedef struct JPEGRawScanHeader {
uint16_t Ls;
uint8_t Ns;
uint8_t Cs[JPEG_MAX_COMPONENTS];
uint8_t Td[JPEG_MAX_COMPONENTS];
uint8_t Ta[JPEG_MAX_COMPONENTS];
uint8_t Ss;
uint8_t Se;
uint8_t Ah;
uint8_t Al;
} JPEGRawScanHeader;
// A parsed scan: header plus the entropy-coded data that follows it.
typedef struct JPEGRawScan {
JPEGRawScanHeader header;
uint8_t *data;           // entropy-coded bytes; lifetime managed by data_ref
AVBufferRef *data_ref;
size_t data_size;
} JPEGRawScan;
// One quantisation table (64 entries) with its precision and destination id.
typedef struct JPEGRawQuantisationTable {
uint8_t Pq;
uint8_t Tq;
uint16_t Q[64];
} JPEGRawQuantisationTable;
// DQT segment: up to four quantisation tables.
typedef struct JPEGRawQuantisationTableSpecification {
uint16_t Lq;
JPEGRawQuantisationTable table[4];
} JPEGRawQuantisationTableSpecification;
// One Huffman table: class/destination plus code-length counts and values.
typedef struct JPEGRawHuffmanTable {
uint8_t Tc;
uint8_t Th;
uint8_t L[16];
uint8_t V[224];
} JPEGRawHuffmanTable;
// DHT segment: up to eight Huffman tables.
typedef struct JPEGRawHuffmanTableSpecification {
uint16_t Lh;
JPEGRawHuffmanTable table[8];
} JPEGRawHuffmanTableSpecification;
// APPn segment payload, kept as raw bytes.
typedef struct JPEGRawApplicationData {
uint16_t Lp;
uint8_t *Ap;             // payload bytes; lifetime managed by Ap_ref
AVBufferRef *Ap_ref;
} JPEGRawApplicationData;
// COM (comment) segment payload, kept as raw bytes.
typedef struct JPEGRawComment {
uint16_t Lc;
uint8_t *Cm;             // comment bytes; lifetime managed by Cm_ref
AVBufferRef *Cm_ref;
} JPEGRawComment;
#endif /* AVCODEC_CBS_JPEG_H */

View File

@ -1,231 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_MPEG2_H
#define AVCODEC_CBS_MPEG2_H
#include <stddef.h>
#include <stdint.h>
#include <libavutil/buffer.h>
// MPEG-2 start codes (the byte following the 0x000001 start-code prefix).
enum {
MPEG2_START_PICTURE = 0x00,
MPEG2_START_SLICE_MIN = 0x01,
MPEG2_START_SLICE_MAX = 0xaf,
MPEG2_START_USER_DATA = 0xb2,
MPEG2_START_SEQUENCE_HEADER = 0xb3,
MPEG2_START_SEQUENCE_ERROR = 0xb4,
MPEG2_START_EXTENSION = 0xb5,
MPEG2_START_SEQUENCE_END = 0xb7,
MPEG2_START_GROUP = 0xb8,
};
// True if the given start code denotes a slice (codes 0x01-0xaf).
#define MPEG2_START_IS_SLICE(type) \
((type) >= MPEG2_START_SLICE_MIN && \
(type) <= MPEG2_START_SLICE_MAX)
// extension_start_code_identifier values for MPEG2RawExtensionData.
enum {
MPEG2_EXTENSION_SEQUENCE = 0x1,
MPEG2_EXTENSION_SEQUENCE_DISPLAY = 0x2,
MPEG2_EXTENSION_QUANT_MATRIX = 0x3,
MPEG2_EXTENSION_COPYRIGHT = 0x4,
MPEG2_EXTENSION_SEQUENCE_SCALABLE = 0x5,
MPEG2_EXTENSION_PICTURE_DISPLAY = 0x7,
MPEG2_EXTENSION_PICTURE_CODING = 0x8,
MPEG2_EXTENSION_PICTURE_SPATIAL_SCALABLE = 0x9,
MPEG2_EXTENSION_PICTURE_TEMPORAL_SCALABLE = 0xa,
MPEG2_EXTENSION_CAMERA_PARAMETERS = 0xb,
MPEG2_EXTENSION_ITU_T = 0xc,
};
// sequence_header(): picture dimensions, rates and optional quantiser
// matrices.
typedef struct MPEG2RawSequenceHeader {
uint8_t sequence_header_code;
uint16_t horizontal_size_value;
uint16_t vertical_size_value;
uint8_t aspect_ratio_information;
uint8_t frame_rate_code;
uint32_t bit_rate_value;
uint16_t vbv_buffer_size_value;
uint8_t constrained_parameters_flag;
uint8_t load_intra_quantiser_matrix;
uint8_t intra_quantiser_matrix[64];
uint8_t load_non_intra_quantiser_matrix;
uint8_t non_intra_quantiser_matrix[64];
} MPEG2RawSequenceHeader;
// user_data(): opaque application bytes following a user-data start code.
typedef struct MPEG2RawUserData {
uint8_t user_data_start_code;
uint8_t *user_data;          // payload bytes; lifetime managed by user_data_ref
AVBufferRef *user_data_ref;
size_t user_data_length;
} MPEG2RawUserData;
// sequence_extension(): extends the sequence header with profile/level and
// the high-order bits of sizes and rates.
typedef struct MPEG2RawSequenceExtension {
uint8_t profile_and_level_indication;
uint8_t progressive_sequence;
uint8_t chroma_format;
uint8_t horizontal_size_extension;
uint8_t vertical_size_extension;
uint16_t bit_rate_extension;
uint8_t vbv_buffer_size_extension;
uint8_t low_delay;
uint8_t frame_rate_extension_n;
uint8_t frame_rate_extension_d;
} MPEG2RawSequenceExtension;
// sequence_display_extension(): colour description and display size.
typedef struct MPEG2RawSequenceDisplayExtension {
uint8_t video_format;
uint8_t colour_description;
uint8_t colour_primaries;
uint8_t transfer_characteristics;
uint8_t matrix_coefficients;
uint16_t display_horizontal_size;
uint16_t display_vertical_size;
} MPEG2RawSequenceDisplayExtension;
// group_of_pictures_header().
typedef struct MPEG2RawGroupOfPicturesHeader {
uint8_t group_start_code;
uint32_t time_code;
uint8_t closed_gop;
uint8_t broken_link;
} MPEG2RawGroupOfPicturesHeader;
// Trailing extra_information bytes carried in picture and slice headers.
typedef struct MPEG2RawExtraInformation {
uint8_t *extra_information;  // raw bytes; lifetime managed by extra_information_ref
AVBufferRef *extra_information_ref;
size_t extra_information_length;
} MPEG2RawExtraInformation;
// picture_header().
typedef struct MPEG2RawPictureHeader {
uint8_t picture_start_code;
uint16_t temporal_reference;
uint8_t picture_coding_type;
uint16_t vbv_delay;
uint8_t full_pel_forward_vector;
uint8_t forward_f_code;
uint8_t full_pel_backward_vector;
uint8_t backward_f_code;
MPEG2RawExtraInformation extra_information_picture;
} MPEG2RawPictureHeader;
// picture_coding_extension(), including the composite-display fields.
typedef struct MPEG2RawPictureCodingExtension {
uint8_t f_code[2][2];
uint8_t intra_dc_precision;
uint8_t picture_structure;
uint8_t top_field_first;
uint8_t frame_pred_frame_dct;
uint8_t concealment_motion_vectors;
uint8_t q_scale_type;
uint8_t intra_vlc_format;
uint8_t alternate_scan;
uint8_t repeat_first_field;
uint8_t chroma_420_type;
uint8_t progressive_frame;
uint8_t composite_display_flag;
uint8_t v_axis;
uint8_t field_sequence;
uint8_t sub_carrier;
uint8_t burst_amplitude;
uint8_t sub_carrier_phase;
} MPEG2RawPictureCodingExtension;
// quant_matrix_extension(): optional replacement quantiser matrices.
typedef struct MPEG2RawQuantMatrixExtension {
uint8_t load_intra_quantiser_matrix;
uint8_t intra_quantiser_matrix[64];
uint8_t load_non_intra_quantiser_matrix;
uint8_t non_intra_quantiser_matrix[64];
uint8_t load_chroma_intra_quantiser_matrix;
uint8_t chroma_intra_quantiser_matrix[64];
uint8_t load_chroma_non_intra_quantiser_matrix;
uint8_t chroma_non_intra_quantiser_matrix[64];
} MPEG2RawQuantMatrixExtension;
// picture_display_extension(): frame-centre offsets (up to three).
typedef struct MPEG2RawPictureDisplayExtension {
int16_t frame_centre_horizontal_offset[3];
int16_t frame_centre_vertical_offset[3];
} MPEG2RawPictureDisplayExtension;
// extension_data(): tagged union of the extension structures above; the
// active member is selected by extension_start_code_identifier (see the
// MPEG2_EXTENSION_* enum).
typedef struct MPEG2RawExtensionData {
uint8_t extension_start_code;
uint8_t extension_start_code_identifier;
union {
MPEG2RawSequenceExtension sequence;
MPEG2RawSequenceDisplayExtension sequence_display;
MPEG2RawQuantMatrixExtension quant_matrix;
MPEG2RawPictureCodingExtension picture_coding;
MPEG2RawPictureDisplayExtension picture_display;
} data;
} MPEG2RawExtensionData;
// slice() header fields preceding the macroblock data.
typedef struct MPEG2RawSliceHeader {
uint8_t slice_vertical_position;
uint8_t slice_vertical_position_extension;
uint8_t priority_breakpoint;
uint8_t quantiser_scale_code;
uint8_t slice_extension_flag;
uint8_t intra_slice;
uint8_t slice_picture_id_enable;
uint8_t slice_picture_id;
MPEG2RawExtraInformation extra_information_slice;
} MPEG2RawSliceHeader;
// A parsed slice: the decoded header plus the coded payload bytes.
typedef struct MPEG2RawSlice {
MPEG2RawSliceHeader header;
uint8_t *data;           // coded payload; lifetime managed by data_ref
AVBufferRef *data_ref;
size_t data_size;
int data_bit_start;      // bit offset within data at which the slice data begins
} MPEG2RawSlice;
// sequence_end(): just the terminating start code.
typedef struct MPEG2RawSequenceEnd {
uint8_t sequence_end_code;
} MPEG2RawSequenceEnd;
// Codec-specific coded-bitstream context for MPEG-2 read/write.
typedef struct CodedBitstreamMPEG2Context {
// Elements stored in headers which are required for other decoding.
uint16_t horizontal_size;
uint16_t vertical_size;
uint8_t scalable;
uint8_t scalable_mode;
uint8_t progressive_sequence;
uint8_t number_of_frame_centre_offsets;
} CodedBitstreamMPEG2Context;

View File

@ -1,200 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_SEI_H
#define AVCODEC_CBS_SEI_H
#include <stddef.h>
#include <stdint.h>
#include <libavutil/buffer.h>
#include "cbs.h"
#include "sei.h"
// filler_payload(): only the size is meaningful; content is filler bytes.
typedef struct SEIRawFillerPayload {
uint32_t payload_size;
} SEIRawFillerPayload;
// user_data_registered_itu_t_t35(): opaque payload tagged by country code.
typedef struct SEIRawUserDataRegistered {
uint8_t itu_t_t35_country_code;
uint8_t itu_t_t35_country_code_extension_byte;
uint8_t *data;
// Owning reference for data, or NULL if data is not refcounted.
AVBufferRef *data_ref;
size_t data_length;
} SEIRawUserDataRegistered;
// user_data_unregistered(): opaque payload identified by a 16-byte UUID.
typedef struct SEIRawUserDataUnregistered {
uint8_t uuid_iso_iec_11578[16];
uint8_t *data;
AVBufferRef *data_ref;
size_t data_length;
} SEIRawUserDataUnregistered;
// mastering_display_colour_volume(): HDR mastering metadata.
typedef struct SEIRawMasteringDisplayColourVolume {
uint16_t display_primaries_x[3];
uint16_t display_primaries_y[3];
uint16_t white_point_x;
uint16_t white_point_y;
uint32_t max_display_mastering_luminance;
uint32_t min_display_mastering_luminance;
} SEIRawMasteringDisplayColourVolume;
// content_light_level_info(): HDR content light level metadata.
typedef struct SEIRawContentLightLevelInfo {
uint16_t max_content_light_level;
uint16_t max_pic_average_light_level;
} SEIRawContentLightLevelInfo;
// alternative_transfer_characteristics(): preferred transfer function override.
typedef struct SEIRawAlternativeTransferCharacteristics {
uint8_t preferred_transfer_characteristics;
} SEIRawAlternativeTransferCharacteristics;
// One decomposed SEI message; payload points to the type-specific struct
// selected by payload_type.
typedef struct SEIRawMessage {
uint32_t payload_type;
uint32_t payload_size;
void *payload;
// Owning reference for payload, or NULL if not refcounted.
AVBufferRef *payload_ref;
// Trailing extension bits after the known payload fields, if any.
uint8_t *extension_data;
AVBufferRef *extension_data_ref;
size_t extension_bit_length;
} SEIRawMessage;
// Growable list of SEI messages inside one SEI NAL unit.
typedef struct SEIRawMessageList {
SEIRawMessage *messages;
int nb_messages;
int nb_messages_allocated;
} SEIRawMessageList;
// Per-message state passed to the read/write callbacks.
typedef struct SEIMessageState {
// The type of the payload being written.
uint32_t payload_type;
// When reading, contains the size of the payload to allow finding the
// end of variable-length fields (such as user_data_payload_byte[]).
// (When writing, the size will be derived from the total number of
// bytes actually written.)
uint32_t payload_size;
// When writing, indicates that payload extension data is present so
// all extended fields must be written. May be updated by the writer
// to indicate that extended fields have been written, so the extension
// end bits must be written too.
uint8_t extension_present;
} SEIMessageState;
// Forward declarations: the bit reader/writer types are only used by pointer here.
struct GetBitContext;
struct PutBitContext;
// Callback parsing one SEI payload of a given type from the bitstream
// into the decomposed structure pointed to by current.
typedef int (*SEIMessageReadFunction)(CodedBitstreamContext *ctx,
struct GetBitContext *rw,
void *current,
SEIMessageState *sei);
// Callback serializing the decomposed structure in current back to the bitstream.
typedef int (*SEIMessageWriteFunction)(CodedBitstreamContext *ctx,
struct PutBitContext *rw,
void *current,
SEIMessageState *sei);
// Table entry describing how one SEI payload type is handled.
typedef struct SEIMessageTypeDescriptor {
// Payload type for the message. (-1 in this field ends a list.)
int type;
// Valid in a prefix SEI NAL unit (always for H.264).
uint8_t prefix;
// Valid in a suffix SEI NAL unit (never for H.264).
uint8_t suffix;
// Size of the decomposed structure.
size_t size;
// Read bitstream into SEI message.
SEIMessageReadFunction read;
// Write bitstream from SEI message.
SEIMessageWriteFunction write;
} SEIMessageTypeDescriptor;
// Macro for the read/write pair. The clumsy cast is needed because the
// current pointer is typed in all of the read/write functions but has to
// be void here to fit all cases.
#define SEI_MESSAGE_RW(codec, name) \
.read = (SEIMessageReadFunction)cbs_##codec##_read_##name, \
.write = (SEIMessageWriteFunction)cbs_##codec##_write_##name
// End-of-list sentinel element.
#define SEI_MESSAGE_TYPE_END \
{ .type = -1 }
/**
* Find the type descriptor for the given payload type.
*
* Returns NULL if the payload type is not known.
*/
const SEIMessageTypeDescriptor *ff_cbs_sei_find_type(CodedBitstreamContext *ctx,
int payload_type);
/**
* Allocate a new payload for the given SEI message.
*/
int ff_cbs_sei_alloc_message_payload(SEIRawMessage *message,
const SEIMessageTypeDescriptor *desc);
/**
* Allocate a new empty SEI message in a message list.
*
* The new message is in place nb_messages - 1.
*/
int ff_cbs_sei_list_add(SEIRawMessageList *list);
/**
* Free all SEI messages in a message list.
*/
void ff_cbs_sei_free_message_list(SEIRawMessageList *list);
/**
* Add an SEI message to an access unit.
*
* Will add to an existing SEI NAL unit, or create a new one for the
* message if there is no suitable existing one.
*
* Takes a new reference to payload_buf, if set. If payload_buf is
* NULL then the new message will not be reference counted.
*/
// NOTE(review): prefix presumably selects a prefix vs. suffix SEI NAL unit
// (cf. SEIMessageTypeDescriptor.prefix/suffix) — confirm in the implementation.
int ff_cbs_sei_add_message(CodedBitstreamContext *ctx,
CodedBitstreamFragment *au,
int prefix,
uint32_t payload_type,
void *payload_data,
AVBufferRef *payload_buf);
/**
* Iterate over messages with the given payload type in an access unit.
*
* Set message to NULL in the first call. Returns 0 while more messages
* are available, AVERROR(ENOENT) when all messages have been found.
*/
int ff_cbs_sei_find_message(CodedBitstreamContext *ctx,
CodedBitstreamFragment *au,
uint32_t payload_type,
SEIRawMessage **message);
/**
* Delete all messages with the given payload type from an access unit.
*/
void ff_cbs_sei_delete_message_type(CodedBitstreamContext *ctx,
CodedBitstreamFragment *au,
uint32_t payload_type);
#endif /* AVCODEC_CBS_SEI_H */

View File

@ -1,213 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_CBS_VP9_H
#define AVCODEC_CBS_VP9_H
#include <stddef.h>
#include <stdint.h>
#include "cbs.h"
// Section numbers below refer to the VP9 bitstream specification.
// Miscellaneous constants (section 3).
enum {
VP9_REFS_PER_FRAME = 3,
VP9_MIN_TILE_WIDTH_B64 = 4,
VP9_MAX_TILE_WIDTH_B64 = 64,
VP9_NUM_REF_FRAMES = 8,
VP9_MAX_REF_FRAMES = 4,
VP9_MAX_SEGMENTS = 8,
VP9_SEG_LVL_MAX = 4,
};
// Frame types (section 7.2).
enum {
VP9_KEY_FRAME = 0,
VP9_NON_KEY_FRAME = 1,
};
// Frame sync bytes (section 7.2.1).
enum {
VP9_FRAME_SYNC_0 = 0x49,
VP9_FRAME_SYNC_1 = 0x83,
VP9_FRAME_SYNC_2 = 0x42,
};
// Color space values (section 7.2.2).
enum {
VP9_CS_UNKNOWN = 0,
VP9_CS_BT_601 = 1,
VP9_CS_BT_709 = 2,
VP9_CS_SMPTE_170 = 3,
VP9_CS_SMPTE_240 = 4,
VP9_CS_BT_2020 = 5,
VP9_CS_RESERVED = 6,
VP9_CS_RGB = 7,
};
// Reference frame types (section 7.4.12).
enum {
VP9_INTRA_FRAME = 0,
VP9_LAST_FRAME = 1,
VP9_GOLDEN_FRAME = 2,
VP9_ALTREF_FRAME = 3,
};
// Superframe properties (section B.3).
enum {
VP9_MAX_FRAMES_IN_SUPERFRAME = 8,
VP9_SUPERFRAME_MARKER = 6,
};
// Decomposed uncompressed VP9 frame header; field names mirror the
// syntax elements of the uncompressed_header() structure in the spec.
typedef struct VP9RawFrameHeader {
uint8_t frame_marker;
uint8_t profile_low_bit;
uint8_t profile_high_bit;
uint8_t show_existing_frame;
uint8_t frame_to_show_map_idx;
uint8_t frame_type;
uint8_t show_frame;
uint8_t error_resilient_mode;
// Color config.
uint8_t ten_or_twelve_bit;
uint8_t color_space;
uint8_t color_range;
uint8_t subsampling_x;
uint8_t subsampling_y;
uint8_t refresh_frame_flags;
uint8_t intra_only;
uint8_t reset_frame_context;
uint8_t ref_frame_idx[VP9_REFS_PER_FRAME];
uint8_t ref_frame_sign_bias[VP9_MAX_REF_FRAMES];
uint8_t allow_high_precision_mv;
uint8_t refresh_frame_context;
uint8_t frame_parallel_decoding_mode;
uint8_t frame_context_idx;
// Frame/render size.
uint8_t found_ref[VP9_REFS_PER_FRAME];
uint16_t frame_width_minus_1;
uint16_t frame_height_minus_1;
uint8_t render_and_frame_size_different;
uint16_t render_width_minus_1;
uint16_t render_height_minus_1;
// Interpolation filter.
uint8_t is_filter_switchable;
uint8_t raw_interpolation_filter_type;
// Loop filter params.
uint8_t loop_filter_level;
uint8_t loop_filter_sharpness;
uint8_t loop_filter_delta_enabled;
uint8_t loop_filter_delta_update;
uint8_t update_ref_delta[VP9_MAX_REF_FRAMES];
int8_t loop_filter_ref_deltas[VP9_MAX_REF_FRAMES];
uint8_t update_mode_delta[2];
int8_t loop_filter_mode_deltas[2];
// Quantization params.
uint8_t base_q_idx;
int8_t delta_q_y_dc;
int8_t delta_q_uv_dc;
int8_t delta_q_uv_ac;
// Segmentation params.
uint8_t segmentation_enabled;
uint8_t segmentation_update_map;
uint8_t segmentation_tree_probs[7];
uint8_t segmentation_temporal_update;
uint8_t segmentation_pred_prob[3];
uint8_t segmentation_update_data;
uint8_t segmentation_abs_or_delta_update;
uint8_t feature_enabled[VP9_MAX_SEGMENTS][VP9_SEG_LVL_MAX];
uint8_t feature_value[VP9_MAX_SEGMENTS][VP9_SEG_LVL_MAX];
uint8_t feature_sign[VP9_MAX_SEGMENTS][VP9_SEG_LVL_MAX];
// Tile info.
uint8_t tile_cols_log2;
uint8_t tile_rows_log2;
uint16_t header_size_in_bytes;
} VP9RawFrameHeader;
// A complete VP9 frame: parsed header plus the (possibly refcounted) payload.
typedef struct VP9RawFrame {
VP9RawFrameHeader header;
uint8_t *data;
// Owning reference for data, or NULL if data is not refcounted.
AVBufferRef *data_ref;
size_t data_size;
} VP9RawFrame;
// superframe_index() trailer describing the frames packed in a superframe.
typedef struct VP9RawSuperframeIndex {
uint8_t superframe_marker;
uint8_t bytes_per_framesize_minus_1;
uint8_t frames_in_superframe_minus_1;
uint32_t frame_sizes[VP9_MAX_FRAMES_IN_SUPERFRAME];
} VP9RawSuperframeIndex;
// A superframe: up to VP9_MAX_FRAMES_IN_SUPERFRAME frames plus the index.
typedef struct VP9RawSuperframe {
VP9RawFrame frames[VP9_MAX_FRAMES_IN_SUPERFRAME];
VP9RawSuperframeIndex index;
} VP9RawSuperframe;
// Saved per-reference-slot state (names in comments are the spec variables).
typedef struct VP9ReferenceFrameState {
int frame_width; // RefFrameWidth
int frame_height; // RefFrameHeight
int subsampling_x; // RefSubsamplingX
int subsampling_y; // RefSubsamplingY
int bit_depth; // RefBitDepth
} VP9ReferenceFrameState;
// Private codec context for the VP9 CBS implementation.
typedef struct CodedBitstreamVP9Context {
int profile;
// Frame dimensions in 8x8 mode info blocks.
uint16_t mi_cols;
uint16_t mi_rows;
// Frame dimensions in 64x64 superblocks.
uint16_t sb64_cols;
uint16_t sb64_rows;
int frame_width;
int frame_height;
uint8_t subsampling_x;
uint8_t subsampling_y;
int bit_depth;
// State for each of the 8 reference frame slots.
VP9ReferenceFrameState ref[VP9_NUM_REF_FRAMES];
} CodedBitstreamVP9Context;
#endif /* AVCODEC_CBS_VP9_H */

View File

@ -1,113 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* H.264 common definitions
*/
#ifndef AVCODEC_H264_H
#define AVCODEC_H264_H
#define QP_MAX_NUM (51 + 6 * 6) // The maximum supported qp
/*
* Table 7-1 NAL unit type codes, syntax element categories, and NAL unit type classes in
* T-REC-H.264-201704
*/
enum {
H264_NAL_UNSPECIFIED = 0,
H264_NAL_SLICE = 1,
H264_NAL_DPA = 2,
H264_NAL_DPB = 3,
H264_NAL_DPC = 4,
H264_NAL_IDR_SLICE = 5,
H264_NAL_SEI = 6,
H264_NAL_SPS = 7,
H264_NAL_PPS = 8,
H264_NAL_AUD = 9,
H264_NAL_END_SEQUENCE = 10,
H264_NAL_END_STREAM = 11,
H264_NAL_FILLER_DATA = 12,
H264_NAL_SPS_EXT = 13,
H264_NAL_PREFIX = 14,
H264_NAL_SUB_SPS = 15,
H264_NAL_DPS = 16,
H264_NAL_RESERVED17 = 17,
H264_NAL_RESERVED18 = 18,
H264_NAL_AUXILIARY_SLICE = 19,
H264_NAL_EXTEN_SLICE = 20,
H264_NAL_DEPTH_EXTEN_SLICE = 21,
H264_NAL_RESERVED22 = 22,
H264_NAL_RESERVED23 = 23,
H264_NAL_UNSPECIFIED24 = 24,
H264_NAL_UNSPECIFIED25 = 25,
H264_NAL_UNSPECIFIED26 = 26,
H264_NAL_UNSPECIFIED27 = 27,
H264_NAL_UNSPECIFIED28 = 28,
H264_NAL_UNSPECIFIED29 = 29,
H264_NAL_UNSPECIFIED30 = 30,
H264_NAL_UNSPECIFIED31 = 31,
};
// Worst-case bounds derived from the H.264 specification; the section
// each bound comes from is cited on the line above it.
enum {
// 7.4.2.1.1: seq_parameter_set_id is in [0, 31].
H264_MAX_SPS_COUNT = 32,
// 7.4.2.2: pic_parameter_set_id is in [0, 255].
H264_MAX_PPS_COUNT = 256,
// A.3: MaxDpbFrames is bounded above by 16.
H264_MAX_DPB_FRAMES = 16,
// 7.4.2.1.1: max_num_ref_frames is in [0, MaxDpbFrames], and
// each reference frame can have two fields.
H264_MAX_REFS = 2 * H264_MAX_DPB_FRAMES,
// 7.4.3.1: modification_of_pic_nums_idc is not equal to 3 at most
// num_ref_idx_lN_active_minus1 + 1 times (that is, once for each
// possible reference), then equal to 3 once.
H264_MAX_RPLM_COUNT = H264_MAX_REFS + 1,
// 7.4.3.3: in the worst case, we begin with a full short-term
// reference picture list. Each picture in turn is moved to the
// long-term list (type 3) and then discarded from there (type 2).
// Then, we set the length of the long-term list (type 4), mark
// the current picture as long-term (type 6) and terminate the
// process (type 0).
H264_MAX_MMCO_COUNT = H264_MAX_REFS * 2 + 3,
// A.2.1, A.2.3: profiles supporting FMO constrain
// num_slice_groups_minus1 to be in [0, 7].
H264_MAX_SLICE_GROUPS = 8,
// E.2.2: cpb_cnt_minus1 is in [0, 31].
H264_MAX_CPB_CNT = 32,
// A.3: in table A-1 the highest level allows a MaxFS of 139264.
H264_MAX_MB_PIC_SIZE = 139264,
// A.3.1, A.3.2: PicWidthInMbs and PicHeightInMbs are constrained
// to be not greater than sqrt(MaxFS * 8). Hence height/width are
// bounded above by sqrt(139264 * 8) = 1055.5 macroblocks.
H264_MAX_MB_WIDTH = 1055,
H264_MAX_MB_HEIGHT = 1055,
H264_MAX_WIDTH = H264_MAX_MB_WIDTH * 16,
H264_MAX_HEIGHT = H264_MAX_MB_HEIGHT * 16,
};
#endif /* AVCODEC_H264_H */

View File

@ -1,152 +0,0 @@
/*
* H.264/HEVC common parsing code
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_H2645_PARSE_H
#define AVCODEC_H2645_PARSE_H
#include <stdint.h>
#include <libavcodec/avcodec.h>
#include <libavutil/buffer.h>
/**
* CACHED_BITSTREAM_READER can only be true if it's used by a decoder
* Thus, Sunshine doesn't need to worry about it
*/
// Bit-reader state over [buffer, buffer_end); index is the current bit position.
typedef struct GetBitContext {
const uint8_t *buffer, *buffer_end;
#if CACHED_BITSTREAM_READER
uint64_t cache;
unsigned bits_left;
#endif
int index;
int size_in_bits;
// size_in_bits + 8: over-read guard used by the reader implementation.
int size_in_bits_plus8;
} GetBitContext;
#define MAX_MBPAIR_SIZE (256 * 1024) // a tighter bound could be calculated if someone cares about a few bytes
// One NAL unit, both in raw (escaped) and unescaped (RBSP) form.
typedef struct H2645NAL {
// Unescaped payload; points into the packet's shared H2645RBSP buffer.
uint8_t *rbsp_buffer;
int size;
const uint8_t *data;
/**
* Size, in bits, of just the data, excluding the stop bit and any trailing
* padding. I.e. what HEVC calls SODB.
*/
int size_bits;
int raw_size;
const uint8_t *raw_data;
// Bit reader positioned over the unescaped data.
GetBitContext gb;
/**
* NAL unit type
*/
int type;
/**
* HEVC only, nuh_temporal_id_plus_1 - 1
*/
int temporal_id;
/*
* HEVC only, identifier of layer to which nal unit belongs
*/
int nuh_layer_id;
// Positions of emulation-prevention bytes removed during unescaping.
int skipped_bytes;
int skipped_bytes_pos_size;
int *skipped_bytes_pos;
/**
* H.264 only, nal_ref_idc
*/
int ref_idc;
} H2645NAL;
// Shared buffer holding the unescaped (RBSP) data for all NALs of a packet.
typedef struct H2645RBSP {
uint8_t *rbsp_buffer;
// Owning reference for rbsp_buffer, or NULL if not refcounted.
AVBufferRef *rbsp_buffer_ref;
int rbsp_buffer_alloc_size;
int rbsp_buffer_size;
} H2645RBSP;
/* an input packet split into unescaped NAL units */
typedef struct H2645Packet {
H2645NAL *nals;
H2645RBSP rbsp;
int nb_nals;
// Allocation bookkeeping for the nals array.
int nals_allocated;
unsigned nal_buffer_size;
} H2645Packet;
/**
* Extract the raw (unescaped) bitstream.
*/
// NOTE(review): small_padding presumably controls how much zero padding is
// appended after the unescaped data — confirm against the implementation.
int ff_h2645_extract_rbsp(const uint8_t *src, int length, H2645RBSP *rbsp,
H2645NAL *nal, int small_padding);
/**
* Split an input packet into NAL units.
*
* If data == raw_data holds true for a NAL unit of the returned pkt, then
* said NAL unit does not contain any emulation_prevention_three_byte and
* the data is contained in the input buffer pointed to by buf.
* Otherwise, the unescaped data is part of the rbsp_buffer described by the
* packet's H2645RBSP.
*
* If the packet's rbsp_buffer_ref is not NULL, the underlying AVBuffer must
* own rbsp_buffer. If not and rbsp_buffer is not NULL, use_ref must be 0.
* If use_ref is set, rbsp_buffer will be reference-counted and owned by
* the underlying AVBuffer of rbsp_buffer_ref.
*/
int ff_h2645_packet_split(H2645Packet *pkt, const uint8_t *buf, int length,
void *logctx, int is_nalff, int nal_length_size,
enum AVCodecID codec_id, int small_padding, int use_ref);
/**
* Free all the allocated memory in the packet.
*/
void ff_h2645_packet_uninit(H2645Packet *pkt);
/**
 * Read one big-endian NAL length prefix of nal_length_size bytes from buf,
 * advancing *buf_index past it.
 *
 * Returns the NAL unit size on success, AVERROR(EAGAIN) if fewer than
 * nal_length_size bytes remain (caller should refill the buffer), or
 * AVERROR_INVALIDDATA if the decoded size is non-positive or larger than
 * the bytes remaining after the prefix.
 */
static inline int get_nalsize(int nal_length_size, const uint8_t *buf,
                              int buf_size, int *buf_index, void *logctx) {
    int remaining = nal_length_size;
    int nalsize = 0;

    /* Not enough input left for a complete length prefix. */
    if (*buf_index >= buf_size - nal_length_size)
        return AVERROR(EAGAIN);

    /* Accumulate the prefix bytes MSB-first; the unsigned cast keeps the
     * left shift well-defined if the value reaches the sign bit. */
    while (remaining-- > 0)
        nalsize = ((unsigned)nalsize << 8) | buf[(*buf_index)++];

    if (nalsize <= 0 || nalsize > buf_size - *buf_index) {
        av_log(logctx, AV_LOG_ERROR,
               "Invalid NAL unit size (%d > %d).\n", nalsize, buf_size - *buf_index);
        return AVERROR_INVALIDDATA;
    }

    return nalsize;
}
#endif /* AVCODEC_H2645_PARSE_H */

View File

@ -1,160 +0,0 @@
/*
* HEVC shared code
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_HEVC_H
#define AVCODEC_HEVC_H
/**
* Table 7-1 NAL unit type codes and NAL unit type classes in
* T-REC-H.265-201802
*/
enum HEVCNALUnitType {
HEVC_NAL_TRAIL_N = 0,
HEVC_NAL_TRAIL_R = 1,
HEVC_NAL_TSA_N = 2,
HEVC_NAL_TSA_R = 3,
HEVC_NAL_STSA_N = 4,
HEVC_NAL_STSA_R = 5,
HEVC_NAL_RADL_N = 6,
HEVC_NAL_RADL_R = 7,
HEVC_NAL_RASL_N = 8,
HEVC_NAL_RASL_R = 9,
HEVC_NAL_VCL_N10 = 10,
HEVC_NAL_VCL_R11 = 11,
HEVC_NAL_VCL_N12 = 12,
HEVC_NAL_VCL_R13 = 13,
HEVC_NAL_VCL_N14 = 14,
HEVC_NAL_VCL_R15 = 15,
HEVC_NAL_BLA_W_LP = 16,
HEVC_NAL_BLA_W_RADL = 17,
HEVC_NAL_BLA_N_LP = 18,
HEVC_NAL_IDR_W_RADL = 19,
HEVC_NAL_IDR_N_LP = 20,
HEVC_NAL_CRA_NUT = 21,
HEVC_NAL_RSV_IRAP_VCL22 = 22,
HEVC_NAL_RSV_IRAP_VCL23 = 23,
HEVC_NAL_RSV_VCL24 = 24,
HEVC_NAL_RSV_VCL25 = 25,
HEVC_NAL_RSV_VCL26 = 26,
HEVC_NAL_RSV_VCL27 = 27,
HEVC_NAL_RSV_VCL28 = 28,
HEVC_NAL_RSV_VCL29 = 29,
HEVC_NAL_RSV_VCL30 = 30,
HEVC_NAL_RSV_VCL31 = 31,
HEVC_NAL_VPS = 32,
HEVC_NAL_SPS = 33,
HEVC_NAL_PPS = 34,
HEVC_NAL_AUD = 35,
HEVC_NAL_EOS_NUT = 36,
HEVC_NAL_EOB_NUT = 37,
HEVC_NAL_FD_NUT = 38,
HEVC_NAL_SEI_PREFIX = 39,
HEVC_NAL_SEI_SUFFIX = 40,
HEVC_NAL_RSV_NVCL41 = 41,
HEVC_NAL_RSV_NVCL42 = 42,
HEVC_NAL_RSV_NVCL43 = 43,
HEVC_NAL_RSV_NVCL44 = 44,
HEVC_NAL_RSV_NVCL45 = 45,
HEVC_NAL_RSV_NVCL46 = 46,
HEVC_NAL_RSV_NVCL47 = 47,
HEVC_NAL_UNSPEC48 = 48,
HEVC_NAL_UNSPEC49 = 49,
HEVC_NAL_UNSPEC50 = 50,
HEVC_NAL_UNSPEC51 = 51,
HEVC_NAL_UNSPEC52 = 52,
HEVC_NAL_UNSPEC53 = 53,
HEVC_NAL_UNSPEC54 = 54,
HEVC_NAL_UNSPEC55 = 55,
HEVC_NAL_UNSPEC56 = 56,
HEVC_NAL_UNSPEC57 = 57,
HEVC_NAL_UNSPEC58 = 58,
HEVC_NAL_UNSPEC59 = 59,
HEVC_NAL_UNSPEC60 = 60,
HEVC_NAL_UNSPEC61 = 61,
HEVC_NAL_UNSPEC62 = 62,
HEVC_NAL_UNSPEC63 = 63,
};
// Slice types (slice_type syntax element values).
enum HEVCSliceType {
HEVC_SLICE_B = 0,
HEVC_SLICE_P = 1,
HEVC_SLICE_I = 2,
};
// Worst-case bounds derived from the H.265 specification; the section
// each bound comes from is cited on the line above it.
enum {
// 7.4.3.1: vps_max_layers_minus1 is in [0, 62].
HEVC_MAX_LAYERS = 63,
// 7.4.3.1: vps_max_sub_layers_minus1 is in [0, 6].
HEVC_MAX_SUB_LAYERS = 7,
// 7.4.3.1: vps_num_layer_sets_minus1 is in [0, 1023].
HEVC_MAX_LAYER_SETS = 1024,
// 7.4.2.1: vps_video_parameter_set_id is u(4).
HEVC_MAX_VPS_COUNT = 16,
// 7.4.3.2.1: sps_seq_parameter_set_id is in [0, 15].
HEVC_MAX_SPS_COUNT = 16,
// 7.4.3.3.1: pps_pic_parameter_set_id is in [0, 63].
HEVC_MAX_PPS_COUNT = 64,
// A.4.2: MaxDpbSize is bounded above by 16.
HEVC_MAX_DPB_SIZE = 16,
// 7.4.3.1: vps_max_dec_pic_buffering_minus1[i] is in [0, MaxDpbSize - 1].
HEVC_MAX_REFS = HEVC_MAX_DPB_SIZE,
// 7.4.3.2.1: num_short_term_ref_pic_sets is in [0, 64].
HEVC_MAX_SHORT_TERM_REF_PIC_SETS = 64,
// 7.4.3.2.1: num_long_term_ref_pics_sps is in [0, 32].
HEVC_MAX_LONG_TERM_REF_PICS = 32,
// A.3: all profiles require that CtbLog2SizeY is in [4, 6].
HEVC_MIN_LOG2_CTB_SIZE = 4,
HEVC_MAX_LOG2_CTB_SIZE = 6,
// E.3.2: cpb_cnt_minus1[i] is in [0, 31].
HEVC_MAX_CPB_CNT = 32,
// A.4.1: in table A.6 the highest level allows a MaxLumaPs of 35 651 584.
HEVC_MAX_LUMA_PS = 35651584,
// A.4.1: pic_width_in_luma_samples and pic_height_in_luma_samples are
// constrained to be not greater than sqrt(MaxLumaPs * 8). Hence height/
// width are bounded above by sqrt(8 * 35651584) = 16888.2 samples.
HEVC_MAX_WIDTH = 16888,
HEVC_MAX_HEIGHT = 16888,
// A.4.1: table A.6 allows at most 22 tile rows for any level.
HEVC_MAX_TILE_ROWS = 22,
// A.4.1: table A.6 allows at most 20 tile columns for any level.
HEVC_MAX_TILE_COLUMNS = 20,
// A.4.2: table A.6 allows at most 600 slice segments for any level.
HEVC_MAX_SLICE_SEGMENTS = 600,
// 7.4.7.1: in the worst case (tiles_enabled_flag and
// entropy_coding_sync_enabled_flag are both set), entry points can be
// placed at the beginning of every Ctb row in every tile, giving an
// upper bound of (num_tile_columns_minus1 + 1) * PicHeightInCtbsY - 1.
// Only a stream with very high resolution and perverse parameters could
// get near that, though, so set a lower limit here with the maximum
// possible value for 4K video (at most 135 16x16 Ctb rows).
HEVC_MAX_ENTRY_POINT_OFFSETS = HEVC_MAX_TILE_COLUMNS * 135,
};
#endif /* AVCODEC_HEVC_H */

View File

@ -1,140 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_SEI_H
#define AVCODEC_SEI_H
// SEI payload types form a common namespace between the H.264, H.265
// and H.266 standards. A given payload type always has the same
// meaning, but some names have different payload types in different
// standards (e.g. scalable-nesting is 30 in H.264 but 133 in H.265).
// The content of the payload data depends on the standard, though
// many generic parts have the same interpretation everywhere (such as
// mastering-display-colour-volume and user-data-unregistered).
// Names with a _4/_5 suffix distinguish types whose numeric value
// differs between H.264 (_4) and H.265 (_5).
enum {
SEI_TYPE_BUFFERING_PERIOD = 0,
SEI_TYPE_PIC_TIMING = 1,
SEI_TYPE_PAN_SCAN_RECT = 2,
SEI_TYPE_FILLER_PAYLOAD = 3,
SEI_TYPE_USER_DATA_REGISTERED_ITU_T_T35 = 4,
SEI_TYPE_USER_DATA_UNREGISTERED = 5,
SEI_TYPE_RECOVERY_POINT = 6,
SEI_TYPE_DEC_REF_PIC_MARKING_REPETITION = 7,
SEI_TYPE_SPARE_PIC = 8,
SEI_TYPE_SCENE_INFO = 9,
SEI_TYPE_SUB_SEQ_INFO = 10,
SEI_TYPE_SUB_SEQ_LAYER_CHARACTERISTICS = 11,
SEI_TYPE_SUB_SEQ_CHARACTERISTICS = 12,
SEI_TYPE_FULL_FRAME_FREEZE = 13,
SEI_TYPE_FULL_FRAME_FREEZE_RELEASE = 14,
SEI_TYPE_FULL_FRAME_SNAPSHOT = 15,
SEI_TYPE_PROGRESSIVE_REFINEMENT_SEGMENT_START = 16,
SEI_TYPE_PROGRESSIVE_REFINEMENT_SEGMENT_END = 17,
SEI_TYPE_MOTION_CONSTRAINED_SLICE_GROUP_SET = 18,
SEI_TYPE_FILM_GRAIN_CHARACTERISTICS = 19,
SEI_TYPE_DEBLOCKING_FILTER_DISPLAY_PREFERENCE = 20,
SEI_TYPE_STEREO_VIDEO_INFO = 21,
SEI_TYPE_POST_FILTER_HINT = 22,
SEI_TYPE_TONE_MAPPING_INFO = 23,
SEI_TYPE_SCALABILITY_INFO = 24,
SEI_TYPE_SUB_PIC_SCALABLE_LAYER = 25,
SEI_TYPE_NON_REQUIRED_LAYER_REP = 26,
SEI_TYPE_PRIORITY_LAYER_INFO = 27,
SEI_TYPE_LAYERS_NOT_PRESENT_4 = 28,
SEI_TYPE_LAYER_DEPENDENCY_CHANGE = 29,
SEI_TYPE_SCALABLE_NESTING_4 = 30,
SEI_TYPE_BASE_LAYER_TEMPORAL_HRD = 31,
SEI_TYPE_QUALITY_LAYER_INTEGRITY_CHECK = 32,
SEI_TYPE_REDUNDANT_PIC_PROPERTY = 33,
SEI_TYPE_TL0_DEP_REP_INDEX = 34,
SEI_TYPE_TL_SWITCHING_POINT = 35,
SEI_TYPE_PARALLEL_DECODING_INFO = 36,
SEI_TYPE_MVC_SCALABLE_NESTING = 37,
SEI_TYPE_VIEW_SCALABILITY_INFO = 38,
SEI_TYPE_MULTIVIEW_SCENE_INFO_4 = 39,
SEI_TYPE_MULTIVIEW_ACQUISITION_INFO_4 = 40,
SEI_TYPE_NON_REQUIRED_VIEW_COMPONENT = 41,
SEI_TYPE_VIEW_DEPENDENCY_CHANGE = 42,
SEI_TYPE_OPERATION_POINTS_NOT_PRESENT = 43,
SEI_TYPE_BASE_VIEW_TEMPORAL_HRD = 44,
SEI_TYPE_FRAME_PACKING_ARRANGEMENT = 45,
SEI_TYPE_MULTIVIEW_VIEW_POSITION_4 = 46,
SEI_TYPE_DISPLAY_ORIENTATION = 47,
SEI_TYPE_MVCD_SCALABLE_NESTING = 48,
SEI_TYPE_MVCD_VIEW_SCALABILITY_INFO = 49,
SEI_TYPE_DEPTH_REPRESENTATION_INFO_4 = 50,
SEI_TYPE_THREE_DIMENSIONAL_REFERENCE_DISPLAYS_INFO_4 = 51,
SEI_TYPE_DEPTH_TIMING = 52,
SEI_TYPE_DEPTH_SAMPLING_INFO = 53,
SEI_TYPE_CONSTRAINED_DEPTH_PARAMETER_SET_IDENTIFIER = 54,
SEI_TYPE_GREEN_METADATA = 56,
SEI_TYPE_STRUCTURE_OF_PICTURES_INFO = 128,
SEI_TYPE_ACTIVE_PARAMETER_SETS = 129,
SEI_TYPE_PARAMETER_SETS_INCLUSION_INDICATION = SEI_TYPE_ACTIVE_PARAMETER_SETS,
SEI_TYPE_DECODING_UNIT_INFO = 130,
SEI_TYPE_TEMPORAL_SUB_LAYER_ZERO_IDX = 131,
SEI_TYPE_DECODED_PICTURE_HASH = 132,
SEI_TYPE_SCALABLE_NESTING_5 = 133,
SEI_TYPE_REGION_REFRESH_INFO = 134,
SEI_TYPE_NO_DISPLAY = 135,
SEI_TYPE_TIME_CODE = 136,
SEI_TYPE_MASTERING_DISPLAY_COLOUR_VOLUME = 137,
SEI_TYPE_SEGMENTED_RECT_FRAME_PACKING_ARRANGEMENT = 138,
SEI_TYPE_TEMPORAL_MOTION_CONSTRAINED_TILE_SETS = 139,
SEI_TYPE_CHROMA_RESAMPLING_FILTER_HINT = 140,
SEI_TYPE_KNEE_FUNCTION_INFO = 141,
SEI_TYPE_COLOUR_REMAPPING_INFO = 142,
SEI_TYPE_DEINTERLACED_FIELD_IDENTIFICATION = 143,
SEI_TYPE_CONTENT_LIGHT_LEVEL_INFO = 144,
SEI_TYPE_DEPENDENT_RAP_INDICATION = 145,
SEI_TYPE_CODED_REGION_COMPLETION = 146,
SEI_TYPE_ALTERNATIVE_TRANSFER_CHARACTERISTICS = 147,
SEI_TYPE_AMBIENT_VIEWING_ENVIRONMENT = 148,
SEI_TYPE_CONTENT_COLOUR_VOLUME = 149,
SEI_TYPE_EQUIRECTANGULAR_PROJECTION = 150,
SEI_TYPE_CUBEMAP_PROJECTION = 151,
SEI_TYPE_FISHEYE_VIDEO_INFO = 152,
SEI_TYPE_SPHERE_ROTATION = 154,
SEI_TYPE_REGIONWISE_PACKING = 155,
SEI_TYPE_OMNI_VIEWPORT = 156,
SEI_TYPE_REGIONAL_NESTING = 157,
SEI_TYPE_MCTS_EXTRACTION_INFO_SETS = 158,
SEI_TYPE_MCTS_EXTRACTION_INFO_NESTING = 159,
SEI_TYPE_LAYERS_NOT_PRESENT_5 = 160,
SEI_TYPE_INTER_LAYER_CONSTRAINED_TILE_SETS = 161,
SEI_TYPE_BSP_NESTING = 162,
SEI_TYPE_BSP_INITIAL_ARRIVAL_TIME = 163,
SEI_TYPE_SUB_BITSTREAM_PROPERTY = 164,
SEI_TYPE_ALPHA_CHANNEL_INFO = 165,
SEI_TYPE_OVERLAY_INFO = 166,
SEI_TYPE_TEMPORAL_MV_PREDICTION_CONSTRAINTS = 167,
SEI_TYPE_FRAME_FIELD_INFO = 168,
SEI_TYPE_THREE_DIMENSIONAL_REFERENCE_DISPLAYS_INFO = 176,
SEI_TYPE_DEPTH_REPRESENTATION_INFO_5 = 177,
SEI_TYPE_MULTIVIEW_SCENE_INFO_5 = 178,
SEI_TYPE_MULTIVIEW_ACQUISITION_INFO_5 = 179,
SEI_TYPE_MULTIVIEW_VIEW_POSITION_5 = 180,
SEI_TYPE_ALTERNATIVE_DEPTH_INFO = 181,
SEI_TYPE_SEI_MANIFEST = 200,
SEI_TYPE_SEI_PREFIX_INDICATION = 201,
SEI_TYPE_ANNOTATED_REGIONS = 202,
SEI_TYPE_SUBPIC_LEVEL_INFO = 203,
SEI_TYPE_SAMPLE_ASPECT_RATIO_INFO = 204,
};
#endif /* AVCODEC_SEI_H */

View File

@ -1,112 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_H264_LEVELS_H
#define AVCODEC_H264_LEVELS_H
#include <stdint.h>
#include "cbs_h265.h"
// One row of the H.265 level tables (A.6/A.7): per-level capability limits.
typedef struct H265LevelDescriptor {
const char *name;
uint8_t level_idc;
// Table A.6.
uint32_t max_luma_ps;
uint32_t max_cpb_main;
uint32_t max_cpb_high;
uint16_t max_slice_segments_per_picture;
uint8_t max_tile_rows;
uint8_t max_tile_cols;
// Table A.7.
uint32_t max_luma_sr;
uint32_t max_br_main;
uint32_t max_br_high;
uint8_t min_cr_base_main;
uint8_t min_cr_base_high;
} H265LevelDescriptor;
// One H.265 profile: capability flags and the scaling factors used
// when applying the level tables to that profile.
typedef struct H265ProfileDescriptor {
const char *name;
uint8_t profile_idc;
uint8_t high_throughput;
// Tables A.2, A.3 and A.5.
uint8_t max_14bit;
uint8_t max_12bit;
uint8_t max_10bit;
uint8_t max_8bit;
uint8_t max_422chroma;
uint8_t max_420chroma;
uint8_t max_monochrome;
uint8_t intra;
uint8_t one_picture_only;
uint8_t lower_bit_rate;
// Table A.8.
uint16_t cpb_vcl_factor;
uint16_t cpb_nal_factor;
float format_capability_factor;
float min_cr_scale_factor;
uint8_t max_dpb_pic_buf;
} H265ProfileDescriptor;
// One row of the H.264 level table (A-1): per-level capability limits.
typedef struct H264LevelDescriptor {
const char *name;
uint8_t level_idc;
uint8_t constraint_set3_flag;
uint32_t max_mbps;
uint32_t max_fs;
uint32_t max_dpb_mbs;
uint32_t max_br;
uint32_t max_cpb;
uint16_t max_v_mv_r;
uint8_t min_cr;
uint8_t max_mvs_per_2mb;
} H264LevelDescriptor;
// Look up the profile descriptor matching the given profile_tier_level;
// NOTE(review): presumably returns NULL when no profile matches — confirm.
const H265ProfileDescriptor *ff_h265_get_profile(const H265RawProfileTierLevel *ptl);
/**
* Guess the level of a stream from some parameters.
*
* Unknown parameters may be zero, in which case they are ignored.
*/
const H265LevelDescriptor *ff_h265_guess_level(const H265RawProfileTierLevel *ptl,
int64_t bitrate,
int width, int height,
int slice_segments,
int tile_rows, int tile_cols,
int max_dec_pic_buffering);
/**
* Guess the level of a stream from some parameters.
*
* Unknown parameters may be zero, in which case they are ignored.
*/
const H264LevelDescriptor *ff_h264_guess_level(int profile_idc,
int64_t bitrate,
int framerate,
int width, int height,
int max_dec_frame_buffering);

View File

@ -1,152 +0,0 @@
/*
* Copyright (c) 2010 Mans Rullgard <mans@mansr.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVUTIL_INTMATH_H
#define AVUTIL_INTMATH_H
#include <stdint.h>
#include <libavutil/attributes.h>
/* When a fast count-leading-zeros is available and the compiler supports
 * the builtin, log2 is computed via __builtin_clz; the `| 1` makes the
 * argument non-zero so clz is never called on 0 (undefined for the builtin). */
#if HAVE_FAST_CLZ
#if AV_GCC_VERSION_AT_LEAST(3, 4)
#ifndef ff_log2
#define ff_log2(x) (31 - __builtin_clz((x) | 1))
#ifndef ff_log2_16bit
#define ff_log2_16bit av_log2
#endif
#endif /* ff_log2 */
#endif /* AV_GCC_VERSION_AT_LEAST(3,4) */
#endif
/* Lookup table of log2 values for bytes 0..255, defined elsewhere. */
extern const uint8_t ff_log2_tab[256];
#ifndef ff_log2
#define ff_log2 ff_log2_c
/* Portable fallback: narrow the value to 8 bits by halving the width
 * twice, accumulating the shift amount, then finish with a table lookup. */
static av_always_inline av_const int ff_log2_c(unsigned int v) {
    int n = 0;
    if (v & 0xffff0000) {
        v >>= 16;
        n += 16;
    }
    if (v & 0xff00) {
        v >>= 8;
        n += 8;
    }
    n += ff_log2_tab[v];
    return n;
}
#endif
#ifndef ff_log2_16bit
#define ff_log2_16bit ff_log2_16bit_c
/* 16-bit variant of the log2 fallback: one width-halving step plus
 * a table lookup. Input is assumed to fit in 16 bits. */
static av_always_inline av_const int ff_log2_16bit_c(unsigned int v) {
    int n = 0;
    if (v & 0xff00) {
        v >>= 8;
        n += 8;
    }
    n += ff_log2_tab[v];
    return n;
}
#endif
/* Public names map onto whichever implementation was selected above. */
#define av_log2 ff_log2
#define av_log2_16bit ff_log2_16bit
/**
 * @addtogroup lavu_math
 * @{
 */
/* Prefer compiler builtins for ctz/clz when available. */
#if HAVE_FAST_CLZ
#if AV_GCC_VERSION_AT_LEAST(3, 4)
#ifndef ff_ctz
#define ff_ctz(v) __builtin_ctz(v)
#endif
#ifndef ff_ctzll
#define ff_ctzll(v) __builtin_ctzll(v)
#endif
#ifndef ff_clz
#define ff_clz(v) __builtin_clz(v)
#endif
#endif
#endif
#ifndef ff_ctz
#define ff_ctz ff_ctz_c
/**
 * Trailing zero bit count.
 *
 * @param v input value. If v is 0, the result is undefined.
 * @return the number of trailing 0-bits
 */
/* We use the De-Bruijn method outlined in:
 * http://supertech.csail.mit.edu/papers/debruijn.pdf.
 * (v & -v) isolates the lowest set bit; multiplying by the De Bruijn
 * constant places a unique 5-bit index in the top bits. */
static av_always_inline av_const int ff_ctz_c(int v) {
    static const uint8_t debruijn_ctz32[32] = {
        0, 1, 28, 2, 29, 14, 24, 3, 30, 22, 20, 15, 25, 17, 4, 8,
        31, 27, 13, 23, 21, 19, 16, 7, 26, 12, 18, 6, 11, 5, 10, 9
    };
    return debruijn_ctz32[(uint32_t)((v & -v) * 0x077CB531U) >> 27];
}
#endif
#ifndef ff_ctzll
#define ff_ctzll ff_ctzll_c
/* 64-bit version of the same De Bruijn trailing-zero-count trick.
 * We use the De-Bruijn method outlined in:
 * http://supertech.csail.mit.edu/papers/debruijn.pdf. */
static av_always_inline av_const int ff_ctzll_c(long long v) {
    static const uint8_t debruijn_ctz64[64] = {
        0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
        62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
        63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
        51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12
    };
    return debruijn_ctz64[(uint64_t)((v & -v) * 0x022FDD63CC95386DU) >> 58];
}
#endif
#ifndef ff_clz
#define ff_clz ff_clz_c
/* Portable count-leading-zeros fallback: shift the value down until it
 * is exhausted, decrementing from the full bit width. ff_clz_c(0)
 * therefore yields sizeof(unsigned) * 8. */
static av_always_inline av_const unsigned ff_clz_c(unsigned x) {
    unsigned zeros = sizeof(x) * 8;
    for (; x != 0; x >>= 1)
        zeros--;
    return zeros;
}
#endif
/* Parity via the compiler builtin when available; there is no portable
 * fallback in this header. */
#if AV_GCC_VERSION_AT_LEAST(3, 4)
#ifndef av_parity
#define av_parity __builtin_parity
#endif
#endif
/**
 * @}
 */
#endif /* AVUTIL_INTMATH_H */

View File

@ -1,243 +0,0 @@
/*
* simple math operations
* Copyright (c) 2001, 2002 Fabrice Bellard
* Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_MATHOPS_H
#define AVCODEC_MATHOPS_H
#include "config.h"
#include <stdint.h>
#include <libavutil/common.h>
#define MAX_NEG_CROP 1024
/* Shared lookup tables, defined in the corresponding .c file. */
extern const uint8_t ff_reverse[256];
extern const uint32_t ff_inverse[257];
extern const uint8_t ff_sqrt_tab[256];
extern const uint8_t ff_crop_tab[256 + 2 * MAX_NEG_CROP];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag_scan[16 + 1];
/* All of the following may be overridden by arch-specific headers,
 * hence the #ifndef guards. */
/* 32x32 -> 64 signed multiply. */
#ifndef MUL64
#define MUL64(a, b) ((int64_t)(a) * (int64_t)(b))
#endif
/* 64-bit multiply, keep only bits above s. */
#ifndef MULL
#define MULL(a, b, s) (MUL64(a, b) >> (s))
#endif
/* High 32 bits of a signed 32x32 multiply. */
#ifndef MULH
static av_always_inline int MULH(int a, int b) {
    return MUL64(a, b) >> 32;
}
#endif
/* High 32 bits of an unsigned 32x32 multiply. */
#ifndef UMULH
static av_always_inline unsigned UMULH(unsigned a, unsigned b) {
    return ((uint64_t)(a) * (uint64_t)(b)) >> 32;
}
#endif
/* 64-bit multiply-accumulate / multiply-subtract. */
#ifndef MAC64
#define MAC64(d, a, b) ((d) += MUL64(a, b))
#endif
#ifndef MLS64
#define MLS64(d, a, b) ((d) -= MUL64(a, b))
#endif
/* signed 16x16 -> 32 multiply add accumulate */
#ifndef MAC16
#define MAC16(rt, ra, rb) rt += (ra) * (rb)
#endif
/* signed 16x16 -> 32 multiply */
#ifndef MUL16
#define MUL16(ra, rb) ((ra) * (rb))
#endif
/* signed 16x16 -> 32 multiply subtract */
#ifndef MLS16
#define MLS16(rt, ra, rb) ((rt) -= (ra) * (rb))
#endif
/* median of 3 */
#ifndef mid_pred
#define mid_pred mid_pred
/* Return the median of three ints. Same comparison network as the
 * classic branchy form, expressed with conditional expressions. */
static inline av_const int mid_pred(int a, int b, int c) {
    if (a > b) {
        /* Here b is the smaller of (a, b): replace it by whichever of
         * a and c is the median when c exceeds b. */
        if (c > b)
            b = (c > a) ? a : c;
    } else {
        /* Here a <= b: if b exceeds c it cannot be the median. */
        if (b > c)
            b = (c > a) ? c : a;
    }
    return b;
}
#endif
#ifndef median4
#define median4 median4
/* Median of four ints: drop the minimum and maximum, average the two
 * middle values (integer division). Rewritten as ternary returns. */
static inline av_const int median4(int a, int b, int c, int d) {
    if (a < b)
        return (c < d) ? (FFMIN(b, d) + FFMAX(a, c)) / 2
                       : (FFMIN(b, c) + FFMAX(a, d)) / 2;
    return (c < d) ? (FFMIN(a, d) + FFMAX(b, c)) / 2
                   : (FFMIN(a, c) + FFMAX(b, d)) / 2;
}
#endif
#ifndef sign_extend
/* Sign-extend the low `bits` bits of val. The union avoids relying on
 * implementation-defined signed left shifts; the final arithmetic right
 * shift performs the extension. */
static inline av_const int sign_extend(int val, unsigned bits) {
    unsigned shift = 8 * sizeof(int) - bits;
    union {
        unsigned u;
        int s;
    } v = { (unsigned)val << shift };
    return v.s >> shift;
}
#endif
#ifndef zero_extend
/* Keep only the low `bits` bits of val (zero the rest). */
static inline av_const unsigned zero_extend(unsigned val, unsigned bits) {
    return (val << ((8 * sizeof(int)) - bits)) >> ((8 * sizeof(int)) - bits);
}
#endif
/* If y < x, copy y into x and b, d into a, c in one step. */
#ifndef COPY3_IF_LT
#define COPY3_IF_LT(x, y, a, b, c, d) \
    if ((y) < (x)) {                  \
        (x) = (y);                    \
        (a) = (b);                    \
        (c) = (d);                    \
    }
#endif
/* Branch-free absolute value: mask becomes the sign (all ones when
 * negative), level becomes |level|. Relies on arithmetic right shift. */
#ifndef MASK_ABS
#define MASK_ABS(mask, level)          \
    do {                               \
        mask = level >> 31;            \
        level = (level ^ mask) - mask; \
    } while(0)
#endif
/* Shift right by (32 - s), signed and unsigned variants. */
#ifndef NEG_SSR32
#define NEG_SSR32(a, s) (((int32_t)(a)) >> (32 - (s)))
#endif
#ifndef NEG_USR32
#define NEG_USR32(a, s) (((uint32_t)(a)) >> (32 - (s)))
#endif
/* Byte/halfword packing helpers. Each may already be provided by an
 * arch-specific header, hence the #ifndef guards. The two variants place
 * the first argument in the most- or least-significant position so that
 * the packed word has native byte order. */
#if HAVE_BIGENDIAN
#ifndef PACK_2U8
#define PACK_2U8(a, b) (((a) << 8) | (b))
#endif
#ifndef PACK_4U8
#define PACK_4U8(a, b, c, d) (((a) << 24) | ((b) << 16) | ((c) << 8) | (d))
#endif
#ifndef PACK_2U16
#define PACK_2U16(a, b) (((a) << 16) | (b))
#endif
#else
#ifndef PACK_2U8
#define PACK_2U8(a, b) (((b) << 8) | (a))
#endif
/* Bug fix: this guard previously tested PACK_4U2 (a typo), so an
 * arch-provided PACK_4U8 would have been redefined here. */
#ifndef PACK_4U8
#define PACK_4U8(a, b, c, d) (((d) << 24) | ((c) << 16) | ((b) << 8) | (a))
#endif
#ifndef PACK_2U16
#define PACK_2U16(a, b) (((b) << 16) | (a))
#endif
#endif
/* Signed variants: mask each argument to its unsigned width first. */
#ifndef PACK_2S8
#define PACK_2S8(a, b) PACK_2U8((a)&255, (b)&255)
#endif
#ifndef PACK_4S8
#define PACK_4S8(a, b, c, d) PACK_4U8((a)&255, (b)&255, (c)&255, (d)&255)
#endif
#ifndef PACK_2S16
#define PACK_2S16(a, b) PACK_2U16((a)&0xffff, (b)&0xffff)
#endif
/* Fast unsigned division by b using a precomputed reciprocal table;
 * valid for the index range covered by ff_inverse. */
#ifndef FASTDIV
#define FASTDIV(a, b) ((uint32_t)((((uint64_t)a) * ff_inverse[b]) >> 32))
#endif /* FASTDIV */
#ifndef ff_sqrt
#define ff_sqrt ff_sqrt
/* Integer square root via ff_sqrt_tab for small inputs, with a
 * Newton-style refinement step for large values. The final subtraction
 * corrects a possible overestimate by one. */
static inline av_const unsigned int ff_sqrt(unsigned int a) {
    unsigned int b;
    if (a < 255) return (ff_sqrt_tab[a + 1] - 1) >> 4;
    else if (a < (1 << 12))
        b = ff_sqrt_tab[a >> 4] >> 2;
#if !CONFIG_SMALL
    /* Extra table ranges traded for code size when not optimizing for size. */
    else if (a < (1 << 14))
        b = ff_sqrt_tab[a >> 6] >> 1;
    else if (a < (1 << 16))
        b = ff_sqrt_tab[a >> 8];
#endif
    else {
        int s = av_log2_16bit(a >> 16) >> 1;
        unsigned int c = a >> (s + 2);
        b = ff_sqrt_tab[c >> (s + 8)];
        b = FASTDIV(c, b) + (b << s);
    }
    return b - (a < b * b);
}
#endif
/* Square of a float. */
static inline av_const float ff_sqrf(float a) {
    return a * a;
}
/* Reinterpret a uint8_t bit pattern as int8_t without relying on
 * implementation-defined narrowing conversion; uses union type punning. */
static inline int8_t ff_u8_to_s8(uint8_t a) {
    union {
        uint8_t u8;
        int8_t s8;
    } b;
    b.u8 = a;
    return b.s8;
}
/* Reverse the bit order of a 32-bit word, one byte at a time via the
 * ff_reverse lookup table (bytes swap position, bits swap within bytes). */
static av_always_inline uint32_t bitswap_32(uint32_t x) {
    return (uint32_t)ff_reverse[x & 0xFF] << 24 |
           (uint32_t)ff_reverse[(x >> 8) & 0xFF] << 16 |
           (uint32_t)ff_reverse[(x >> 16) & 0xFF] << 8 |
           (uint32_t)ff_reverse[x >> 24];
}
#endif /* AVCODEC_MATHOPS_H */

View File

@ -1,406 +0,0 @@
/*
* copyright (c) 2004 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* bitstream writer API
*/
#ifndef AVCODEC_PUT_BITS_H
#define AVCODEC_PUT_BITS_H
#include "config.h"
#include <stddef.h>
#include <stdint.h>
#include <libavutil/avassert.h>
#include <libavutil/intreadwrite.h>
/* The accumulator word is 64-bit where native 64-bit arithmetic is fast,
 * 32-bit otherwise; the AV_W*BUF macros write it out in one store. */
#if HAVE_FAST_64BIT
typedef uint64_t BitBuf;
#define AV_WBBUF AV_WB64
#define AV_WLBUF AV_WL64
#else
typedef uint32_t BitBuf;
#define AV_WBBUF AV_WB32
#define AV_WLBUF AV_WL32
#endif
/* Width of the accumulator in bits (32 or 64). */
static const int BUF_BITS = 8 * sizeof(BitBuf);
typedef struct PutBitContext {
    BitBuf bit_buf;   // bits accumulated but not yet stored to buf
    int bit_left;     // free bits remaining in bit_buf
    uint8_t *buf, *buf_ptr, *buf_end; // output buffer: start, cursor, end
} PutBitContext;
/**
 * Initialize the PutBitContext s.
 *
 * @param buffer the buffer where to put bits
 * @param buffer_size the size in bytes of buffer; a negative size is
 *                    treated as an empty (NULL) buffer
 */
static inline void init_put_bits(PutBitContext *s, uint8_t *buffer,
                                 int buffer_size) {
    /* Clamp invalid sizes to an empty buffer. */
    if (buffer_size < 0) {
        buffer      = NULL;
        buffer_size = 0;
    }
    s->bit_buf  = 0;
    s->bit_left = BUF_BITS;
    s->buf      = buffer;
    s->buf_ptr  = buffer;
    s->buf_end  = buffer + buffer_size;
}
/**
 * @return the total number of bits written to the bitstream.
 */
static inline int put_bits_count(PutBitContext *s) {
    /* Bytes already flushed, plus bits pending in the accumulator. */
    return (s->buf_ptr - s->buf) * 8 + BUF_BITS - s->bit_left;
}
/**
 * @return the number of bytes output so far; may only be called
 *         when the PutBitContext is freshly initialized or flushed.
 */
static inline int put_bytes_output(const PutBitContext *s) {
    /* Accumulator must be empty, i.e. after init or flush_put_bits(). */
    av_assert2(s->bit_left == BUF_BITS);
    return s->buf_ptr - s->buf;
}
/**
 * @param round_up When set, the number of bits written so far will be
 *                 rounded up to the next byte.
 * @return the number of bytes output so far.
 */
static inline int put_bytes_count(const PutBitContext *s, int round_up) {
    return s->buf_ptr - s->buf + ((BUF_BITS - s->bit_left + (round_up ? 7 : 0)) >> 3);
}
/**
 * Rebase the bit writer onto a reallocated buffer.
 *
 * @param buffer the buffer where to put bits
 * @param buffer_size the size in bytes of buffer,
 *                    must be large enough to hold everything written so far
 */
static inline void rebase_put_bits(PutBitContext *s, uint8_t *buffer,
                                   int buffer_size) {
    av_assert0(8 * buffer_size >= put_bits_count(s));
    /* Preserve the write cursor's offset relative to the old buffer. */
    s->buf_end = buffer + buffer_size;
    s->buf_ptr = buffer + (s->buf_ptr - s->buf);
    s->buf = buffer;
}
/**
 * @return the number of bits available in the bitstream.
 */
static inline int put_bits_left(PutBitContext *s) {
    return (s->buf_end - s->buf_ptr) * 8 - BUF_BITS + s->bit_left;
}
/**
 * @param round_up When set, the number of bits written will be
 *                 rounded up to the next byte.
 * @return the number of bytes left.
 */
static inline int put_bytes_left(const PutBitContext *s, int round_up) {
    return s->buf_end - s->buf_ptr - ((BUF_BITS - s->bit_left + (round_up ? 7 : 0)) >> 3);
}
/**
 * Pad the end of the output stream with zeros.
 * Drains the accumulator to the buffer one byte at a time and resets it.
 */
static inline void flush_put_bits(PutBitContext *s) {
#ifndef BITSTREAM_WRITER_LE
    /* Big-endian mode: left-align the pending bits so the zero padding
     * ends up in the low-order positions. */
    if (s->bit_left < BUF_BITS)
        s->bit_buf <<= s->bit_left;
#endif
    while (s->bit_left < BUF_BITS) {
        av_assert0(s->buf_ptr < s->buf_end);
#ifdef BITSTREAM_WRITER_LE
        *s->buf_ptr++ = s->bit_buf;
        s->bit_buf >>= 8;
#else
        *s->buf_ptr++ = s->bit_buf >> (BUF_BITS - 8);
        s->bit_buf <<= 8;
#endif
        s->bit_left += 8;
    }
    s->bit_left = BUF_BITS;
    s->bit_buf = 0;
}
/* Little-endian flush, usable regardless of the compile-time default. */
static inline void flush_put_bits_le(PutBitContext *s) {
    while (s->bit_left < BUF_BITS) {
        av_assert0(s->buf_ptr < s->buf_end);
        *s->buf_ptr++ = s->bit_buf;
        s->bit_buf >>= 8;
        s->bit_left += 8;
    }
    s->bit_left = BUF_BITS;
    s->bit_buf = 0;
}
/* These helpers are only implemented for the big-endian writer; in
 * little-endian builds the renames below force a link error on use. */
#ifdef BITSTREAM_WRITER_LE
#define ff_put_string ff_put_string_unsupported_here
#define ff_copy_bits ff_copy_bits_unsupported_here
#else
/**
 * Put the string string in the bitstream.
 *
 * @param terminate_string 0-terminates the written string if value is 1
 */
void ff_put_string(PutBitContext *pb, const char *string,
                   int terminate_string);
/**
 * Copy the content of src to the bitstream.
 *
 * @param length the number of bits of src to copy
 */
void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length);
#endif
/* Core bit-writing primitive: append n bits of value to the stream
 * without validating n or value (callers assert). Accumulates into a
 * local copy of the bit buffer and flushes one full BitBuf word to the
 * output when it fills. */
static inline void put_bits_no_assert(PutBitContext *s, int n, BitBuf value) {
    BitBuf bit_buf;
    int bit_left;
    bit_buf = s->bit_buf;
    bit_left = s->bit_left;
    /* XXX: optimize */
#ifdef BITSTREAM_WRITER_LE
    /* LE: new bits enter above the already-accumulated ones. */
    bit_buf |= value << (BUF_BITS - bit_left);
    if (n >= bit_left) {
        /* Accumulator full: store one word, keep the overflow bits. */
        if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
            AV_WLBUF(s->buf_ptr, bit_buf);
            s->buf_ptr += sizeof(BitBuf);
        }
        else {
            av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
            av_assert2(0);
        }
        bit_buf = value >> bit_left;
        bit_left += BUF_BITS;
    }
    bit_left -= n;
#else
    /* BE: shift previous bits up, append the new ones at the bottom. */
    if (n < bit_left) {
        bit_buf = (bit_buf << n) | value;
        bit_left -= n;
    }
    else {
        bit_buf <<= bit_left;
        bit_buf |= value >> (n - bit_left);
        if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
            AV_WBBUF(s->buf_ptr, bit_buf);
            s->buf_ptr += sizeof(BitBuf);
        }
        else {
            av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
            av_assert2(0);
        }
        bit_left += BUF_BITS - n;
        bit_buf = value;
    }
#endif
    s->bit_buf = bit_buf;
    s->bit_left = bit_left;
}
/**
 * Write up to 31 bits into a bitstream.
 * Use put_bits32 to write 32 bits.
 */
static inline void put_bits(PutBitContext *s, int n, BitBuf value) {
    av_assert2(n <= 31 && value < (1UL << n));
    put_bits_no_assert(s, n, value);
}
/* Little-endian variant of put_bits, independent of the build default;
 * same accumulate-and-flush scheme as the LE path of put_bits_no_assert. */
static inline void put_bits_le(PutBitContext *s, int n, BitBuf value) {
    BitBuf bit_buf;
    int bit_left;
    av_assert2(n <= 31 && value < (1UL << n));
    bit_buf = s->bit_buf;
    bit_left = s->bit_left;
    bit_buf |= value << (BUF_BITS - bit_left);
    if (n >= bit_left) {
        if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
            AV_WLBUF(s->buf_ptr, bit_buf);
            s->buf_ptr += sizeof(BitBuf);
        }
        else {
            av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
            av_assert2(0);
        }
        bit_buf = value >> bit_left;
        bit_left += BUF_BITS;
    }
    bit_left -= n;
    s->bit_buf = bit_buf;
    s->bit_left = bit_left;
}
/* Write a signed value in n bits (two's complement, truncated to n bits). */
static inline void put_sbits(PutBitContext *pb, int n, int32_t value) {
    av_assert2(n >= 0 && n <= 31);
    put_bits(pb, n, av_mod_uintp2(value, n));
}
/**
 * Write exactly 32 bits into a bitstream.
 * Separate from put_bits() because a 32-bit value can overflow the
 * accumulator arithmetic when BitBuf is itself 32 bits wide.
 */
static void av_unused put_bits32(PutBitContext *s, uint32_t value) {
    BitBuf bit_buf;
    int bit_left;
    /* With a 64-bit accumulator the generic path is always safe. */
    if (BUF_BITS > 32) {
        put_bits_no_assert(s, 32, value);
        return;
    }
    /* 32-bit accumulator: writing 32 bits always fills it, so a word
     * is unconditionally stored to the output buffer. */
    bit_buf = s->bit_buf;
    bit_left = s->bit_left;
#ifdef BITSTREAM_WRITER_LE
    bit_buf |= (BitBuf)value << (BUF_BITS - bit_left);
    if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
        AV_WLBUF(s->buf_ptr, bit_buf);
        s->buf_ptr += sizeof(BitBuf);
    }
    else {
        av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
        av_assert2(0);
    }
    /* 64-bit intermediate avoids undefined shift by 32 when bit_left == 32. */
    bit_buf = (uint64_t)value >> bit_left;
#else
    bit_buf = (uint64_t)bit_buf << bit_left;
    bit_buf |= (BitBuf)value >> (BUF_BITS - bit_left);
    if (s->buf_end - s->buf_ptr >= sizeof(BitBuf)) {
        AV_WBBUF(s->buf_ptr, bit_buf);
        s->buf_ptr += sizeof(BitBuf);
    }
    else {
        av_log(NULL, AV_LOG_ERROR, "Internal error, put_bits buffer too small\n");
        av_assert2(0);
    }
    bit_buf = value;
#endif
    s->bit_buf = bit_buf;
    s->bit_left = bit_left;
}
/**
 * Write up to 64 bits into a bitstream.
 * Splits the value into 32-bit halves and emits them in stream order
 * (low half first for the little-endian writer, high half first otherwise).
 */
static inline void put_bits64(PutBitContext *s, int n, uint64_t value) {
    av_assert2((n == 64) || (n < 64 && value < (UINT64_C(1) << n)));
    if (n < 32)
        put_bits(s, n, value);
    else if (n == 32)
        put_bits32(s, value);
    else if (n < 64) {
        uint32_t lo = value & 0xffffffff;
        uint32_t hi = value >> 32;
#ifdef BITSTREAM_WRITER_LE
        put_bits32(s, lo);
        put_bits(s, n - 32, hi);
#else
        put_bits(s, n - 32, hi);
        put_bits32(s, lo);
#endif
    }
    else {
        uint32_t lo = value & 0xffffffff;
        uint32_t hi = value >> 32;
#ifdef BITSTREAM_WRITER_LE
        put_bits32(s, lo);
        put_bits32(s, hi);
#else
        put_bits32(s, hi);
        put_bits32(s, lo);
#endif
    }
}
/**
 * Return the pointer to the byte where the bitstream writer will put
 * the next bit.
 */
static inline uint8_t *put_bits_ptr(PutBitContext *s) {
    return s->buf_ptr;
}
/**
 * Skip the given number of bytes.
 * PutBitContext must be flushed & aligned to a byte boundary before calling this.
 */
static inline void skip_put_bytes(PutBitContext *s, int n) {
    av_assert2((put_bits_count(s) & 7) == 0);
    av_assert2(s->bit_left == BUF_BITS);
    av_assert0(n <= s->buf_end - s->buf_ptr);
    s->buf_ptr += n;
}
/**
 * Skip the given number of bits.
 * Must only be used if the actual values in the bitstream do not matter.
 * If n is < 0 the behavior is undefined.
 */
static inline void skip_put_bits(PutBitContext *s, int n) {
    /* Advance the cursor by whole accumulator words, keep the remainder
     * as pending bits. */
    unsigned bits = BUF_BITS - s->bit_left + n;
    s->buf_ptr += sizeof(BitBuf) * (bits / BUF_BITS);
    s->bit_left = BUF_BITS - (bits & (BUF_BITS - 1));
}
/**
 * Change the end of the buffer.
 *
 * @param size the new size in bytes of the buffer where to put bits
 */
static inline void set_put_bits_buffer_size(PutBitContext *s, int size) {
    av_assert0(size <= INT_MAX / 8 - BUF_BITS);
    s->buf_end = s->buf + size;
}
/**
 * Pad the bitstream with zeros up to the next byte boundary.
 */
static inline void align_put_bits(PutBitContext *s) {
    put_bits(s, s->bit_left & 7, 0);
}
#undef AV_WBBUF
#undef AV_WLBUF
#endif /* AVCODEC_PUT_BITS_H */

View File

@ -1,349 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <libavcodec/avcodec.h>
#include "include/cbs/video_levels.h"
// H.264 table A-1.
// Ordered by increasing capability; note "1b" appears twice because it is
// signalled either as level_idc 11 + constraint_set3_flag or as level_idc 9.
static const H264LevelDescriptor h264_levels[] = {
    // Name          MaxMBPS                   MaxBR              MinCR
    //  | level_idc     |       MaxFS            |    MaxCPB        | MaxMvsPer2Mb
    //  |     | cs3f    |         |  MaxDpbMbs   |       |  MaxVmvR |   |
    { "1",   10, 0,     1485,     99,    396,     64,    175,  64, 2,  0 },
    { "1b",  11, 1,     1485,     99,    396,    128,    350,  64, 2,  0 },
    { "1b",   9, 0,     1485,     99,    396,    128,    350,  64, 2,  0 },
    { "1.1", 11, 0,     3000,    396,    900,    192,    500, 128, 2,  0 },
    { "1.2", 12, 0,     6000,    396,   2376,    384,   1000, 128, 2,  0 },
    { "1.3", 13, 0,    11880,    396,   2376,    768,   2000, 128, 2,  0 },
    { "2",   20, 0,    11880,    396,   2376,   2000,   2000, 128, 2,  0 },
    { "2.1", 21, 0,    19800,    792,   4752,   4000,   4000, 256, 2,  0 },
    { "2.2", 22, 0,    20250,   1620,   8100,   4000,   4000, 256, 2,  0 },
    { "3",   30, 0,    40500,   1620,   8100,  10000,  10000, 256, 2, 32 },
    { "3.1", 31, 0,   108000,   3600,  18000,  14000,  14000, 512, 4, 16 },
    { "3.2", 32, 0,   216000,   5120,  20480,  20000,  20000, 512, 4, 16 },
    { "4",   40, 0,   245760,   8192,  32768,  20000,  25000, 512, 4, 16 },
    { "4.1", 41, 0,   245760,   8192,  32768,  50000,  62500, 512, 2, 16 },
    { "4.2", 42, 0,   522240,   8704,  34816,  50000,  62500, 512, 2, 16 },
    { "5",   50, 0,   589824,  22080, 110400, 135000, 135000, 512, 2, 16 },
    { "5.1", 51, 0,   983040,  36864, 184320, 240000, 240000, 512, 2, 16 },
    { "5.2", 52, 0,  2073600,  36864, 184320, 240000, 240000, 512, 2, 16 },
    { "6",   60, 0,  4177920, 139264, 696320, 240000, 240000, 8192, 2, 16 },
    { "6.1", 61, 0,  8355840, 139264, 696320, 480000, 480000, 8192, 2, 16 },
    { "6.2", 62, 0, 16711680, 139264, 696320, 800000, 800000, 8192, 2, 16 },
};
// H.264 table A-2 plus values from A-1.
// Bitrate/CPB multipliers per profile_idc; MaxBR/MaxCPB from Table A-1
// are scaled by these factors.
static const struct {
    int profile_idc;       // profile this row applies to
    int cpb_br_vcl_factor; // VCL bitstream factor
    int cpb_br_nal_factor; // NAL bitstream factor (the one actually used here)
} h264_br_factors[] = {
    {  66, 1000, 1200 },
    {  77, 1000, 1200 },
    {  88, 1000, 1200 },
    { 100, 1250, 1500 },
    { 110, 3000, 3600 },
    { 122, 4000, 4800 },
    { 244, 4000, 4800 },
    {  44, 4000, 4800 },
};
// We are only ever interested in the NAL bitrate factor.
static int h264_get_br_factor(int profile_idc) {
int i;
for(i = 0; i < FF_ARRAY_ELEMS(h264_br_factors); i++) {
if(h264_br_factors[i].profile_idc == profile_idc)
return h264_br_factors[i].cpb_br_nal_factor;
}
// Default to the non-high profile value if not specified.
return 1200;
}
/* Pick the lowest H.264 level whose Table A-1 limits accommodate the
 * given stream parameters; zero parameters are ignored. */
const H264LevelDescriptor *ff_h264_guess_level(int profile_idc,
                                               int64_t bitrate,
                                               int framerate,
                                               int width, int height,
                                               int max_dec_frame_buffering) {
    /* Dimensions in macroblocks, rounded up. */
    int width_mbs  = (width + 15) / 16;
    int height_mbs = (height + 15) / 16;
    /* constraint_set3_flag levels ("1b") only exist for profiles 66/77/88. */
    int no_cs3f = !(profile_idc == 66 ||
                    profile_idc == 77 ||
                    profile_idc == 88);
    int i;
    /* Levels are ordered ascending: the first row passing all checks wins. */
    for (i = 0; i < FF_ARRAY_ELEMS(h264_levels); i++) {
        const H264LevelDescriptor *level = &h264_levels[i];
        if (level->constraint_set3_flag && no_cs3f)
            continue;
        if (bitrate > (int64_t)level->max_br * h264_get_br_factor(profile_idc))
            continue;
        if (width_mbs * height_mbs > level->max_fs)
            continue;
        /* Aspect constraints: each dimension squared is bounded by 8*MaxFS. */
        if (width_mbs * width_mbs > 8 * level->max_fs)
            continue;
        if (height_mbs * height_mbs > 8 * level->max_fs)
            continue;
        if (width_mbs && height_mbs) {
            /* DPB size in frames, capped at 16 per the spec. */
            int max_dpb_frames =
                FFMIN(level->max_dpb_mbs / (width_mbs * height_mbs), 16);
            if (max_dec_frame_buffering > max_dpb_frames)
                continue;
            if (framerate > (level->max_mbps / (width_mbs * height_mbs)))
                continue;
        }
        return level;
    }
    // No usable levels found - frame is too big or bitrate is too high.
    return NULL;
}
// H.265 level limits (spec Annex A); rows ordered ascending so the first
// matching row in ff_h265_guess_level() is the lowest usable level.
static const H265LevelDescriptor h265_levels[] = {
    // Name             CpbFactor-Main             MaxSliceSegmentsPerPicture
    //    | level_idc        |   CpbFactor-High           MaxLumaSr            BrFactor-High
    //    |      | MaxLumaPs |       |           |  MaxTileRows  |      BrFactor-Main  |   MinCr-Main
    //    |      |        |  |       |           |     | MaxTileCols |          |      |      | MinCr-High
    { "1",    30,    36864,    350,      0,  16,  1,  1,     552960,    128,      0, 2, 2 },
    { "2",    60,   122880,   1500,      0,  16,  1,  1,    3686400,   1500,      0, 2, 2 },
    { "2.1",  63,   245760,   3000,      0,  20,  1,  1,    7372800,   3000,      0, 2, 2 },
    { "3",    90,   552960,   6000,      0,  30,  2,  2,   16588800,   6000,      0, 2, 2 },
    { "3.1",  93,   983040,  10000,      0,  40,  3,  3,   33177600,  10000,      0, 2, 2 },
    { "4",   120,  2228224,  12000,  30000,  75,  5,  5,   66846720,  12000,  30000, 4, 4 },
    { "4.1", 123,  2228224,  20000,  50000,  75,  5,  5,  133693440,  20000,  50000, 4, 4 },
    { "5",   150,  8912896,  25000, 100000, 200, 11, 10,  267386880,  25000, 100000, 6, 4 },
    { "5.1", 153,  8912896,  40000, 160000, 200, 11, 10,  534773760,  40000, 160000, 8, 4 },
    { "5.2", 156,  8912896,  60000, 240000, 200, 11, 10, 1069547520,  60000, 240000, 8, 4 },
    { "6",   180, 35651584,  60000, 240000, 600, 22, 20, 1069547520,  60000, 240000, 8, 4 },
    { "6.1", 183, 35651584, 120000, 480000, 600, 22, 20, 2139095040, 120000, 480000, 8, 4 },
    { "6.2", 186, 35651584, 240000, 800000, 600, 22, 20, 4278190080, 240000, 800000, 6, 4 },
};
// H.265 profile descriptors. The flag columns use 0/1 to require that
// value in the PTL constraint flags and 2 for "don't care" (see the
// check_flag logic in ff_h265_get_profile()).
static const H265ProfileDescriptor h265_profiles[] = {
    // profile_idc   8bit       one-picture
    //   HT-profile  | 422chroma    | lower-bit-rate
    //   | 14bit     | | 420chroma  | | CpbVclFactor     MinCrScaleFactor
    //   | | 12bit   | | | monochrome| |    | CpbNalFactor    | maxDpbPicBuf
    //   | | | 10bit | | | | intra  | |     |    | FormatCapabilityFactor
    { "Monochrome", //           |  | |     |    |    |       |    |
      4, 0, 2, 1, 1, 1, 1, 1, 1, 0, 0, 1,  667,  733, 1.000, 1.0, 6 },
    { "Monochrome 10",
      4, 0, 2, 1, 1, 0, 1, 1, 1, 0, 0, 1,  833,  917, 1.250, 1.0, 6 },
    { "Monochrome 12",
      4, 0, 2, 1, 0, 0, 1, 1, 1, 0, 0, 1, 1000, 1100, 1.500, 1.0, 6 },
    { "Monochrome 16",
      4, 0, 2, 0, 0, 0, 1, 1, 1, 0, 0, 1, 1333, 1467, 2.000, 1.0, 6 },
    { "Main",
      1, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1000, 1100, 1.500, 1.0, 6 },
    { "Screen-Extended Main",
      9, 0, 1, 1, 1, 1, 1, 1, 0, 0, 0, 1, 1000, 1100, 1.500, 1.0, 7 },
    { "Main 10",
      2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 0, 2, 1000, 1100, 1.875, 1.0, 6 },
    { "Screen-Extended Main 10",
      9, 0, 1, 1, 1, 0, 1, 1, 0, 0, 0, 1, 1000, 1100, 1.875, 1.0, 7 },
    { "Main 12",
      4, 0, 2, 1, 0, 0, 1, 1, 0, 0, 0, 1, 1500, 1650, 2.250, 1.0, 6 },
    { "Main Still Picture",
      3, 0, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 1000, 1100, 1.500, 1.0, 6 },
    { "Main 10 Still Picture",
      2, 0, 2, 2, 2, 2, 2, 2, 2, 2, 1, 2, 1000, 1100, 1.875, 1.0, 6 },
    { "Main 4:2:2 10",
      4, 0, 2, 1, 1, 0, 1, 0, 0, 0, 0, 1, 1667, 1833, 2.500, 0.5, 6 },
    { "Main 4:2:2 12",
      4, 0, 2, 1, 0, 0, 1, 0, 0, 0, 0, 1, 2000, 2200, 3.000, 0.5, 6 },
    { "Main 4:4:4",
      4, 0, 2, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2000, 2200, 3.000, 0.5, 6 },
    { "High Throughput 4:4:4",
      5, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2000, 2200, 3.000, 0.5, 6 },
    { "Screen-Extended Main 4:4:4",
      9, 0, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2000, 2200, 3.000, 0.5, 7 },
    { "Screen-Extended High Throughput 4:4:4",
      9, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 1, 2000, 2200, 3.000, 0.5, 7 },
    { "Main 4:4:4 10",
      4, 0, 2, 1, 1, 0, 0, 0, 0, 0, 0, 1, 2500, 2750, 3.750, 0.5, 6 },
    { "High Throughput 4:4:4 10",
      5, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 2500, 2750, 3.750, 0.5, 6 },
    { "Screen-Extended Main 4:4:4 10",
      9, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 2500, 2750, 3.750, 0.5, 7 },
    { "Screen-Extended High Throughput 4:4:4 10",
      9, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 1, 2500, 2750, 3.750, 0.5, 7 },
    { "Main 4:4:4 12",
      4, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 1, 3000, 3300, 4.500, 0.5, 6 },
    { "High Throughput 4:4:4 14",
      5, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3500, 3850, 5.250, 0.5, 6 },
    { "Screen-Extended High Throughput 4:4:4 14",
      9, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 1, 3500, 3850, 5.250, 0.5, 7 },
    { "Main Intra",
      4, 0, 2, 1, 1, 1, 1, 1, 0, 1, 0, 2, 1000, 1100, 1.500, 1.0, 6 },
    { "Main 10 Intra",
      4, 0, 2, 1, 1, 0, 1, 1, 0, 1, 0, 2, 1000, 1100, 1.875, 1.0, 6 },
    { "Main 12 Intra",
      4, 0, 2, 1, 0, 0, 1, 1, 0, 1, 0, 2, 1500, 1650, 2.250, 1.0, 6 },
    { "Main 4:2:2 10 Intra",
      4, 0, 2, 1, 1, 0, 1, 0, 0, 1, 0, 2, 1667, 1833, 2.500, 0.5, 6 },
    { "Main 4:2:2 12 Intra",
      4, 0, 2, 1, 0, 0, 1, 0, 0, 1, 0, 2, 2000, 2200, 3.000, 0.5, 6 },
    { "Main 4:4:4 Intra",
      4, 0, 2, 1, 1, 1, 0, 0, 0, 1, 0, 2, 2000, 2200, 3.000, 0.5, 6 },
    { "Main 4:4:4 10 Intra",
      4, 0, 2, 1, 1, 0, 0, 0, 0, 1, 0, 2, 2500, 2750, 3.750, 0.5, 6 },
    { "Main 4:4:4 12 Intra",
      4, 0, 2, 1, 0, 0, 0, 0, 0, 1, 0, 2, 3000, 3300, 4.500, 0.5, 6 },
    { "Main 4:4:4 16 Intra",
      4, 0, 2, 0, 0, 0, 0, 0, 0, 1, 0, 2, 4000, 4400, 6.000, 0.5, 6 },
    { "Main 4:4:4 Still Picture",
      4, 0, 2, 1, 1, 1, 0, 0, 0, 1, 1, 2, 2000, 2200, 3.000, 0.5, 6 },
    { "Main 4:4:4 16 Still Picture",
      4, 0, 2, 0, 0, 0, 0, 0, 0, 1, 1, 2, 4000, 4400, 6.000, 0.5, 6 },
    { "High Throughput 4:4:4 16 Intra",
      5, 1, 0, 0, 0, 0, 0, 0, 0, 1, 0, 2, 4000, 4400, 6.000, 0.5, 6 },
};
/* Find the profile descriptor matching a parsed profile_tier_level.
 * Returns NULL for unknown profile spaces or when no row matches. */
const H265ProfileDescriptor *ff_h265_get_profile(const H265RawProfileTierLevel *ptl) {
    int i;
    /* Only profile space 0 is defined by the tables above. */
    if (ptl->general_profile_space)
        return NULL;
    for (i = 0; i < FF_ARRAY_ELEMS(h265_profiles); i++) {
        const H265ProfileDescriptor *profile = &h265_profiles[i];
        if (ptl->general_profile_idc &&
            ptl->general_profile_idc != profile->profile_idc)
            continue;
        if (!ptl->general_profile_compatibility_flag[profile->profile_idc])
            continue;
        /* Table values 0/1 require an exact flag match; 2 means don't-care. */
#define check_flag(name) \
    if (profile->name < 2) { \
        if (profile->name != ptl->general_##name##_constraint_flag) \
            continue; \
    }
        check_flag(max_14bit);
        check_flag(max_12bit);
        check_flag(max_10bit);
        check_flag(max_8bit);
        check_flag(max_422chroma);
        check_flag(max_420chroma);
        check_flag(max_monochrome);
        check_flag(intra);
        check_flag(one_picture_only);
        check_flag(lower_bit_rate);
#undef check_flag
        return profile;
    }
    return NULL;
}
/* Pick the lowest H.265 level whose limits accommodate the given stream
 * parameters; zero parameters are ignored, and a NULL/unmatched PTL
 * falls back to Main-profile multipliers. */
const H265LevelDescriptor *ff_h265_guess_level(const H265RawProfileTierLevel *ptl,
                                               int64_t bitrate,
                                               int width, int height,
                                               int slice_segments,
                                               int tile_rows, int tile_cols,
                                               int max_dec_pic_buffering) {
    const H265ProfileDescriptor *profile;
    int pic_size, tier_flag, lbr_flag, hbr_factor;
    int i;
    if (ptl)
        profile = ff_h265_get_profile(ptl);
    else
        profile = NULL;
    if (!profile) {
        // Default to using multiplication factors for Main profile.
        profile = &h265_profiles[4];
    }
    pic_size = width * height;
    if (ptl) {
        tier_flag = ptl->general_tier_flag;
        lbr_flag = ptl->general_lower_bit_rate_constraint_flag;
    }
    else {
        tier_flag = 0;
        lbr_flag = profile->lower_bit_rate > 0;
    }
    /* HbrFactor depends on profile family, throughput and intra-ness. */
    if (profile->profile_idc == 1 || profile->profile_idc == 2) {
        hbr_factor = 1;
    }
    else if (profile->high_throughput) {
        if (profile->intra)
            hbr_factor = 24 - 12 * lbr_flag;
        else
            hbr_factor = 6;
    }
    else {
        hbr_factor = 2 - lbr_flag;
    }
    /* Levels are ascending: first row passing every constraint wins. */
    for (i = 0; i < FF_ARRAY_ELEMS(h265_levels); i++) {
        const H265LevelDescriptor *level = &h265_levels[i];
        int max_br, max_dpb_size;
        /* High tier only exists for levels that define max_br_high. */
        if (tier_flag && !level->max_br_high)
            continue;
        if (pic_size > level->max_luma_ps)
            continue;
        /* Aspect constraints: each dimension squared bounded by 8*MaxLumaPs. */
        if (width * width > 8 * level->max_luma_ps)
            continue;
        if (height * height > 8 * level->max_luma_ps)
            continue;
        if (slice_segments > level->max_slice_segments_per_picture)
            continue;
        if (tile_rows > level->max_tile_rows)
            continue;
        if (tile_cols > level->max_tile_cols)
            continue;
        if (tier_flag)
            max_br = level->max_br_high;
        else
            max_br = level->max_br_main;
        if (!max_br)
            continue;
        if (bitrate > (int64_t)profile->cpb_nal_factor * hbr_factor * max_br)
            continue;
        /* DPB capacity scales with how much of MaxLumaPs the picture uses,
         * capped at 16 pictures. */
        if (pic_size <= (level->max_luma_ps >> 2))
            max_dpb_size = FFMIN(4 * profile->max_dpb_pic_buf, 16);
        else if (pic_size <= (level->max_luma_ps >> 1))
            max_dpb_size = FFMIN(2 * profile->max_dpb_pic_buf, 16);
        else if (pic_size <= (3 * level->max_luma_ps >> 2))
            max_dpb_size = FFMIN(4 * profile->max_dpb_pic_buf / 3, 16);
        else
            max_dpb_size = profile->max_dpb_pic_buf;
        if (max_dec_pic_buffering > max_dpb_size)
            continue;
        return level;
    }
    return NULL;
}

140
third-party/cbs/vlc.h vendored
View File

@ -1,140 +0,0 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVCODEC_VLC_H
#define AVCODEC_VLC_H
#include <stdint.h>
#define VLC_TYPE int16_t
/* A built variable-length-code decoding table. */
typedef struct VLC {
    int bits;  // number of bits read per table lookup
    VLC_TYPE (*table)
    [2];  ///< code, bits
    int table_size, table_allocated;  // entries used / entries allocated
} VLC;
/* Run-length VLC table entry: decoded level, code length, run count. */
typedef struct RL_VLC_ELEM {
    int16_t level;
    int8_t len;
    uint8_t run;
} RL_VLC_ELEM;
/* Convenience wrapper around ff_init_vlc_sparse() with no symbol table
 * (symbols default to 0, 1, 2, ...). */
#define init_vlc(vlc, nb_bits, nb_codes,       \
                 bits, bits_wrap, bits_size,   \
                 codes, codes_wrap, codes_size, \
                 flags)                         \
    ff_init_vlc_sparse(vlc, nb_bits, nb_codes,  \
                       bits, bits_wrap, bits_size, \
                       codes, codes_wrap, codes_size, \
                       NULL, 0, 0, flags)
/* Build a VLC table from separate bit-length, code and (optional) symbol
 * arrays; the *_wrap arguments are strides in bytes, *_size element sizes. */
int ff_init_vlc_sparse(VLC *vlc, int nb_bits, int nb_codes,
                       const void *bits, int bits_wrap, int bits_size,
                       const void *codes, int codes_wrap, int codes_size,
                       const void *symbols, int symbols_wrap, int symbols_size,
                       int flags);
/**
 * Build VLC decoding tables suitable for use with get_vlc2()
 *
 * This function takes lengths and symbols and calculates the codes from them.
 * For this the input lengths and symbols have to be sorted according to "left
 * nodes in the corresponding tree first".
 *
 * @param[in,out] vlc The VLC to be initialized; table and table_allocated
 * must have been set when initializing a static VLC,
 * otherwise this will be treated as uninitialized.
 * @param[in] nb_bits The number of bits to use for the VLC table;
 * higher values take up more memory and cache, but
 * allow to read codes with fewer reads.
 * @param[in] nb_codes The number of provided length and (if supplied) symbol
 * entries.
 * @param[in] lens The lengths of the codes. Entries > 0 correspond to
 * valid codes; entries == 0 will be skipped and entries
 * with len < 0 indicate that the tree is incomplete and
 * has an open end of length -len at this position.
 * @param[in] lens_wrap Stride (in bytes) of the lengths.
 * @param[in] symbols The symbols, i.e. what is returned from get_vlc2()
 * when the corresponding code is encountered.
 * May be NULL, then 0, 1, 2, 3, 4,... will be used.
 * @param[in] symbols_wrap Stride (in bytes) of the symbols.
 * @param[in] symbols_size Size of the symbols. 1 and 2 are supported.
 * @param[in] offset An offset to apply to all the valid symbols.
 * @param[in] flags A combination of the INIT_VLC_* flags; notice that
 * INIT_VLC_INPUT_LE is pointless and ignored.
 * @param[in] logctx Logging context passed through for diagnostics.
 * NOTE(review): presumably an AVClass-style context
 * usable by av_log() and may be NULL — confirm.
 *
 * NOTE(review): return convention not visible here — presumably 0 on
 * success, negative on error; confirm against the implementation.
 */
int ff_init_vlc_from_lengths(VLC *vlc, int nb_bits, int nb_codes,
const int8_t *lens, int lens_wrap,
const void *symbols, int symbols_wrap, int symbols_size,
int offset, int flags, void *logctx);
/*
 * Free the resources of a VLC built by the ff_init_vlc_* functions.
 * NOTE(review): presumably must not be called on VLCs whose table was
 * provided statically via the INIT_VLC_*_STATIC macros — confirm.
 */
void ff_free_vlc(VLC *vlc);
/* If INIT_VLC_INPUT_LE is set, the LSB bit of the codes used to
 * initialize the VLC table is the first bit to be read. */
#define INIT_VLC_INPUT_LE 2
/* If set the VLC is intended for a little endian bitstream reader. */
#define INIT_VLC_OUTPUT_LE 8
/* Both input and output little-endian: the two flags above combined. */
#define INIT_VLC_LE (INIT_VLC_INPUT_LE | INIT_VLC_OUTPUT_LE)
/* Use the caller-provided static table (vlc->table/table_allocated set
 * before init) instead of allocating one — this is what the
 * INIT_*_STATIC macros below OR into flags. */
#define INIT_VLC_USE_NEW_STATIC 4
/* INIT_VLC_USE_NEW_STATIC with bit 0 additionally set.
 * NOTE(review): the meaning of bit 0 ("overlong" handling, per the
 * macro name) is not visible in this header — confirm against the
 * implementation. */
#define INIT_VLC_STATIC_OVERLONG (1 | INIT_VLC_USE_NEW_STATIC)
/*
 * Initialize a VLC backed by a function-local static table of
 * static_size [VLC_TYPE][2] entries: points vlc->table at the static
 * storage, records its capacity in table_allocated, then calls
 * ff_init_vlc_sparse() with INIT_VLC_USE_NEW_STATIC OR'ed into flags.
 * Arguments a..j are forwarded unchanged to ff_init_vlc_sparse()
 * (nb_codes, bits/codes/symbols triples — see its declaration above).
 */
#define INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
h, i, j, flags, static_size) \
do { \
static VLC_TYPE table[static_size][2]; \
(vlc)->table = table; \
(vlc)->table_allocated = static_size; \
ff_init_vlc_sparse(vlc, bits, a, b, c, d, e, f, g, h, i, j, \
flags | INIT_VLC_USE_NEW_STATIC); \
} while(0)
/* INIT_CUSTOM_VLC_SPARSE_STATIC with no extra flags. */
#define INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
h, i, j, 0, static_size)
/* INIT_CUSTOM_VLC_SPARSE_STATIC with INIT_VLC_LE (little-endian input
 * and output). */
#define INIT_LE_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, h, i, j, static_size) \
INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
h, i, j, INIT_VLC_LE, static_size)
/* Non-sparse variant with caller-chosen flags: passes NULL/0/0 for the
 * symbols, so the code index is used as the symbol. */
#define INIT_CUSTOM_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, flags, static_size) \
INIT_CUSTOM_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, \
NULL, 0, 0, flags, static_size)
/* Non-sparse, no extra flags. */
#define INIT_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
INIT_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
/* Non-sparse, little-endian (INIT_VLC_LE). */
#define INIT_LE_VLC_STATIC(vlc, bits, a, b, c, d, e, f, g, static_size) \
INIT_LE_VLC_SPARSE_STATIC(vlc, bits, a, b, c, d, e, f, g, NULL, 0, 0, static_size)
/*
 * Static-table counterpart of ff_init_vlc_from_lengths() (documented
 * above): declares a function-local static table of static_size
 * [VLC_TYPE][2] entries, points the VLC at it, and initializes it with
 * INIT_VLC_USE_NEW_STATIC OR'ed into flags and a NULL logging context.
 */
#define INIT_VLC_STATIC_FROM_LENGTHS(vlc, bits, nb_codes, lens, len_wrap, \
symbols, symbols_wrap, symbols_size, \
offset, flags, static_size) \
do { \
static VLC_TYPE table[static_size][2]; \
(vlc)->table = table; \
(vlc)->table_allocated = static_size; \
ff_init_vlc_from_lengths(vlc, bits, nb_codes, lens, len_wrap, \
symbols, symbols_wrap, symbols_size, \
offset, flags | INIT_VLC_USE_NEW_STATIC, \
NULL); \
} while(0)
#endif /* AVCODEC_VLC_H */

@ -1 +1 @@
Subproject commit fe670da0818a248eaf1c08eaa556a8285c1d97f7
Subproject commit f0ef18f75d010fa4248616108032d3beeffd5859

@ -1 +1 @@
Subproject commit c56edb680615dd84ed93cc531476659150aa2477
Subproject commit 2b3cb11bcc4aef7546851216dc9610b31d3769f5

@ -1 +1 @@
Subproject commit 9103674a57ab04f5109d4e10812fbf9f22685468
Subproject commit 4524bc1854ad7711fdbf0ea0722f19f63a2cacfa

@ -1 +1 @@
Subproject commit 0b7d3bee1aa7a18af1023a6e35b71ba9a47e1907
Subproject commit 5aabe9f00712aa02f0c229397500884ef5592162

@ -1 +1 @@
Subproject commit e4d141c6a08047e28025acade471baa2b412a873
Subproject commit 09b1467bba6b718d1ace07e6a353c17660b29d02