\n", argv[0]);
+ return 1;
+ }
+
+ av_register_all();
+ avfilter_register_all();
+
+ if ((ret = open_input_file(argv[1])) < 0)
+ goto end;
+ if ((ret = open_output_file(argv[2])) < 0)
+ goto end;
+ if ((ret = init_filters()) < 0)
+ goto end;
+
+ /* read all packets */
+ while (1) {
+ if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
+ break;
+ stream_index = packet.stream_index;
+ type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
+ av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
+ stream_index);
+
+ if (filter_ctx[stream_index].filter_graph) {
+ av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
+ frame = av_frame_alloc();
+ if (!frame) {
+ ret = AVERROR(ENOMEM);
+ break;
+ }
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ifmt_ctx->streams[stream_index]->codec->time_base);
+ dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
+ avcodec_decode_audio4;
+ ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
+ &got_frame, &packet);
+ if (ret < 0) {
+ av_frame_free(&frame);
+ av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
+ break;
+ }
+
+ if (got_frame) {
+ frame->pts = av_frame_get_best_effort_timestamp(frame);
+ ret = filter_encode_write_frame(frame, stream_index);
+ av_frame_free(&frame);
+ if (ret < 0)
+ goto end;
+ } else {
+ av_frame_free(&frame);
+ }
+ } else {
+ /* remux this frame without reencoding */
+ av_packet_rescale_ts(&packet,
+ ifmt_ctx->streams[stream_index]->time_base,
+ ofmt_ctx->streams[stream_index]->time_base);
+
+ ret = av_interleaved_write_frame(ofmt_ctx, &packet);
+ if (ret < 0)
+ goto end;
+ }
+ av_free_packet(&packet);
+ }
+
+ /* flush filters and encoders */
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ /* flush filter */
+ if (!filter_ctx[i].filter_graph)
+ continue;
+ ret = filter_encode_write_frame(NULL, i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
+ goto end;
+ }
+
+ /* flush encoder */
+ ret = flush_encoder(i);
+ if (ret < 0) {
+ av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
+ goto end;
+ }
+ }
+
+ av_write_trailer(ofmt_ctx);
+end:
+ av_free_packet(&packet);
+ av_frame_free(&frame);
+ for (i = 0; i < ifmt_ctx->nb_streams; i++) {
+ avcodec_close(ifmt_ctx->streams[i]->codec);
+ if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
+ avcodec_close(ofmt_ctx->streams[i]->codec);
+ if (filter_ctx && filter_ctx[i].filter_graph)
+ avfilter_graph_free(&filter_ctx[i].filter_graph);
+ }
+ av_free(filter_ctx);
+ avformat_close_input(&ifmt_ctx);
+ if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
+ avio_closep(&ofmt_ctx->pb);
+ avformat_free_context(ofmt_ctx);
+
+ if (ret < 0)
+ av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
+
+ return ret ? 1 : 0;
+}
diff --git a/Externals/ffmpeg/dev/doc/faq.html b/Externals/ffmpeg/dev/doc/faq.html
new file mode 100644
index 0000000000..be6ea62995
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/faq.html
@@ -0,0 +1,719 @@
+
+
+
+
+
+
+ FFmpeg FAQ
+
+
+
+
+
+
+
+
+ FFmpeg FAQ
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 General Questions# TOC
+
+
+
1.1 Why doesn’t FFmpeg support feature [xyz]?# TOC
+
+
Because no one has taken on that task yet. FFmpeg development is
+driven by the tasks that are important to the individual developers.
+If there is a feature that is important to you, the best way to get
+it implemented is to undertake the task yourself or sponsor a developer.
+
+
+
1.2 FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?# TOC
+
+
No. Windows DLLs are not portable; they are also bloated and often slow.
+Moreover FFmpeg strives to support all codecs natively.
+A DLL loader is not conducive to that goal.
+
+
+
1.3 I cannot read this file although this format seems to be supported by ffmpeg.# TOC
+
+
Even if ffmpeg can read the container format, it may not support all its
+codecs. Please consult the supported codec list in the ffmpeg
+documentation.
+
+
+
1.4 Which codecs are supported by Windows?# TOC
+
+
Windows does not support standard formats like MPEG very well, unless you
+install some additional codecs.
+
+
The following list of video codecs should work on most Windows systems:
+
+msmpeg4v2
+.avi/.asf
+
+msmpeg4
+.asf only
+
+wmv1
+.asf only
+
+wmv2
+.asf only
+
+mpeg4
+Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
+
+mpeg1video
+.mpg only
+
+
+
Note, ASF files often have .wmv or .wma extensions in Windows. It should also
+be mentioned that Microsoft claims a patent on the ASF format, and may sue
+or threaten users who create ASF files with non-Microsoft software. It is
+strongly advised to avoid ASF where possible.
+
+
The following list of audio codecs should work on most Windows systems:
+
+adpcm_ima_wav
+adpcm_ms
+pcm_s16le
+always
+
+libmp3lame
+If some MP3 codec like LAME is installed.
+
+
+
+
+
+
2 Compilation# TOC
+
+
+
2.1 error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'
# TOC
+
+
This is a bug in gcc. Do not report it to us. Instead, please report it to
+the gcc developers. Note that we will not add workarounds for gcc bugs.
+
+
Also note that (some of) the gcc developers believe this is not a bug or
+not a bug they should fix:
+http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203 .
+Then again, some of them do not know the difference between an undecidable
+problem and an NP-hard problem...
+
+
+
2.2 I have installed this library with my distro’s package manager. Why does configure
not see it?# TOC
+
+
Distributions usually split libraries in several packages. The main package
+contains the files necessary to run programs using the library. The
+development package contains the files necessary to build programs using the
+library. Sometimes, docs and/or data are in a separate package too.
+
+
To build FFmpeg, you need to install the development package. It is usually
+called libfoo-dev or libfoo-devel . You can remove it after the
+build is finished, but be sure to keep the main package.
+
+
+
2.3 How do I make pkg-config
find my libraries?# TOC
+
+
Somewhere along with your libraries, there is a .pc file (or several)
+in a pkgconfig directory. You need to set environment variables to
+point pkg-config
to these files.
+
+
If you need to add directories to pkg-config
’s search list
+(typical use case: library installed separately), add it to
+$PKG_CONFIG_PATH
:
+
+
+
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
+
+
+
If you need to replace pkg-config
’s search list
+(typical use case: cross-compiling), set it in
+$PKG_CONFIG_LIBDIR
:
+
+
+
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
+
+
+
If you need to know the library’s internal dependencies (typical use: static
+linking), add the --static
option to pkg-config
:
+
+
+
./configure --pkg-config-flags=--static
+
+
+
+
2.4 How do I use pkg-config
when cross-compiling?# TOC
+
+
The best way is to install pkg-config
in your cross-compilation
+environment. It will automatically use the cross-compilation libraries.
+
+
You can also use pkg-config
from the host environment by
+specifying explicitly --pkg-config=pkg-config
to configure
.
+In that case, you must point pkg-config
to the correct directories
+using the PKG_CONFIG_LIBDIR
, as explained in the previous entry.
+
+
As an intermediate solution, you can place in your cross-compilation
+environment a script that calls the host pkg-config
with
+PKG_CONFIG_LIBDIR
set. That script can look like that:
+
+
+
#!/bin/sh
+PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
+export PKG_CONFIG_LIBDIR
+exec /usr/bin/pkg-config "$@"
+
+
+
+
+
+
+
3.1 ffmpeg does not work; what is wrong?# TOC
+
+
Try a make distclean
in the ffmpeg source directory before the build.
+If this does not help see
+(http://ffmpeg.org/bugreports.html ).
+
+
+
3.2 How do I encode single pictures into movies?# TOC
+
+
First, rename your pictures to follow a numerical sequence.
+For example, img1.jpg, img2.jpg, img3.jpg,...
+Then you may run:
+
+
+
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
+
+
+
Notice that ‘%d ’ is replaced by the image number.
+
+
img%03d.jpg means the sequence img001.jpg , img002.jpg , etc.
+
+
Use the -start_number option to declare a starting number for
+the sequence. This is useful if your sequence does not start with
+img001.jpg but is still in a numerical order. The following
+example will start with img100.jpg :
+
+
+
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
+
+
+
+If you have a large number of pictures to rename, you can use the
+following command to ease the burden. The command, using the bourne
+shell syntax, symbolically links all files in the current directory
+that match *jpg
to the /tmp directory in the sequence of
+img001.jpg , img002.jpg and so on.
+
+
+
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
+
+
+
If you want to sequence them by oldest modified first, substitute
+$(ls -r -t *jpg)
in place of *jpg
.
+
+
Then run:
+
+
+
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
+
+
+
The same logic is used for any image format that ffmpeg reads.
+
+
You can also use cat
to pipe images to ffmpeg:
+
+
+
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
+
+
+
+
3.3 How do I encode movie to single pictures?# TOC
+
+
Use:
+
+
+
ffmpeg -i movie.mpg movie%d.jpg
+
+
+
The movie.mpg used as input will be converted to
+movie1.jpg , movie2.jpg , etc...
+
+
Instead of relying on file format self-recognition, you may also use
+
+-c:v ppm
+-c:v png
+-c:v mjpeg
+
+
to force the encoding.
+
+
Applying that to the previous example:
+
+
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
+
+
+
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
+
+
+
3.4 Why do I see a slight quality degradation with multithreaded MPEG* encoding?# TOC
+
+
For multithreaded MPEG* encoding, the encoded slices must be independent,
+otherwise thread n would practically have to wait for n-1 to finish, so it’s
+quite logical that there is a small reduction of quality. This is not a bug.
+
+
+
3.5 How can I read from the standard input or write to the standard output?# TOC
+
+
Use - as file name.
+
+
+
3.6 -f jpeg doesn’t work.# TOC
+
+
Try ’-f image2 test%d.jpg’.
+
+
+
3.7 Why can I not change the frame rate?# TOC
+
+
Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
+Choose a different codec with the -c:v command line option.
+
+
+
3.8 How do I encode Xvid or DivX video with ffmpeg?# TOC
+
+
Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
+standard (note that there are many other coding formats that use this
+same standard). Thus, use ’-c:v mpeg4’ to encode in these formats. The
+default fourcc stored in an MPEG-4-coded file will be ’FMP4’. If you want
+a different fourcc, use the ’-vtag’ option. E.g., ’-vtag xvid’ will
+force the fourcc ’xvid’ to be stored as the video fourcc rather than the
+default.
+
+
+
3.9 Which are good parameters for encoding high quality MPEG-4?# TOC
+
+
’-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2’,
+things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
+
+
+
3.10 Which are good parameters for encoding high quality MPEG-1/MPEG-2?# TOC
+
+
’-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2’
+but beware the ’-g 100’ might cause problems with some decoders.
+Things to try: ’-bf 2’, ’-flags qprd’, ’-flags mv0’, ’-flags skiprd’.
+
+
+
3.11 Interlaced video looks very bad when encoded with ffmpeg, what is wrong?# TOC
+
+
You should use ’-flags +ilme+ildct’ and maybe ’-flags +alt’ for interlaced
+material, and try ’-top 0/1’ if the result looks really messed-up.
+
+
+
3.12 How can I read DirectShow files?# TOC
+
+
If you have built FFmpeg with ./configure --enable-avisynth
+(only possible on MinGW/Cygwin platforms),
+then you may use any file that DirectShow can read as input.
+
+
Just create an "input.avs" text file with this single line ...
+
+
DirectShowSource("C:\path to your file\yourfile.asf")
+
+
... and then feed that text file to ffmpeg:
+
+
+
For ANY other help on AviSynth, please visit the
+AviSynth homepage .
+
+
+
3.13 How can I join video files?# TOC
+
+
To "join" video files is quite ambiguous. The following list explains the
+different kinds of "joining" and points out how those are addressed in
+FFmpeg. To join video files may mean:
+
+
+ To put them one after the other: this is called to concatenate them
+(in short: concat) and is addressed
+in this very faq .
+
+ To put them together in the same file, to let the user choose between the
+different versions (example: different audio languages): this is called to
+multiplex them together (in short: mux), and is done by simply
+invoking ffmpeg with several -i options.
+
+ For audio, to put all channels together in a single stream (example: two
+mono streams into one stereo stream): this is sometimes called to
+merge them, and can be done using the
+amerge
filter.
+
+ For audio, to play one on top of the other: this is called to mix
+them, and can be done by first merging them into a single stream and then
+using the pan
filter to mix
+the channels at will.
+
+ For video, to display both together, side by side or one on top of a part of
+the other; it can be done using the
+overlay
video filter.
+
+
+
+
+
3.14 How can I concatenate video files?# TOC
+
+
There are several solutions, depending on the exact circumstances.
+
+
+
3.14.1 Concatenating using the concat filter # TOC
+
+
FFmpeg has a concat
filter designed specifically for that, with examples in the
+documentation. This operation is recommended if you need to re-encode.
+
+
+
3.14.2 Concatenating using the concat demuxer # TOC
+
+
FFmpeg has a concat
demuxer which you can use when you want to avoid a re-encode and
+your format doesn’t support file level concatenation.
+
+
+
3.14.3 Concatenating using the concat protocol (file level)# TOC
+
+
FFmpeg has a concat
protocol designed specifically for that, with examples in the
+documentation.
+
+
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow concatenation of
+video by merely concatenating the files containing them.
+
+
Hence you may concatenate your multimedia files by first transcoding them to
+these privileged formats, then using the humble cat
command (or the
+equally humble copy
under Windows), and finally transcoding back to your
+format of choice.
+
+
+
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
+ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
+cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
+ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
+
+
+
Additionally, you can use the concat
protocol instead of cat
or
+copy
which will avoid creation of a potentially huge intermediate file.
+
+
+
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
+ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
+ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
+ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
+
+
+
Note that you may need to escape the character "|" which is special for many
+shells.
+
+
Another option is usage of named pipes, should your platform support it:
+
+
+
mkfifo intermediate1.mpg
+mkfifo intermediate2.mpg
+ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
+ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
+cat intermediate1.mpg intermediate2.mpg |\
+ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
+
+
+
+
3.14.4 Concatenating using raw audio and video# TOC
+
+
Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also
+allow concatenation, and the transcoding step is almost lossless.
+When using multiple yuv4mpegpipe(s), the first line needs to be discarded
+from all but the first stream. This can be accomplished by piping through
+tail
as seen below. Note that when piping through tail
you
+must use command grouping, { ;}
, to background properly.
+
+
For example, let’s say we want to concatenate two FLV files into an
+output.flv file:
+
+
+
mkfifo temp1.a
+mkfifo temp1.v
+mkfifo temp2.a
+mkfifo temp2.v
+mkfifo all.a
+mkfifo all.v
+ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
+ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
+ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
+{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; } &
+cat temp1.a temp2.a > all.a &
+cat temp1.v temp2.v > all.v &
+ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
+ -f yuv4mpegpipe -i all.v \
+ -y output.flv
+rm temp[12].[av] all.[av]
+
+
+
+
3.15 Using -f lavfi , audio becomes mono for no apparent reason.# TOC
+
+
Use -dumpgraph - to find out exactly where the channel layout is
+lost.
+
+
Most likely, it is through auto-inserted aresample
. Try to understand
+why the converting filter was needed at that place.
+
+
Just before the output is a likely place, as -f lavfi currently
+only support packed S16.
+
+
Then insert the correct aformat
explicitly in the filtergraph,
+specifying the exact format.
+
+
+
aformat=sample_fmts=s16:channel_layouts=stereo
+
+
+
+
3.16 Why does FFmpeg not see the subtitles in my VOB file?# TOC
+
+
VOB and a few other formats do not have a global header that describes
+everything present in the file. Instead, applications are supposed to scan
+the file to see what it contains. Since VOB files are frequently large, only
+the beginning is scanned. If the subtitles happen only later in the file,
+they will not be initially detected.
+
+
Some applications, including the ffmpeg
command-line tool, can only
+work with streams that were detected during the initial scan; streams that
+are detected later are ignored.
+
+
The size of the initial scan is controlled by two options: probesize
+(default ~5 MB) and analyzeduration
(default 5,000,000 µs = 5 s). For
+the subtitle stream to be detected, both values must be large enough.
+
+
+
3.17 Why was the ffmpeg
-sameq option removed? What to use instead?# TOC
+
+
The -sameq option meant "same quantizer", and made sense only in a
+very limited set of cases. Unfortunately, a lot of people mistook it for
+"same quality" and used it in places where it did not make sense: it had
+roughly the expected visible effect, but achieved it in a very inefficient
+way.
+
+
Each encoder has its own set of options to set the quality-vs-size balance,
+use the options for the encoder you are using to set the quality level to a
+point acceptable for your tastes. The most common options to do that are
+-qscale and -qmax , but you should peruse the documentation
+of the encoder you chose.
+
+
+
4 Development# TOC
+
+
+
4.1 Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?# TOC
+
+
Yes. Check the doc/examples directory in the source
+repository, also available online at:
+https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples .
+
+
Examples are also installed by default, usually in
+$PREFIX/share/ffmpeg/examples
.
+
+
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
+examine the source code for one of the many open source projects that
+already incorporate FFmpeg at (projects.html ).
+
+
+
4.2 Can you support my C compiler XXX?# TOC
+
+
It depends. If your compiler is C99-compliant, then patches to support
+it are likely to be welcome if they do not pollute the source code
+with #ifdef
s related to the compiler.
+
+
+
4.3 Is Microsoft Visual C++ supported?# TOC
+
+
Yes. Please see the Microsoft Visual C++
+section in the FFmpeg documentation.
+
+
+
4.4 Can you add automake, libtool or autoconf support?# TOC
+
+
No. These tools are too bloated and they complicate the build.
+
+
+
4.5 Why not rewrite FFmpeg in object-oriented C++?# TOC
+
+
FFmpeg is already organized in a highly modular manner and does not need to
+be rewritten in a formal object language. Further, many of the developers
+favor straight C; it works for them. For more arguments on this matter,
+read "Programming Religion" .
+
+
+
4.6 Why are the ffmpeg programs devoid of debugging symbols?# TOC
+
+
The build process creates ffmpeg_g
, ffplay_g
, etc. which
+contain full debug information. Those binaries are stripped to create
+ffmpeg
, ffplay
, etc. If you need the debug information, use
+the *_g versions.
+
+
+
4.7 I do not like the LGPL, can I contribute code under the GPL instead?# TOC
+
+
Yes, as long as the code is optional and can easily and cleanly be placed
+under #if CONFIG_GPL without breaking anything. So, for example, a new codec
+or filter would be OK under GPL while a bug fix to LGPL code would not.
+
+
+
4.8 I’m using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.# TOC
+
+
FFmpeg builds static libraries by default. In static libraries, dependencies
+are not handled. That has two consequences. First, you must specify the
+libraries in dependency order: -lavdevice
must come before
+-lavformat
, -lavutil
must come after everything else, etc.
+Second, external libraries that are used in FFmpeg have to be specified too.
+
+
An easy way to get the full list of required libraries in dependency order
+is to use pkg-config
.
+
+
+
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
+
+
+
See doc/example/Makefile and doc/example/pc-uninstalled for
+more details.
+
+
+
4.9 I’m using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.# TOC
+
+
FFmpeg is a pure C project, so to use the libraries within your C++ application
+you need to explicitly state that you are using a C library. You can do this by
+encompassing your FFmpeg includes using extern "C"
.
+
+
See http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3
+
+
+
4.10 I’m using libavutil from within my C++ application but the compiler complains about ’UINT64_C’ was not declared in this scope# TOC
+
+
FFmpeg is a pure C project using C99 math features, in order to enable C++
+to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS
+
+
+
4.11 I have a file in memory / an API different from *open/*read/ libc — how do I use it with libavformat?# TOC
+
+
You have to create a custom AVIOContext using avio_alloc_context
,
+see libavformat/aviobuf.c in FFmpeg and libmpdemux/demux_lavf.c in MPlayer or MPlayer2 sources.
+
+
+
4.12 Where is the documentation about ffv1, msmpeg4, asv1, 4xm?# TOC
+
+
see http://www.ffmpeg.org/~michael/
+
+
+
4.13 How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?# TOC
+
+
Even if peculiar since it is network oriented, RTP is a container like any
+other. You have to demux RTP before feeding the payload to libavcodec.
+In this specific case please look at RFC 4629 to see how it should be done.
+
+
+
4.14 AVStream.r_frame_rate is wrong, it is much larger than the frame rate.# TOC
+
+
r_frame_rate
is NOT the average frame rate, it is the smallest frame rate
+that can accurately represent all timestamps. So no, it is not
+wrong if it is larger than the average!
+For example, if you have mixed 25 and 30 fps content, then r_frame_rate
+will be 150 (it is the least common multiple).
+If you are looking for the average frame rate, see AVStream.avg_frame_rate
.
+
+
+
4.15 Why is make fate
not running all tests?# TOC
+
+
Make sure you have the fate-suite samples and the SAMPLES
Make variable
+or FATE_SAMPLES
environment variable or the --samples
+configure
option is set to the right path.
+
+
+
4.16 Why is make fate
not finding the samples?# TOC
+
+
Do you happen to have a ~
character in the samples path to indicate a
+home directory? The value is used in ways where the shell cannot expand it,
+causing FATE to not find files. Just replace ~
by the full path.
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/fate.html b/Externals/ffmpeg/dev/doc/fate.html
new file mode 100644
index 0000000000..980d28b756
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/fate.html
@@ -0,0 +1,286 @@
+
+
+
+
+
+
+ FFmpeg Automated Testing Environment
+
+
+
+
+
+
+
+
+ FFmpeg Automated Testing Environment
+
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Introduction# TOC
+
+
FATE is an extended regression suite on the client-side and a means
+for results aggregation and presentation on the server-side.
+
+
The first part of this document explains how you can use FATE from
+your FFmpeg source directory to test your ffmpeg binary. The second
+part describes how you can run FATE to submit the results to FFmpeg’s
+FATE server.
+
+
In any way you can have a look at the publicly viewable FATE results
+by visiting this website:
+
+
http://fate.ffmpeg.org/
+
+
This is especially recommended for all people contributing source
+code to FFmpeg, as it can be seen if some test on some platform broke
+with their recent contribution. This usually happens on the platforms
+the developers could not test on.
+
+
The second part of this document describes how you can run FATE to
+submit your results to FFmpeg’s FATE server. If you want to submit your
+results be sure to check that your combination of CPU, OS and compiler
+is not already listed on the above mentioned website.
+
+
In the third part you can find a comprehensive listing of FATE makefile
+targets and variables.
+
+
+
+
2 Using FATE from your FFmpeg source directory# TOC
+
+
If you want to run FATE on your machine you need to have the samples
+in place. You can get the samples via the build target fate-rsync.
+Use this command from the top-level source directory:
+
+
+
make fate-rsync SAMPLES=fate-suite/
+make fate SAMPLES=fate-suite/
+
+
+
The above commands set the samples location by passing a makefile
+variable via command line. It is also possible to set the samples
+location at source configuration time by invoking configure with
+‘–samples=<path to the samples directory>’. Afterwards you can
+invoke the makefile targets without setting the SAMPLES makefile
+variable. This is illustrated by the following commands:
+
+
+
./configure --samples=fate-suite/
+make fate-rsync
+make fate
+
+
+
Yet another way to tell FATE about the location of the sample
+directory is by making sure the environment variable FATE_SAMPLES
+contains the path to your samples directory. This can be achieved
+by e.g. putting that variable in your shell profile or by setting
+it in your interactive session.
+
+
+
FATE_SAMPLES=fate-suite/ make fate
+
+
+
+
Do not put a ’~’ character in the samples path to indicate a home
+directory. Because of shell nuances, this will cause FATE to fail.
+
+
To use a custom wrapper to run the test, pass --target-exec to
+configure
or set the TARGET_EXEC Make variable.
+
+
+
+
3 Submitting the results to the FFmpeg result aggregation server# TOC
+
+
To submit your results to the server you should run fate through the
+shell script tests/fate.sh from the FFmpeg sources. This script needs
+to be invoked with a configuration file as its first argument.
+
+
+
tests/fate.sh /path/to/fate_config
+
+
+
A configuration file template with comments describing the individual
+configuration variables can be found at doc/fate_config.sh.template .
+
+
The mentioned configuration template is also available here:
+
slot= # some unique identifier
+repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
+samples= # path to samples directory
+workdir= # directory in which to do all the work
+#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
+comment= # optional description
+build_only= # set to "yes" for a compile-only instance that skips tests
+
+# the following are optional and map to configure options
+arch=
+cpu=
+cross_prefix=
+as=
+cc=
+ld=
+target_os=
+sysroot=
+target_exec=
+target_path=
+target_samples=
+extra_cflags=
+extra_ldflags=
+extra_libs=
+extra_conf= # extra configure options not covered above
+
+#make= # name of GNU make if not 'make'
+makeopts= # extra options passed to 'make'
+#tar= # command to create a tar archive from its arguments on stdout,
+ # defaults to 'tar c'
+
+
Create a configuration that suits your needs, based on the configuration
+template. The ‘slot’ configuration variable can be any string that is not
+yet used, but it is suggested that you name it adhering to the following
+pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
+itself will be sourced in a shell script, therefore all shell features may
+be used. This enables you to setup the environment as you need it for your
+build.
+
+
For your first test runs the ‘fate_recv’ variable should be empty or
+commented out. This will run everything as normal except that it will omit
+the submission of the results to the server. The following files should be
+present in $workdir as specified in the configuration file:
+
+
+ configure.log
+ compile.log
+ test.log
+ report
+ version
+
+
+
When you have everything working properly you can create an SSH key pair
+and send the public key to the FATE server administrator who can be contacted
+at the email address fate-admin@ffmpeg.org .
+
+
Configure your SSH client to use public key authentication with that key
+when connecting to the FATE server. Also do not forget to check the identity
+of the server and to accept its host key. This can usually be achieved by
+running your SSH client manually and killing it after you accepted the key.
+The FATE server’s fingerprint is:
+
+
+RSA
+d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
+
+ECDSA
+76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
+
+
+
+
If you have problems connecting to the FATE server, it may help to try out
+the ssh
command with one or more -v options. You should
+get detailed output concerning your SSH configuration and the authentication
+process.
+
+
The only thing left is to automate the execution of the fate.sh script and
+the synchronisation of the samples directory.
+
+
+
+
4 FATE makefile targets and variables# TOC
+
+
+
4.1 Makefile targets# TOC
+
+
+fate-rsync
+Download/synchronize sample files to the configured samples directory.
+
+
+fate-list
+Will list all fate/regression test targets.
+
+
+fate
+Run the FATE test suite (requires the fate-suite dataset).
+
+
+
+
+
4.2 Makefile variables# TOC
+
+
+V
+Verbosity level, can be set to 0, 1 or 2.
+
+ 0: show just the test arguments
+ 1: show just the command used in the test
+ 2: show everything
+
+
+
+SAMPLES
+Specify or override the path to the FATE samples at make time, it has a
+meaning only while running the regression tests.
+
+
+THREADS
+Specify how many threads to use while running regression tests, it is
+quite useful to detect thread-related regressions.
+
+
+THREAD_TYPE
+Specify which threading strategy test, either slice or frame ,
+by default slice+frame
+
+
+CPUFLAGS
+Specify CPU flags.
+
+
+TARGET_EXEC
+Specify or override the wrapper used to run the tests.
+The TARGET_EXEC option provides a way to run FATE wrapped in
+valgrind
, qemu-user
or wine
or on remote targets
+through ssh
.
+
+
+GEN
+Set to 1 to generate the missing or mismatched references.
+
+
+
+
+
4.3 Examples# TOC
+
+
+
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-all.html b/Externals/ffmpeg/dev/doc/ffmpeg-all.html
new file mode 100644
index 0000000000..dc4fc35617
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-all.html
@@ -0,0 +1,27303 @@
+
+
+
+
+
+
+ ffmpeg Documentation
+
+
+
+
+
+
+
+
+ ffmpeg Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Synopsis# TOC
+
+
ffmpeg [global_options ] {[input_file_options ] -i input_file } ... {[output_file_options ] output_file } ...
+
+
+
2 Description# TOC
+
+
ffmpeg
is a very fast video and audio converter that can also grab from
+a live audio/video source. It can also convert between arbitrary sample
+rates and resize video on the fly with a high quality polyphase filter.
+
+
ffmpeg
reads from an arbitrary number of input "files" (which can be regular
+files, pipes, network streams, grabbing devices, etc.), specified by the
+-i
option, and writes to an arbitrary number of output "files", which are
+specified by a plain output filename. Anything found on the command line which
+cannot be interpreted as an option is considered to be an output filename.
+
+
Each input or output file can, in principle, contain any number of streams of
+different types (video/audio/subtitle/attachment/data). The allowed number and/or
+types of streams may be limited by the container format. Selecting which
+streams from which inputs will go into which output is either done automatically
+or with the -map
option (see the Stream selection chapter).
+
+
To refer to input files in options, you must use their indices (0-based). E.g.
+the first input file is 0
, the second is 1
, etc. Similarly, streams
+within a file are referred to by their indices. E.g. 2:3
refers to the
+fourth stream in the third input file. Also see the Stream specifiers chapter.
+
+
As a general rule, options are applied to the next specified
+file. Therefore, order is important, and you can have the same
+option on the command line multiple times. Each occurrence is
+then applied to the next input or output file.
+Exceptions from this rule are the global options (e.g. verbosity level),
+which should be specified first.
+
+
Do not mix input and output files – first specify all input files, then all
+output files. Also do not mix options which belong to different files. All
+options apply ONLY to the next input or output file and are reset between files.
+
+
+ To set the video bitrate of the output file to 64 kbit/s:
+
+
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
+
+
+ To force the frame rate of the output file to 24 fps:
+
+
ffmpeg -i input.avi -r 24 output.avi
+
+
+ To force the frame rate of the input file (valid for raw formats only)
+to 1 fps and the frame rate of the output file to 24 fps:
+
+
ffmpeg -r 1 -i input.m2v -r 24 output.avi
+
+
+
+
The format option may be needed for raw input files.
+
+
+
+
3 Detailed description# TOC
+
+
The transcoding process in ffmpeg
for each output can be described by
+the following diagram:
+
+
+
_______ ______________
+| | | |
+| input | demuxer | encoded data | decoder
+| file | ---------> | packets | -----+
+|_______| |______________| |
+ v
+ _________
+ | |
+ | decoded |
+ | frames |
+ |_________|
+ ________ ______________ |
+| | | | |
+| output | <-------- | encoded data | <----+
+| file | muxer | packets | encoder
+|________| |______________|
+
+
+
+
+
ffmpeg
calls the libavformat library (containing demuxers) to read
+input files and get packets containing encoded data from them. When there are
+multiple input files, ffmpeg
tries to keep them synchronized by
+tracking lowest timestamp on any active input stream.
+
+
Encoded packets are then passed to the decoder (unless streamcopy is selected
+for the stream, see further for a description). The decoder produces
+uncompressed frames (raw video/PCM audio/...) which can be processed further by
+filtering (see next section). After filtering, the frames are passed to the
+encoder, which encodes them and outputs encoded packets. Finally those are
+passed to the muxer, which writes the encoded packets to the output file.
+
+
+
3.1 Filtering# TOC
+
Before encoding, ffmpeg
can process raw audio and video frames using
+filters from the libavfilter library. Several chained filters form a filter
+graph. ffmpeg
distinguishes between two types of filtergraphs:
+simple and complex.
+
+
+
3.1.1 Simple filtergraphs# TOC
+
Simple filtergraphs are those that have exactly one input and output, both of
+the same type. In the above diagram they can be represented by simply inserting
+an additional step between decoding and encoding:
+
+
+
_________ ______________
+| | | |
+| decoded | | encoded data |
+| frames |\ _ | packets |
+|_________| \ /||______________|
+ \ __________ /
+ simple _\|| | / encoder
+ filtergraph | filtered |/
+ | frames |
+ |__________|
+
+
+
+
Simple filtergraphs are configured with the per-stream -filter option
+(with -vf and -af aliases for video and audio respectively).
+A simple filtergraph for video can look for example like this:
+
+
+
_______ _____________ _______ ________
+| | | | | | | |
+| input | ---> | deinterlace | ---> | scale | ---> | output |
+|_______| |_____________| |_______| |________|
+
+
+
+
Note that some filters change frame properties but not frame contents. E.g. the
+fps
filter in the example above changes number of frames, but does not
+touch the frame contents. Another example is the setpts
filter, which
+only sets timestamps and otherwise passes the frames unchanged.
+
+
+
3.1.2 Complex filtergraphs# TOC
+
Complex filtergraphs are those which cannot be described as simply a linear
+processing chain applied to one stream. This is the case, for example, when the graph has
+more than one input and/or output, or when output stream type is different from
+input. They can be represented with the following diagram:
+
+
+
_________
+| |
+| input 0 |\ __________
+|_________| \ | |
+ \ _________ /| output 0 |
+ \ | | / |__________|
+ _________ \| complex | /
+| | | |/
+| input 1 |---->| filter |\
+|_________| | | \ __________
+ /| graph | \ | |
+ / | | \| output 1 |
+ _________ / |_________| |__________|
+| | /
+| input 2 |/
+|_________|
+
+
+
+
Complex filtergraphs are configured with the -filter_complex option.
+Note that this option is global, since a complex filtergraph, by its nature,
+cannot be unambiguously associated with a single stream or file.
+
+
The -lavfi option is equivalent to -filter_complex .
+
+
A trivial example of a complex filtergraph is the overlay
filter, which
+has two video inputs and one video output, containing one video overlaid on top
+of the other. Its audio counterpart is the amix
filter.
+
+
+
3.2 Stream copy# TOC
+
Stream copy is a mode selected by supplying the copy
parameter to the
+-codec option. It makes ffmpeg
omit the decoding and encoding
+step for the specified stream, so it does only demuxing and muxing. It is useful
+for changing the container format or modifying container-level metadata. The
+diagram above will, in this case, simplify to this:
+
+
+
_______ ______________ ________
+| | | | | |
+| input | demuxer | encoded data | muxer | output |
+| file | ---------> | packets | -------> | file |
+|_______| |______________| |________|
+
+
+
+
Since there is no decoding or encoding, it is very fast and there is no quality
+loss. However, it might not work in some cases because of many factors. Applying
+filters is obviously also impossible, since filters work on uncompressed data.
+
+
+
+
4 Stream selection# TOC
+
+
By default, ffmpeg
includes only one stream of each type (video, audio, subtitle)
+present in the input files and adds them to each output file. It picks the
+"best" of each based upon the following criteria: for video, it is the stream
+with the highest resolution, for audio, it is the stream with the most channels, for
+subtitles, it is the first subtitle stream. In the case where several streams of
+the same type rate equally, the stream with the lowest index is chosen.
+
+
You can disable some of those defaults by using the -vn/-an/-sn
options. For
+full manual control, use the -map
option, which disables the defaults just
+described.
+
+
+
+
5 Options# TOC
+
+
All the numerical options, if not specified otherwise, accept a string
+representing a number as input, which may be followed by one of the SI
+unit prefixes, for example: ’K’, ’M’, or ’G’.
+
+
If ’i’ is appended to the SI unit prefix, the complete prefix will be
+interpreted as a unit prefix for binary multiples, which are based on
+powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
+prefix multiplies the value by 8. This allows using, for example:
+’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
+
+
Options which do not take arguments are boolean options, and set the
+corresponding value to true. They can be set to false by prefixing
+the option name with "no". For example using "-nofoo"
+will set the boolean option with name "foo" to false.
+
+
+
5.1 Stream specifiers# TOC
+
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
+are used to precisely specify which stream(s) a given option belongs to.
+
+
A stream specifier is a string generally appended to the option name and
+separated from it by a colon. E.g. -codec:a:1 ac3
contains the
+a:1
stream specifier, which matches the second audio stream. Therefore, it
+would select the ac3 codec for the second audio stream.
+
+
A stream specifier can match several streams, so that the option is applied to all
+of them. E.g. the stream specifier in -b:a 128k
matches all audio
+streams.
+
+
An empty stream specifier matches all streams. For example, -codec copy
+or -codec: copy
would copy all the streams without reencoding.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index. E.g. -threads:1 4
would set the
+thread count for the second stream to 4.
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
+’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
+stream number stream_index of this type. Otherwise, it matches all
+streams of this type.
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number stream_index
+in the program with the id program_id . Otherwise, it matches all streams in the
+program.
+
+#stream_id or i:stream_id
+Match the stream by stream id (e.g. PID in MPEG-TS container).
+
+m:key [:value ]
+Matches streams with the metadata tag key having the specified value. If
+value is not given, matches streams that contain the given tag with any
+value.
+
+Note that in ffmpeg
, matching by metadata will only work properly for
+input files.
+
+
+
+
+
5.2 Generic options# TOC
+
+
These options are shared amongst the ff* tools.
+
+
+-L
+Show license.
+
+
+-h, -?, -help, --help [arg ]
+Show help. An optional parameter may be specified to print help about a specific
+item. If no argument is specified, only basic (non advanced) tool
+options are shown.
+
+Possible values of arg are:
+
+long
+Print advanced tool options in addition to the basic tool options.
+
+
+full
+Print complete list of options, including shared and private options
+for encoders, decoders, demuxers, muxers, filters, etc.
+
+
+decoder=decoder_name
+Print detailed information about the decoder named decoder_name . Use the
+-decoders option to get a list of all decoders.
+
+
+encoder=encoder_name
+Print detailed information about the encoder named encoder_name . Use the
+-encoders option to get a list of all encoders.
+
+
+demuxer=demuxer_name
+Print detailed information about the demuxer named demuxer_name . Use the
+-formats option to get a list of all demuxers and muxers.
+
+
+muxer=muxer_name
+Print detailed information about the muxer named muxer_name . Use the
+-formats option to get a list of all muxers and demuxers.
+
+
+filter=filter_name
+Print detailed information about the filter name filter_name . Use the
+-filters option to get a list of all filters.
+
+
+
+
+-version
+Show version.
+
+
+-formats
+Show available formats (including devices).
+
+
+-devices
+Show available devices.
+
+
+-codecs
+Show all codecs known to libavcodec.
+
+Note that the term ’codec’ is used throughout this documentation as a shortcut
+for what is more correctly called a media bitstream format.
+
+
+-decoders
+Show available decoders.
+
+
+-encoders
+Show all available encoders.
+
+
+-bsfs
+Show available bitstream filters.
+
+
+-protocols
+Show available protocols.
+
+
+-filters
+Show available libavfilter filters.
+
+
+-pix_fmts
+Show available pixel formats.
+
+
+-sample_fmts
+Show available sample formats.
+
+
+-layouts
+Show channel names and standard channel layouts.
+
+
+-colors
+Show recognized color names.
+
+
+-sources device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sources of the input device.
+Some devices may provide system-dependent source names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sources pulse,server=192.168.0.4
+
+
+
+-sinks device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sinks of the output device.
+Some devices may provide system-dependent sink names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sinks pulse,server=192.168.0.4
+
+
+
+-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+Set the logging level used by the library.
+Adding "repeat+" indicates that repeated log output should not be compressed
+to the first line and the "Last message repeated n times" line will be
+omitted. "repeat" can also be used alone.
+If "repeat" is used alone, and with no prior loglevel set, the default
+loglevel will be used. If multiple loglevel parameters are given, using
+’repeat’ will not change the loglevel.
+loglevel is a string or a number containing one of the following values:
+
+‘quiet, -8 ’
+Show nothing at all; be silent.
+
+‘panic, 0 ’
+Only show fatal errors which could lead the process to crash, such as
+an assert failure. This is not currently used for anything.
+
+‘fatal, 8 ’
+Only show fatal errors. These are errors after which the process absolutely
+cannot continue.
+
+‘error, 16 ’
+Show all errors, including ones which can be recovered from.
+
+‘warning, 24 ’
+Show all warnings and errors. Any message related to possibly
+incorrect or unexpected events will be shown.
+
+‘info, 32 ’
+Show informative messages during processing. This is in addition to
+warnings and errors. This is the default value.
+
+‘verbose, 40 ’
+Same as info
, except more verbose.
+
+‘debug, 48 ’
+Show everything, including debugging information.
+
+
+
+By default the program logs to stderr, if coloring is supported by the
+terminal, colors are used to mark errors and warnings. Log coloring
+can be disabled setting the environment variable
+AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
+the environment variable AV_LOG_FORCE_COLOR
.
+The use of the environment variable NO_COLOR
is deprecated and
+will be dropped in a following FFmpeg version.
+
+
+-report
+Dump full command line and console output to a file named
+program -YYYYMMDD -HHMMSS .log
in the current
+directory.
+This file can be useful for bug reports.
+It also implies -loglevel verbose
.
+
+Setting the environment variable FFREPORT
to any value has the
+same effect. If the value is a ’:’-separated key=value sequence, these
+options will affect the report; option values must be escaped if they
+contain special characters or the options delimiter ’:’ (see the
+“Quoting and escaping” section in the ffmpeg-utils manual).
+
+The following options are recognized:
+
+file
+set the file name to use for the report; %p
is expanded to the name
+of the program, %t
is expanded to a timestamp, %%
is expanded
+to a plain %
+
+level
+set the log verbosity level using a numerical value (see -loglevel
).
+
+
+
+For example, to output a report to a file named ffreport.log
+using a log level of 32
(alias for log level info
):
+
+
+
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
+
+
+Errors in parsing the environment variable are not fatal, and will not
+appear in the report.
+
+
+-hide_banner
+Suppress printing banner.
+
+All FFmpeg tools will normally show a copyright notice, build options
+and library versions. This option can be used to suppress printing
+this information.
+
+
+-cpuflags flags (global )
+Allows setting and clearing cpu flags. This option is intended
+for testing. Do not use it unless you know what you’re doing.
+
+
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+Possible flags for this option are:
+
+‘x86 ’
+
+‘mmx ’
+‘mmxext ’
+‘sse ’
+‘sse2 ’
+‘sse2slow ’
+‘sse3 ’
+‘sse3slow ’
+‘ssse3 ’
+‘atom ’
+‘sse4.1 ’
+‘sse4.2 ’
+‘avx ’
+‘xop ’
+‘fma4 ’
+‘3dnow ’
+‘3dnowext ’
+‘cmov ’
+
+
+‘ARM ’
+
+‘armv5te ’
+‘armv6 ’
+‘armv6t2 ’
+‘vfp ’
+‘vfpv3 ’
+‘neon ’
+
+
+‘PowerPC ’
+
+‘altivec ’
+
+
+‘Specific Processors ’
+
+‘pentium2 ’
+‘pentium3 ’
+‘pentium4 ’
+‘k6 ’
+‘k62 ’
+‘athlon ’
+‘athlonxp ’
+‘k8 ’
+
+
+
+
+
+-opencl_bench
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with --enable-opencl
.
+
+
+-opencl_options options (global )
+Set OpenCL environment options. This option is only available when
+FFmpeg has been compiled with --enable-opencl
.
+
+options must be a list of key =value option pairs
+separated by ’:’. See the “OpenCL Options” section in the
+ffmpeg-utils manual for the list of supported options.
+
+
+
+
+
5.3 AVOptions# TOC
+
+
These options are provided directly by the libavformat, libavdevice and
+libavcodec libraries. To see the list of available AVOptions, use the
+-help option. They are separated into two categories:
+
+generic
+These options can be set for any container, codec or device. Generic options
+are listed under AVFormatContext options for containers/devices and under
+AVCodecContext options for codecs.
+
+private
+These options are specific to the given container, device or codec. Private
+options are listed under their corresponding containers/devices/codecs.
+
+
+
+
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
+an MP3 file, use the id3v2_version private option of the MP3
+muxer:
+
+
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+
+
All codec AVOptions are per-stream, and thus a stream specifier
+should be attached to them.
+
+
Note: the -nooption syntax cannot be used for boolean
+AVOptions, use -option 0 /-option 1 .
+
+
Note: the old undocumented way of specifying per-stream AVOptions by
+prepending v/a/s to the options name is now obsolete and will be
+removed soon.
+
+
+
5.4 Main options# TOC
+
+
+-f fmt (input/output )
+Force input or output file format. The format is normally auto detected for input
+files and guessed from the file extension for output files, so this option is not
+needed in most cases.
+
+
+-i filename (input )
+input file name
+
+
+-y (global )
+Overwrite output files without asking.
+
+
+-n (global )
+Do not overwrite output files, and exit immediately if a specified
+output file already exists.
+
+
+-c[:stream_specifier ] codec (input/output,per-stream )
+-codec[:stream_specifier ] codec (input/output,per-stream )
+Select an encoder (when used before an output file) or a decoder (when used
+before an input file) for one or more streams. codec is the name of a
+decoder/encoder or a special value copy
(output only) to indicate that
+the stream is not to be re-encoded.
+
+For example
+
+
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
+
+encodes all video streams with libx264 and copies all audio streams.
+
+For each stream, the last matching c
option is applied, so
+
+
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
+
+will copy all the streams except the second video, which will be encoded with
+libx264, and the 138th audio, which will be encoded with libvorbis.
+
+
+-t duration (input/output )
+When used as an input option (before -i
), limit the duration of
+data read from the input file.
+
+When used as an output option (before an output filename), stop writing the
+output after its duration reaches duration .
+
+duration may be a number in seconds, or in hh:mm:ss[.xxx]
form.
+
+-to and -t are mutually exclusive and -t has priority.
+
+
+-to position (output )
+Stop writing the output at position .
+position may be a number in seconds, or in hh:mm:ss[.xxx]
form.
+
+-to and -t are mutually exclusive and -t has priority.
+
+
+-fs limit_size (output )
+Set the file size limit, expressed in bytes.
+
+
+-ss position (input/output )
+When used as an input option (before -i
), seeks in this input file to
+position . Note that in most formats it is not possible to seek exactly, so
+ffmpeg
will seek to the closest seek point before position .
+When transcoding and -accurate_seek is enabled (the default), this
+extra segment between the seek point and position will be decoded and
+discarded. When doing stream copy or when -noaccurate_seek is used, it
+will be preserved.
+
+When used as an output option (before an output filename), decodes but discards
+input until the timestamps reach position .
+
+position may be either in seconds or in hh:mm:ss[.xxx]
form.
+
+
+-itsoffset offset (input )
+Set the input time offset.
+
+offset must be a time duration specification,
+see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+The offset is added to the timestamps of the input files. Specifying
+a positive offset means that the corresponding streams are delayed by
+the time duration specified in offset .
+
+
+-timestamp date (output )
+Set the recording timestamp in the container.
+
+date must be a time duration specification,
+see (ffmpeg-utils)the Date section in the ffmpeg-utils(1) manual .
+
+
+-metadata[:metadata_specifier] key =value (output,per-metadata )
+Set a metadata key/value pair.
+
+An optional metadata_specifier may be given to set metadata
+on streams or chapters. See -map_metadata
documentation for
+details.
+
+This option overrides metadata set with -map_metadata
. It is
+also possible to delete metadata by using an empty value.
+
+For example, for setting the title in the output file:
+
+
ffmpeg -i in.avi -metadata title="my title" out.flv
+
+
+To set the language of the first audio stream:
+
+
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
+
+
+
+-target type (output )
+Specify target file type (vcd
, svcd
, dvd
, dv
,
+dv50
). type may be prefixed with pal-
, ntsc-
or
+film-
to use the corresponding standard. All the format options
+(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
+
+
+
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
+
+
+Nevertheless you can specify additional options as long as you know
+they do not conflict with the standard, as in:
+
+
+
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
+
+
+
+-dframes number (output )
+Set the number of data frames to output. This is an alias for -frames:d
.
+
+
+-frames[:stream_specifier ] framecount (output,per-stream )
+Stop writing to the stream after framecount frames.
+
+
+-q[:stream_specifier ] q (output,per-stream )
+-qscale[:stream_specifier ] q (output,per-stream )
+Use fixed quality scale (VBR). The meaning of q /qscale is
+codec-dependent.
+If qscale is used without a stream_specifier then it applies only
+to the video stream, this is to maintain compatibility with previous behavior
+and as specifying the same codec specific value to 2 different codecs that is
+audio and video generally is not what is intended when no stream_specifier is
+used.
+
+
+-filter[:stream_specifier ] filtergraph (output,per-stream )
+Create the filtergraph specified by filtergraph and use it to
+filter the stream.
+
+filtergraph is a description of the filtergraph to apply to
+the stream, and must have a single input and a single output of the
+same type of the stream. In the filtergraph, the input is associated
+to the label in
, and the output to the label out
. See
+the ffmpeg-filters manual for more information about the filtergraph
+syntax.
+
+See the -filter_complex option if you
+want to create filtergraphs with multiple inputs and/or outputs.
+
+
+-filter_script[:stream_specifier ] filename (output,per-stream )
+This option is similar to -filter , the only difference is that its
+argument is the name of the file from which a filtergraph description is to be
+read.
+
+
+-pre[:stream_specifier ] preset_name (output,per-stream )
+Specify the preset for matching stream(s).
+
+
+-stats (global )
+Print encoding progress/statistics. It is on by default, to explicitly
+disable it you need to specify -nostats
.
+
+
+-progress url (global )
+Send program-friendly progress information to url .
+
+Progress information is written approximately every second and at the end of
+the encoding process. It is made of "key =value " lines. key
+consists of only alphanumeric characters. The last key of a sequence of
+progress information is always "progress".
+
+
+-stdin
+Enable interaction on standard input. On by default unless standard input is
+used as an input. To explicitly disable interaction you need to specify
+-nostdin
.
+
+Disabling interaction on standard input is useful, for example, if
+ffmpeg is in the background process group. Roughly the same result can
+be achieved with ffmpeg ... < /dev/null
but it requires a
+shell.
+
+
+-debug_ts (global )
+Print timestamp information. It is off by default. This option is
+mostly useful for testing and debugging purposes, and the output
+format may change from one version to another, so it should not be
+employed by portable scripts.
+
+See also the option -fdebug ts
.
+
+
+-attach filename (output )
+Add an attachment to the output file. This is supported by a few formats
+like Matroska for e.g. fonts used in rendering subtitles. Attachments
+are implemented as a specific type of stream, so this option will add
+a new stream to the file. It is then possible to use per-stream options
+on this stream in the usual way. Attachment streams created with this
+option will be created after all the other streams (i.e. those created
+with -map
or automatic mappings).
+
+Note that for Matroska you also have to set the mimetype metadata tag:
+
+
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
+
+(assuming that the attachment stream will be third in the output file).
+
+
+-dump_attachment[:stream_specifier ] filename (input,per-stream )
+Extract the matching attachment stream into a file named filename . If
+filename is empty, then the value of the filename
metadata tag
+will be used.
+
+E.g. to extract the first attachment to a file named ’out.ttf’:
+
+
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
+
+To extract all attachments to files determined by the filename
tag:
+
+
ffmpeg -dump_attachment:t "" -i INPUT
+
+
+Technical note – attachments are implemented as codec extradata, so this
+option can actually be used to extract extradata from any stream, not just
+attachments.
+
+
+
+
+
+
5.5 Video Options# TOC
+
+
+-vframes number (output )
+Set the number of video frames to output. This is an alias for -frames:v
.
+
+-r[:stream_specifier ] fps (input/output,per-stream )
+Set frame rate (Hz value, fraction or abbreviation).
+
+As an input option, ignore any timestamps stored in the file and instead
+generate timestamps assuming constant frame rate fps .
+This is not the same as the -framerate option used for some input formats
+like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
+If in doubt use -framerate instead of the input option -r .
+
+As an output option, duplicate or drop input frames to achieve constant output
+frame rate fps .
+
+
+-s[:stream_specifier ] size (input/output,per-stream )
+Set frame size.
+
+As an input option, this is a shortcut for the video_size private
+option, recognized by some demuxers for which the frame size is either not
+stored in the file or is configurable – e.g. raw video or video grabbers.
+
+As an output option, this inserts the scale
video filter to the
+end of the corresponding filtergraph. Please use the scale
filter
+directly to insert it at the beginning or some other place.
+
+The format is ‘wxh ’ (default - same as source).
+
+
+-aspect[:stream_specifier ] aspect (output,per-stream )
+Set the video display aspect ratio specified by aspect .
+
+aspect can be a floating point number string, or a string of the
+form num :den , where num and den are the
+numerator and denominator of the aspect ratio. For example "4:3",
+"16:9", "1.3333", and "1.7777" are valid argument values.
+
+If used together with -vcodec copy , it will affect the aspect ratio
+stored at container level, but not the aspect ratio stored in encoded
+frames, if it exists.
+
+
+-vn (output )
+Disable video recording.
+
+
+-vcodec codec (output )
+Set the video codec. This is an alias for -codec:v
.
+
+
+-pass[:stream_specifier ] n (output,per-stream )
+Select the pass number (1 or 2). It is used to do two-pass
+video encoding. The statistics of the video are recorded in the first
+pass into a log file (see also the option -passlogfile),
+and in the second pass that log file is used to generate the video
+at the exact requested bitrate.
+On pass 1, you may just deactivate audio and set output to null,
+examples for Windows and Unix:
+
+
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
+ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
+
+
+
+-passlogfile[:stream_specifier ] prefix (output,per-stream )
+Set two-pass log file name prefix to prefix , the default file name
+prefix is “ffmpeg2pass”. The complete file name will be
+PREFIX-N.log , where N is a number specific to the output
+stream
+
+
+-vf filtergraph (output )
+Create the filtergraph specified by filtergraph and use it to
+filter the stream.
+
+This is an alias for -filter:v
, see the -filter option .
+
+
+
+
+
5.6 Advanced Video options# TOC
+
+
+-pix_fmt[:stream_specifier ] format (input/output,per-stream )
+Set pixel format. Use -pix_fmts
to show all the supported
+pixel formats.
+If the selected pixel format can not be selected, ffmpeg will print a
+warning and select the best pixel format supported by the encoder.
+If pix_fmt is prefixed by a +
, ffmpeg will exit with an error
+if the requested pixel format can not be selected, and automatic conversions
+inside filtergraphs are disabled.
+If pix_fmt is a single +
, ffmpeg selects the same pixel format
+as the input (or graph output) and automatic conversions are disabled.
+
+
+-sws_flags flags (input/output )
+Set SwScaler flags.
+
+-vdt n
+Discard threshold.
+
+
+-rc_override[:stream_specifier ] override (output,per-stream )
+Rate control override for specific intervals, formatted as "int,int,int"
+list separated with slashes. Two first values are the beginning and
+end frame numbers, last one is quantizer to use if positive, or quality
+factor if negative.
+
+
+-ilme
+Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
+Use this option if your input file is interlaced and you want
+to keep the interlaced format for minimum losses.
+The alternative is to deinterlace the input stream with
+-deinterlace , but deinterlacing introduces losses.
+
+-psnr
+Calculate PSNR of compressed frames.
+
+-vstats
+Dump video coding statistics to vstats_HHMMSS.log .
+
+-vstats_file file
+Dump video coding statistics to file .
+
+-top[:stream_specifier ] n (output,per-stream )
+top=1/bottom=0/auto=-1 field first
+
+-dc precision
+Intra_dc_precision.
+
+-vtag fourcc/tag (output )
+Force video tag/fourcc. This is an alias for -tag:v
.
+
+-qphist (global )
+Show QP histogram
+
+-vbsf bitstream_filter
+Deprecated see -bsf
+
+
+-force_key_frames[:stream_specifier ] time [,time ...] (output,per-stream )
+-force_key_frames[:stream_specifier ] expr:expr (output,per-stream )
+Force key frames at the specified timestamps, more precisely at the first
+frames after each specified time.
+
+If the argument is prefixed with expr:
, the string expr
+is interpreted like an expression and is evaluated for each frame. A
+key frame is forced in case the evaluation is non-zero.
+
+If one of the times is "chapters[delta ]", it is expanded into
+the time of the beginning of all chapters in the file, shifted by
+delta , expressed as a time in seconds.
+This option can be useful to ensure that a seek point is present at a
+chapter mark or any other designated place in the output file.
+
+For example, to insert a key frame at 5 minutes, plus key frames 0.1 second
+before the beginning of every chapter:
+
+
-force_key_frames 0:05:00,chapters-0.1
+
+
+The expression in expr can contain the following constants:
+
+n
+the number of the currently processed frame, starting from 0
+
+n_forced
+the number of forced frames
+
+prev_forced_n
+the number of the previous forced frame; it is NAN when no
+keyframe was forced yet
+
+prev_forced_t
+the time of the previous forced frame; it is NAN when no
+keyframe was forced yet
+
+t
+the time of the current processed frame
+
+
+
+For example to force a key frame every 5 seconds, you can specify:
+
+
-force_key_frames expr:gte(t,n_forced*5)
+
+
+To force a key frame 5 seconds after the time of the last forced one,
+starting from second 13:
+
+
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
+
+
+Note that forcing too many keyframes is very harmful for the lookahead
+algorithms of certain encoders: using fixed-GOP options or similar
+would be more efficient.
+
+
+-copyinkf[:stream_specifier ] (output,per-stream )
+When doing stream copy, copy also non-key frames found at the
+beginning.
+
+
+-hwaccel[:stream_specifier ] hwaccel (input,per-stream )
+Use hardware acceleration to decode the matching stream(s). The allowed values
+of hwaccel are:
+
+none
+Do not use any hardware acceleration (the default).
+
+
+auto
+Automatically select the hardware acceleration method.
+
+
+vda
+Use Apple VDA hardware acceleration.
+
+
+vdpau
+Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
+
+
+dxva2
+Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
+
+
+
+This option has no effect if the selected hwaccel is not available or not
+supported by the chosen decoder.
+
+Note that most acceleration methods are intended for playback and will not be
+faster than software decoding on modern CPUs. Additionally, ffmpeg
+will usually need to copy the decoded frames from the GPU memory into the system
+memory, resulting in further performance loss. This option is thus mainly
+useful for testing.
+
+
+-hwaccel_device[:stream_specifier ] hwaccel_device (input,per-stream )
+Select a device to use for hardware acceleration.
+
+This option only makes sense when the -hwaccel option is also
+specified. Its exact meaning depends on the specific hardware acceleration
+method chosen.
+
+
+vdpau
+For VDPAU, this option specifies the X11 display/screen to use. If this option
+is not specified, the value of the DISPLAY environment variable is used.
+
+
+dxva2
+For DXVA2, this option should contain the number of the display adapter to use.
+If this option is not specified, the default adapter is used.
+
+
+
+
+
+
+
5.7 Audio Options# TOC
+
+
+-aframes number (output )
+Set the number of audio frames to output. This is an alias for -frames:a
.
+
+-ar[:stream_specifier ] freq (input/output,per-stream )
+Set the audio sampling frequency. For output streams it is set by
+default to the frequency of the corresponding input stream. For input
+streams this option only makes sense for audio grabbing devices and raw
+demuxers and is mapped to the corresponding demuxer options.
+
+-aq q (output )
+Set the audio quality (codec-specific, VBR). This is an alias for -q:a.
+
+-ac[:stream_specifier ] channels (input/output,per-stream )
+Set the number of audio channels. For output streams it is set by
+default to the number of input audio channels. For input streams
+this option only makes sense for audio grabbing devices and raw demuxers
+and is mapped to the corresponding demuxer options.
+
+-an (output )
+Disable audio recording.
+
+-acodec codec (input/output )
+Set the audio codec. This is an alias for -codec:a
.
+
+-sample_fmt[:stream_specifier ] sample_fmt (output,per-stream )
+Set the audio sample format. Use -sample_fmts
to get a list
+of supported sample formats.
+
+
+-af filtergraph (output )
+Create the filtergraph specified by filtergraph and use it to
+filter the stream.
+
+This is an alias for -filter:a
, see the -filter option .
+
+
+
+
+
5.8 Advanced Audio options# TOC
+
+
+-atag fourcc/tag (output )
+Force audio tag/fourcc. This is an alias for -tag:a
.
+
+-absf bitstream_filter
+Deprecated, see -bsf
+
+-guess_layout_max channels (input,per-stream )
+If some input channel layout is not known, try to guess only if it
+corresponds to at most the specified number of channels. For example, 2
+tells to ffmpeg
to recognize 1 channel as mono and 2 channels as
+stereo but not 6 channels as 5.1. The default is to always try to guess. Use
+0 to disable all guessing.
+
+
+
+
+
5.9 Subtitle options# TOC
+
+
+-scodec codec (input/output )
+Set the subtitle codec. This is an alias for -codec:s
.
+
+-sn (output )
+Disable subtitle recording.
+
+-sbsf bitstream_filter
+Deprecated, see -bsf
+
+
+
+
+
5.10 Advanced Subtitle options# TOC
+
+
+-fix_sub_duration
+Fix subtitles durations. For each subtitle, wait for the next packet in the
+same stream and adjust the duration of the first to avoid overlap. This is
+necessary with some subtitles codecs, especially DVB subtitles, because the
+duration in the original packet is only a rough estimate and the end is
+actually marked by an empty subtitle frame. Failing to use this option when
+necessary can result in exaggerated durations or muxing failures due to
+non-monotonic timestamps.
+
+Note that this option will delay the output of all data until the next
+subtitle packet is decoded: it may increase memory consumption and latency a
+lot.
+
+
+-canvas_size size
+Set the size of the canvas used to render subtitles.
+
+
+
+
+
+
5.11 Advanced options# TOC
+
+
+-map [-]input_file_id [:stream_specifier ][,sync_file_id [:stream_specifier ]] | [linklabel] (output )
+
+Designate one or more input streams as a source for the output file. Each input
+stream is identified by the input file index input_file_id and
+the input stream index input_stream_id within the input
+file. Both indices start at 0. If specified,
+sync_file_id :stream_specifier sets which input stream
+is used as a presentation sync reference.
+
+The first -map
option on the command line specifies the
+source for output stream 0, the second -map
option specifies
+the source for output stream 1, etc.
+
+A -
character before the stream identifier creates a "negative" mapping.
+It disables matching streams from already created mappings.
+
+An alternative [linklabel] form will map outputs from complex filter
+graphs (see the -filter_complex option) to the output file.
+linklabel must correspond to a defined output link label in the graph.
+
+For example, to map ALL streams from the first input file to output
+
+
ffmpeg -i INPUT -map 0 output
+
+
+For example, if you have two audio streams in the first input file,
+these streams are identified by "0:0" and "0:1". You can use
+-map
to select which streams to place in an output file. For
+example:
+
+
ffmpeg -i INPUT -map 0:1 out.wav
+
+will map the input stream in INPUT identified by "0:1" to
+the (single) output stream in out.wav .
+
+For example, to select the stream with index 2 from input file
+a.mov (specified by the identifier "0:2"), and stream with
+index 6 from input b.mov (specified by the identifier "1:6"),
+and copy them to the output file out.mov :
+
+
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
+
+
+To select all video and the third audio stream from an input file:
+
+
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
+
+
+To map all the streams except the second audio, use negative mappings
+
+
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
+
+
+To pick the English audio stream:
+
+
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
+
+
+Note that using this option disables the default mappings for this output file.
+
+
+-map_channel [input_file_id .stream_specifier .channel_id |-1][:output_file_id .stream_specifier ]
+Map an audio channel from a given input to an output. If
+output_file_id .stream_specifier is not set, the audio channel will
+be mapped on all the audio streams.
+
+Using "-1" instead of
+input_file_id .stream_specifier .channel_id will map a muted
+channel.
+
+For example, assuming INPUT is a stereo audio file, you can switch the
+two audio channels with the following command:
+
+
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
+
+
+If you want to mute the first channel and keep the second:
+
+
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
+
+
+The order of the "-map_channel" option specifies the order of the channels in
+the output stream. The output channel layout is guessed from the number of
+channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
+in combination with "-map_channel" causes the channel gain levels to be updated if
+input and output channel layouts don’t match (for instance two "-map_channel"
+options and "-ac 6").
+
+You can also extract each channel of an input to specific outputs; the following
+command extracts two channels of the INPUT audio stream (file 0, stream 0)
+to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs:
+
+
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
+
+
+The following example splits the channels of a stereo input into two separate
+streams, which are put into the same output file:
+
+
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
+
+
+Note that currently each output stream can only contain channels from a single
+input stream; you can’t for example use "-map_channel" to pick multiple input
+audio channels contained in different streams (from the same or different files)
+and merge them into a single output stream. It is therefore not currently
+possible, for example, to turn two separate mono streams into a single stereo
+stream. However splitting a stereo stream into two single channel mono streams
+is possible.
+
+If you need this feature, a possible workaround is to use the amerge
+filter. For example, if you need to merge a media (here input.mkv ) with 2
+mono audio streams into one single stereo channel audio stream (and keep the
+video stream), you can use the following command:
+
+
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
+
+
+
+-map_metadata[:metadata_spec_out ] infile [:metadata_spec_in ] (output,per-metadata )
+Set metadata information of the next output file from infile . Note that
+those are file indices (zero-based), not filenames.
+Optional metadata_spec_in/out parameters specify, which metadata to copy.
+A metadata specifier can have the following forms:
+
+g
+global metadata, i.e. metadata that applies to the whole file
+
+
+s [:stream_spec ]
+per-stream metadata. stream_spec is a stream specifier as described
+in the Stream specifiers chapter. In an input metadata specifier, the first
+matching stream is copied from. In an output metadata specifier, all matching
+streams are copied to.
+
+
+c :chapter_index
+per-chapter metadata. chapter_index is the zero-based chapter index.
+
+
+p :program_index
+per-program metadata. program_index is the zero-based program index.
+
+
+If metadata specifier is omitted, it defaults to global.
+
+By default, global metadata is copied from the first input file,
+per-stream and per-chapter metadata is copied along with streams/chapters. These
+default mappings are disabled by creating any mapping of the relevant type. A negative
+file index can be used to create a dummy mapping that just disables automatic copying.
+
+For example to copy metadata from the first stream of the input file to global metadata
+of the output file:
+
+
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
+
+
+To do the reverse, i.e. copy global metadata to all audio streams:
+
+
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
+
+Note that simple 0
would work as well in this example, since global
+metadata is assumed by default.
+
+
+-map_chapters input_file_index (output )
+Copy chapters from input file with index input_file_index to the next
+output file. If no chapter mapping is specified, then chapters are copied from
+the first input file with at least one chapter. Use a negative file index to
+disable any chapter copying.
+
+
+-benchmark (global )
+Show benchmarking information at the end of an encode.
+Shows CPU time used and maximum memory consumption.
+Maximum memory consumption is not supported on all systems,
+it will usually display as 0 if not supported.
+
+-benchmark_all (global )
+Show benchmarking information during the encode.
+Shows CPU time used in various steps (audio/video encode/decode).
+
+-timelimit duration (global )
+Exit after ffmpeg has been running for duration seconds.
+
+-dump (global )
+Dump each input packet to stderr.
+
+-hex (global )
+When dumping packets, also dump the payload.
+
+-re (input )
+Read input at native frame rate. Mainly used to simulate a grab device
+or live input stream (e.g. when reading from a file). Should not be used
+with actual grab devices or live input streams (where it can cause packet
+loss).
+By default ffmpeg
attempts to read the input(s) as fast as possible.
+This option will slow down the reading of the input(s) to the native frame rate
+of the input(s). It is useful for real-time output (e.g. live streaming).
+
+-loop_input
+Loop over the input stream. Currently it works only for image
+streams. This option is used for automatic FFserver testing.
+This option is deprecated, use -loop 1.
+
+-loop_output number_of_times
+Repeatedly loop output for formats that support looping such as animated GIF
+(0 will loop the output infinitely).
+This option is deprecated, use -loop.
+
+-vsync parameter
+Video sync method.
+For compatibility reasons old values can be specified as numbers.
+Newly added values will have to be specified as strings always.
+
+
+0, passthrough
+Each frame is passed with its timestamp from the demuxer to the muxer.
+
+1, cfr
+Frames will be duplicated and dropped to achieve exactly the requested
+constant frame rate.
+
+2, vfr
+Frames are passed through with their timestamp or dropped so as to
+prevent 2 frames from having the same timestamp.
+
+drop
+As passthrough but destroys all timestamps, making the muxer generate
+fresh timestamps based on frame-rate.
+
+-1, auto
+Chooses between 1 and 2 depending on muxer capabilities. This is the
+default method.
+
+
+
+Note that the timestamps may be further modified by the muxer, after this.
+For example, in the case that the format option avoid_negative_ts
+is enabled.
+
+With -map you can select from which stream the timestamps should be
+taken. You can leave either video or audio unchanged and sync the
+remaining stream(s) to the unchanged one.
+
+
+-async samples_per_second
+Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
+the parameter is the maximum samples per second by which the audio is changed.
+-async 1 is a special case where only the start of the audio stream is corrected
+without any later correction.
+
+Note that the timestamps may be further modified by the muxer, after this.
+For example, in the case that the format option avoid_negative_ts
+is enabled.
+
+This option has been deprecated. Use the aresample
audio filter instead.
+
+
+-copyts
+Do not process input timestamps, but keep their values without trying
+to sanitize them. In particular, do not remove the initial start time
+offset value.
+
+Note that, depending on the vsync option or on specific muxer
+processing (e.g. in case the format option avoid_negative_ts
+is enabled) the output timestamps may mismatch with the input
+timestamps even when this option is selected.
+
+
+-start_at_zero
+When used with copyts , shift input timestamps so they start at zero.
+
+This means that using e.g. -ss 50
will make output timestamps start at
+50 seconds, regardless of what timestamp the input file started at.
+
+
+-copytb mode
+Specify how to set the encoder timebase when stream copying. mode is an
+integer numeric value, and can assume one of the following values:
+
+
+1
+Use the demuxer timebase.
+
+The time base is copied to the output encoder from the corresponding input
+demuxer. This is sometimes required to avoid non monotonically increasing
+timestamps when copying video streams with variable frame rate.
+
+
+0
+Use the decoder timebase.
+
+The time base is copied to the output encoder from the corresponding input
+decoder.
+
+
+-1
+Try to make the choice automatically, in order to generate a sane output.
+
+
+
+Default value is -1.
+
+
+-shortest (output )
+Finish encoding when the shortest input stream ends.
+
+-dts_delta_threshold
+Timestamp discontinuity delta threshold.
+
+-muxdelay seconds (input )
+Set the maximum demux-decode delay.
+
+-muxpreload seconds (input )
+Set the initial demux-decode delay.
+
+-streamid output-stream-index :new-value (output )
+Assign a new stream-id value to an output stream. This option should be
+specified prior to the output filename to which it applies.
+For the situation where multiple output files exist, a streamid
+may be reassigned to a different value.
+
+For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
+an output mpegts file:
+
+
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
+
+
+
+-bsf[:stream_specifier ] bitstream_filters (output,per-stream )
+Set bitstream filters for matching streams. bitstream_filters is
+a comma-separated list of bitstream filters. Use the -bsfs
option
+to get the list of bitstream filters.
+
+
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
+
+
+
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
+
+
+
+-tag[:stream_specifier ] codec_tag (input/output,per-stream )
+Force a tag/fourcc for matching streams.
+
+
+-timecode hh :mm :ss SEPff
+Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’
+(or ’.’) for drop.
+
+
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
+
+
+
+-filter_complex filtergraph (global )
+Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
+outputs. For simple graphs – those with one input and one output of the same
+type – see the -filter options. filtergraph is a description of
+the filtergraph, as described in the “Filtergraph syntax” section of the
+ffmpeg-filters manual.
+
+Input link labels must refer to input streams using the
+[file_index:stream_specifier]
syntax (i.e. the same as -map
+uses). If stream_specifier matches multiple streams, the first one will be
+used. An unlabeled input will be connected to the first unused input stream of
+the matching type.
+
+Output link labels are referred to with -map . Unlabeled outputs are
+added to the first output file.
+
+Note that with this option it is possible to use only lavfi sources without
+normal input files.
+
+For example, to overlay an image over video
+
+
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
+'[out]' out.mkv
+
+Here [0:v]
refers to the first video stream in the first input file,
+which is linked to the first (main) input of the overlay filter. Similarly the
+first video stream in the second input is linked to the second (overlay) input
+of overlay.
+
+Assuming there is only one video stream in each input file, we can omit input
+labels, so the above is equivalent to
+
+
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
+'[out]' out.mkv
+
+
+Furthermore we can omit the output label and the single output from the filter
+graph will be added to the output file automatically, so we can simply write
+
+
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
+
+
+To generate 5 seconds of pure red video using lavfi color
source:
+
+
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
+
+
+
+-lavfi filtergraph (global )
+Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
+outputs. Equivalent to -filter_complex .
+
+
+-filter_complex_script filename (global )
+This option is similar to -filter_complex , the only difference is that
+its argument is the name of the file from which a complex filtergraph
+description is to be read.
+
+
+-accurate_seek (input )
+This option enables or disables accurate seeking in input files with the
+-ss option. It is enabled by default, so seeking is accurate when
+transcoding. Use -noaccurate_seek to disable it, which may be useful
+e.g. when copying some streams and transcoding the others.
+
+
+-override_ffserver (global )
+Overrides the input specifications from ffserver
. Using this
+option you can map any input stream to ffserver
and control
+many aspects of the encoding from ffmpeg
. Without this
+option ffmpeg
will transmit to ffserver
what is
+requested by ffserver
.
+
+The option is intended for cases where features are needed that cannot be
+specified to ffserver
but can be to ffmpeg
.
+
+
+-sdp_file file (global )
+Print sdp information to file .
+This allows dumping sdp information when at least one output isn’t an
+rtp stream.
+
+
+-discard (input )
+Allows discarding specific streams or frames of streams at the demuxer.
+Not all demuxers support this.
+
+
+none
+Discard no frame.
+
+
+default
+Default, which discards no frames.
+
+
+noref
+Discard all non-reference frames.
+
+
+bidir
+Discard all bidirectional frames.
+
+
+nokey
+Discard all frames except keyframes.
+
+
+all
+Discard all frames.
+
+
+
+
+
+
+
As a special exception, you can use a bitmap subtitle stream as input: it
+will be converted into a video with the same size as the largest video in
+the file, or 720x576 if no video is present. Note that this is an
+experimental and temporary solution. It will be removed once libavfilter has
+proper support for subtitles.
+
+
For example, to hardcode subtitles on top of a DVB-T recording stored in
+MPEG-TS format, delaying the subtitles by 1 second:
+
+
ffmpeg -i input.ts -filter_complex \
+ '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
+ -sn -map '#0x2dc' output.mkv
+
+
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
+audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
+
+
+
5.12 Preset files# TOC
+
A preset file contains a sequence of option =value pairs,
+one for each line, specifying a sequence of options which would be
+awkward to specify on the command line. Lines starting with the hash
+(’#’) character are ignored and are used to provide comments. Check
+the presets directory in the FFmpeg source tree for examples.
+
+
There are two types of preset files: ffpreset and avpreset files.
+
+
+
5.12.1 ffpreset files# TOC
+
ffpreset files are specified with the vpre
, apre
,
+spre
, and fpre
options. The fpre
option takes the
+filename of the preset instead of a preset name as input and can be
+used for any kind of codec. For the vpre
, apre
, and
+spre
options, the options specified in a preset file are
+applied to the currently selected codec of the same type as the preset
+option.
+
+
The argument passed to the vpre
, apre
, and spre
+preset options identifies the preset file to use according to the
+following rules:
+
+
First ffmpeg searches for a file named arg .ffpreset in the
+directories $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg , and in
+the datadir defined at configuration time (usually PREFIX/share/ffmpeg )
+or in a ffpresets folder along the executable on win32,
+in that order. For example, if the argument is libvpx-1080p
, it will
+search for the file libvpx-1080p.ffpreset .
+
+
If no such file is found, then ffmpeg will search for a file named
+codec_name -arg .ffpreset in the above-mentioned
+directories, where codec_name is the name of the codec to which
+the preset file options will be applied. For example, if you select
+the video codec with -vcodec libvpx
and use -vpre 1080p
,
+then it will search for the file libvpx-1080p.ffpreset .
+
+
+
5.12.2 avpreset files# TOC
+
avpreset files are specified with the pre
option. They work similar to
+ffpreset files, but they only allow encoder-specific options. Therefore, an
+option =value pair specifying an encoder cannot be used.
+
+
When the pre
option is specified, ffmpeg will look for files with the
+suffix .avpreset in the directories $AVCONV_DATADIR (if set), and
+$HOME/.avconv , and in the datadir defined at configuration time (usually
+PREFIX/share/ffmpeg ), in that order.
+
+
First ffmpeg searches for a file named codec_name -arg .avpreset in
+the above-mentioned directories, where codec_name is the name of the codec
+to which the preset file options will be applied. For example, if you select the
+video codec with -vcodec libvpx
and use -pre 1080p
, then it will
+search for the file libvpx-1080p.avpreset .
+
+
If no such file is found, then ffmpeg will search for a file named
+arg .avpreset in the same directories.
+
+
+
+
+
+
+ For streaming at very low bitrates, use a low frame rate
+and a small GOP size. This is especially true for RealVideo where
+the Linux player does not seem to be very fast, so it can miss
+frames. An example is:
+
+
+
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
+
+
+ The parameter ’q’ which is displayed while encoding is the current
+quantizer. The value 1 indicates that a very good quality could
+be achieved. The value 31 indicates the worst quality. If q=31 appears
+too often, it means that the encoder cannot compress enough to meet
+your bitrate. You must either increase the bitrate, decrease the
+frame rate or decrease the frame size.
+
+ If your computer is not fast enough, you can speed up the
+compression at the expense of the compression ratio. You can use
+’-me zero’ to speed up motion estimation, and ’-g 0’ to disable
+motion estimation completely (you have only I-frames, which means it
+is about as good as JPEG compression).
+
+ To have very low audio bitrates, reduce the sampling frequency
+(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
+
+ To have a constant quality (but a variable bitrate), use the option
+’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst
+quality).
+
+
+
+
+
7 Examples# TOC
+
+
+
7.1 Video and Audio grabbing# TOC
+
+
If you specify the input format and device then ffmpeg can grab video
+and audio directly.
+
+
+
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+
+
Or with an ALSA audio source (mono input, card id 1) instead of OSS:
+
+
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+
+
Note that you must activate the right video source and channel before
+launching ffmpeg with any TV viewer such as
+xawtv by Gerd Knorr. You also
+have to set the audio recording levels correctly with a
+standard mixer.
+
+
+
7.2 X11 grabbing# TOC
+
+
Grab the X11 display with ffmpeg via
+
+
+
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
+
+
+
0.0 is display.screen number of your X11 server, same as
+the DISPLAY environment variable.
+
+
+
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
+
+
+
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
+variable. 10 is the x-offset and 20 the y-offset for the grabbing.
+
+
+
7.3 Video and Audio file format conversion# TOC
+
+
Any supported file format and protocol can serve as input to ffmpeg:
+
+
Examples:
+
+ You can use YUV files as input:
+
+
+
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
+
+
+It will use the files:
+
+
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
+/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
+
+
+The Y files use twice the resolution of the U and V files. They are
+raw files, without header. They can be generated by all decent video
+decoders. You must specify the size of the image with the -s option
+if ffmpeg cannot guess it.
+
+ You can input from a raw YUV420P file:
+
+
+
ffmpeg -i /tmp/test.yuv /tmp/out.avi
+
+
+test.yuv is a file containing raw YUV planar data. Each frame is composed
+of the Y plane followed by the U and V planes at half vertical and
+horizontal resolution.
+
+ You can output to a raw YUV420P file:
+
+
+
ffmpeg -i mydivx.avi hugefile.yuv
+
+
+ You can set several input files and output files:
+
+
+
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
+
+
+Converts the audio file a.wav and the raw YUV video file a.yuv
+to MPEG file a.mpg.
+
+ You can also do audio and video conversions at the same time:
+
+
+
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
+
+
+Converts a.wav to MPEG audio at 22050 Hz sample rate.
+
+ You can encode to several formats at the same time and define a
+mapping from input stream to output streams:
+
+
+
ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
+
+
+Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map
+file:index’ specifies which input stream is used for each output
+stream, in the order of the definition of output streams.
+
+ You can transcode decrypted VOBs:
+
+
+
ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
+
+
+This is a typical DVD ripping example; the input is a VOB file, the
+output an AVI file with MPEG-4 video and MP3 audio. Note that in this
+command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
+GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
+input video. Furthermore, the audio stream is MP3-encoded so you need
+to enable LAME support by passing --enable-libmp3lame
to configure.
+The mapping is particularly useful for DVD transcoding
+to get the desired audio language.
+
+NOTE: To see the supported input formats, use ffmpeg -formats
.
+
+ You can extract images from a video, or create a video from many images:
+
+For extracting images from a video:
+
+
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
+
+
+This will extract one video frame per second from the video and will
+output them in files named foo-001.jpeg , foo-002.jpeg ,
+etc. Images will be rescaled to fit the new WxH values.
+
+If you want to extract just a limited number of frames, you can use the
+above command in combination with the -vframes or -t option, or in
+combination with -ss to start extracting from a certain point in time.
+
+For creating a video from many images:
+
+
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
+
+
+The syntax foo-%03d.jpeg
specifies to use a decimal number
+composed of three digits padded with zeroes to express the sequence
+number. It is the same syntax supported by the C printf function, but
+only formats accepting a normal integer are suitable.
+
+When importing an image sequence, -i also supports expanding
+shell-like wildcard patterns (globbing) internally, by selecting the
+image2-specific -pattern_type glob
option.
+
+For example, for creating a video from filenames matching the glob pattern
+foo-*.jpeg
:
+
+
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
+
+
+ You can put many streams of the same type in the output:
+
+
+
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
+
+
+The resulting output file test12.nut will contain the first four streams
+from the input files in reverse order.
+
+ To force CBR video output:
+
+
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
+
+
+ The four options lmin, lmax, mblmin and mblmax use ’lambda’ units,
+but you may use the QP2LAMBDA constant to easily convert from ’q’ units:
+
+
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
+
+
+
+
+
+
8 Syntax# TOC
+
+
This section documents the syntax and formats employed by the FFmpeg
+libraries and tools.
+
+
+
8.1 Quoting and escaping# TOC
+
+
FFmpeg adopts the following quoting and escaping mechanism, unless
+explicitly specified. The following rules are applied:
+
+
+ '
and \
are special characters (respectively used for
+quoting and escaping). In addition to them, there might be other
+special characters depending on the specific syntax where the escaping
+and quoting are employed.
+
+ A special character is escaped by prefixing it with a ’\’.
+
+ All characters enclosed between '' are included literally in the
+parsed string. The quote character '
itself cannot be quoted,
+so you may need to close the quote and escape it.
+
+ Leading and trailing whitespaces, unless escaped or quoted, are
+removed from the parsed string.
+
+
+
Note that you may need to add a second level of escaping when using
+the command line or a script, which depends on the syntax of the
+adopted shell language.
+
+
The function av_get_token
defined in
+libavutil/avstring.h can be used to parse a token quoted or
+escaped according to the rules defined above.
+
+
The tool tools/ffescape in the FFmpeg source tree can be used
+to automatically quote or escape a string in a script.
+
+
+
8.1.1 Examples# TOC
+
+
+ Escape the string Crime d'Amour
containing the '
special
+character:
+
+
+ The string above contains a quote, so the '
needs to be escaped
+when quoting it:
+
+
+ Include leading or trailing whitespaces using quoting:
+
+
' this string starts and ends with whitespaces '
+
+
+ Escaping and quoting can be mixed together:
+
+
' The string '\'string\'' is a string '
+
+
+ To include a literal \
you can use either escaping or quoting:
+
+
'c:\foo' can be written as c:\\foo
+
+
+
+
+
8.2 Date# TOC
+
+
The accepted syntax is:
+
+
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+
+
If the value is "now" it takes the current time.
+
+
Time is local time unless Z is appended, in which case it is
+interpreted as UTC.
+If the year-month-day part is not specified it takes the current
+year-month-day.
+
+
+
8.3 Time duration# TOC
+
+
There are two accepted syntaxes for expressing time duration.
+
+
+
+
HH expresses the number of hours, MM the number of minutes
+for a maximum of 2 digits, and SS the number of seconds for a
+maximum of 2 digits. The m at the end expresses decimal value for
+SS .
+
+
or
+
+
+
+
S expresses the number of seconds, with the optional decimal part
+m .
+
+
In both expressions, the optional ‘- ’ indicates negative duration.
+
+
+
8.3.1 Examples# TOC
+
+
The following examples are all valid time duration:
+
+
+‘55 ’
+55 seconds
+
+
+‘12:03:45 ’
+12 hours, 03 minutes and 45 seconds
+
+
+‘23.189 ’
+23.189 seconds
+
+
+
+
+
8.4 Video size# TOC
+
Specify the size of the sourced video, it may be a string of the form
+width xheight , or the name of a size abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+720x480
+
+‘pal ’
+720x576
+
+‘qntsc ’
+352x240
+
+‘qpal ’
+352x288
+
+‘sntsc ’
+640x480
+
+‘spal ’
+768x576
+
+‘film ’
+352x240
+
+‘ntsc-film ’
+352x240
+
+‘sqcif ’
+128x96
+
+‘qcif ’
+176x144
+
+‘cif ’
+352x288
+
+‘4cif ’
+704x576
+
+‘16cif ’
+1408x1152
+
+‘qqvga ’
+160x120
+
+‘qvga ’
+320x240
+
+‘vga ’
+640x480
+
+‘svga ’
+800x600
+
+‘xga ’
+1024x768
+
+‘uxga ’
+1600x1200
+
+‘qxga ’
+2048x1536
+
+‘sxga ’
+1280x1024
+
+‘qsxga ’
+2560x2048
+
+‘hsxga ’
+5120x4096
+
+‘wvga ’
+852x480
+
+‘wxga ’
+1366x768
+
+‘wsxga ’
+1600x1024
+
+‘wuxga ’
+1920x1200
+
+‘woxga ’
+2560x1600
+
+‘wqsxga ’
+3200x2048
+
+‘wquxga ’
+3840x2400
+
+‘whsxga ’
+6400x4096
+
+‘whuxga ’
+7680x4800
+
+‘cga ’
+320x200
+
+‘ega ’
+640x350
+
+‘hd480 ’
+852x480
+
+‘hd720 ’
+1280x720
+
+‘hd1080 ’
+1920x1080
+
+‘2k ’
+2048x1080
+
+‘2kflat ’
+1998x1080
+
+‘2kscope ’
+2048x858
+
+‘4k ’
+4096x2160
+
+‘4kflat ’
+3996x2160
+
+‘4kscope ’
+4096x1716
+
+‘nhd ’
+640x360
+
+‘hqvga ’
+240x160
+
+‘wqvga ’
+400x240
+
+‘fwqvga ’
+432x240
+
+‘hvga ’
+480x320
+
+‘qhd ’
+960x540
+
+
+
+
+
8.5 Video rate# TOC
+
+
Specify the frame rate of a video, expressed as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a float
+number or a valid video frame rate abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+30000/1001
+
+‘pal ’
+25/1
+
+‘qntsc ’
+30000/1001
+
+‘qpal ’
+25/1
+
+‘sntsc ’
+30000/1001
+
+‘spal ’
+25/1
+
+‘film ’
+24/1
+
+‘ntsc-film ’
+24000/1001
+
+
+
+
+
8.6 Ratio# TOC
+
+
A ratio can be expressed as an expression, or in the form
+numerator :denominator .
+
+
Note that a ratio with infinite (1/0) or negative value is
+considered valid, so you should check on the returned value if you
+want to exclude those values.
+
+
The undefined value can be expressed using the "0:0" string.
+
+
+
8.7 Color# TOC
+
+
It can be the name of a color as defined below (case insensitive match) or a
+[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
+representing the alpha component.
+
+
The alpha component may be a string composed by "0x" followed by an
+hexadecimal number or a decimal number between 0.0 and 1.0, which
+represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
+transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
+component is not specified then ‘0xff ’ is assumed.
+
+
The string ‘random ’ will result in a random color.
+
+
The following names of colors are recognized:
+
+‘AliceBlue ’
+0xF0F8FF
+
+‘AntiqueWhite ’
+0xFAEBD7
+
+‘Aqua ’
+0x00FFFF
+
+‘Aquamarine ’
+0x7FFFD4
+
+‘Azure ’
+0xF0FFFF
+
+‘Beige ’
+0xF5F5DC
+
+‘Bisque ’
+0xFFE4C4
+
+‘Black ’
+0x000000
+
+‘BlanchedAlmond ’
+0xFFEBCD
+
+‘Blue ’
+0x0000FF
+
+‘BlueViolet ’
+0x8A2BE2
+
+‘Brown ’
+0xA52A2A
+
+‘BurlyWood ’
+0xDEB887
+
+‘CadetBlue ’
+0x5F9EA0
+
+‘Chartreuse ’
+0x7FFF00
+
+‘Chocolate ’
+0xD2691E
+
+‘Coral ’
+0xFF7F50
+
+‘CornflowerBlue ’
+0x6495ED
+
+‘Cornsilk ’
+0xFFF8DC
+
+‘Crimson ’
+0xDC143C
+
+‘Cyan ’
+0x00FFFF
+
+‘DarkBlue ’
+0x00008B
+
+‘DarkCyan ’
+0x008B8B
+
+‘DarkGoldenRod ’
+0xB8860B
+
+‘DarkGray ’
+0xA9A9A9
+
+‘DarkGreen ’
+0x006400
+
+‘DarkKhaki ’
+0xBDB76B
+
+‘DarkMagenta ’
+0x8B008B
+
+‘DarkOliveGreen ’
+0x556B2F
+
+‘Darkorange ’
+0xFF8C00
+
+‘DarkOrchid ’
+0x9932CC
+
+‘DarkRed ’
+0x8B0000
+
+‘DarkSalmon ’
+0xE9967A
+
+‘DarkSeaGreen ’
+0x8FBC8F
+
+‘DarkSlateBlue ’
+0x483D8B
+
+‘DarkSlateGray ’
+0x2F4F4F
+
+‘DarkTurquoise ’
+0x00CED1
+
+‘DarkViolet ’
+0x9400D3
+
+‘DeepPink ’
+0xFF1493
+
+‘DeepSkyBlue ’
+0x00BFFF
+
+‘DimGray ’
+0x696969
+
+‘DodgerBlue ’
+0x1E90FF
+
+‘FireBrick ’
+0xB22222
+
+‘FloralWhite ’
+0xFFFAF0
+
+‘ForestGreen ’
+0x228B22
+
+‘Fuchsia ’
+0xFF00FF
+
+‘Gainsboro ’
+0xDCDCDC
+
+‘GhostWhite ’
+0xF8F8FF
+
+‘Gold ’
+0xFFD700
+
+‘GoldenRod ’
+0xDAA520
+
+‘Gray ’
+0x808080
+
+‘Green ’
+0x008000
+
+‘GreenYellow ’
+0xADFF2F
+
+‘HoneyDew ’
+0xF0FFF0
+
+‘HotPink ’
+0xFF69B4
+
+‘IndianRed ’
+0xCD5C5C
+
+‘Indigo ’
+0x4B0082
+
+‘Ivory ’
+0xFFFFF0
+
+‘Khaki ’
+0xF0E68C
+
+‘Lavender ’
+0xE6E6FA
+
+‘LavenderBlush ’
+0xFFF0F5
+
+‘LawnGreen ’
+0x7CFC00
+
+‘LemonChiffon ’
+0xFFFACD
+
+‘LightBlue ’
+0xADD8E6
+
+‘LightCoral ’
+0xF08080
+
+‘LightCyan ’
+0xE0FFFF
+
+‘LightGoldenRodYellow ’
+0xFAFAD2
+
+‘LightGreen ’
+0x90EE90
+
+‘LightGrey ’
+0xD3D3D3
+
+‘LightPink ’
+0xFFB6C1
+
+‘LightSalmon ’
+0xFFA07A
+
+‘LightSeaGreen ’
+0x20B2AA
+
+‘LightSkyBlue ’
+0x87CEFA
+
+‘LightSlateGray ’
+0x778899
+
+‘LightSteelBlue ’
+0xB0C4DE
+
+‘LightYellow ’
+0xFFFFE0
+
+‘Lime ’
+0x00FF00
+
+‘LimeGreen ’
+0x32CD32
+
+‘Linen ’
+0xFAF0E6
+
+‘Magenta ’
+0xFF00FF
+
+‘Maroon ’
+0x800000
+
+‘MediumAquaMarine ’
+0x66CDAA
+
+‘MediumBlue ’
+0x0000CD
+
+‘MediumOrchid ’
+0xBA55D3
+
+‘MediumPurple ’
+0x9370D8
+
+‘MediumSeaGreen ’
+0x3CB371
+
+‘MediumSlateBlue ’
+0x7B68EE
+
+‘MediumSpringGreen ’
+0x00FA9A
+
+‘MediumTurquoise ’
+0x48D1CC
+
+‘MediumVioletRed ’
+0xC71585
+
+‘MidnightBlue ’
+0x191970
+
+‘MintCream ’
+0xF5FFFA
+
+‘MistyRose ’
+0xFFE4E1
+
+‘Moccasin ’
+0xFFE4B5
+
+‘NavajoWhite ’
+0xFFDEAD
+
+‘Navy ’
+0x000080
+
+‘OldLace ’
+0xFDF5E6
+
+‘Olive ’
+0x808000
+
+‘OliveDrab ’
+0x6B8E23
+
+‘Orange ’
+0xFFA500
+
+‘OrangeRed ’
+0xFF4500
+
+‘Orchid ’
+0xDA70D6
+
+‘PaleGoldenRod ’
+0xEEE8AA
+
+‘PaleGreen ’
+0x98FB98
+
+‘PaleTurquoise ’
+0xAFEEEE
+
+‘PaleVioletRed ’
+0xD87093
+
+‘PapayaWhip ’
+0xFFEFD5
+
+‘PeachPuff ’
+0xFFDAB9
+
+‘Peru ’
+0xCD853F
+
+‘Pink ’
+0xFFC0CB
+
+‘Plum ’
+0xDDA0DD
+
+‘PowderBlue ’
+0xB0E0E6
+
+‘Purple ’
+0x800080
+
+‘Red ’
+0xFF0000
+
+‘RosyBrown ’
+0xBC8F8F
+
+‘RoyalBlue ’
+0x4169E1
+
+‘SaddleBrown ’
+0x8B4513
+
+‘Salmon ’
+0xFA8072
+
+‘SandyBrown ’
+0xF4A460
+
+‘SeaGreen ’
+0x2E8B57
+
+‘SeaShell ’
+0xFFF5EE
+
+‘Sienna ’
+0xA0522D
+
+‘Silver ’
+0xC0C0C0
+
+‘SkyBlue ’
+0x87CEEB
+
+‘SlateBlue ’
+0x6A5ACD
+
+‘SlateGray ’
+0x708090
+
+‘Snow ’
+0xFFFAFA
+
+‘SpringGreen ’
+0x00FF7F
+
+‘SteelBlue ’
+0x4682B4
+
+‘Tan ’
+0xD2B48C
+
+‘Teal ’
+0x008080
+
+‘Thistle ’
+0xD8BFD8
+
+‘Tomato ’
+0xFF6347
+
+‘Turquoise ’
+0x40E0D0
+
+‘Violet ’
+0xEE82EE
+
+‘Wheat ’
+0xF5DEB3
+
+‘White ’
+0xFFFFFF
+
+‘WhiteSmoke ’
+0xF5F5F5
+
+‘Yellow ’
+0xFFFF00
+
+‘YellowGreen ’
+0x9ACD32
+
+
+
+
+
8.8 Channel Layout# TOC
+
+
A channel layout specifies the spatial disposition of the channels in
+a multi-channel audio stream. To specify a channel layout, FFmpeg
+makes use of a special syntax.
+
+
Individual channels are identified by an id, as given by the table
+below:
+
+‘FL ’
+front left
+
+‘FR ’
+front right
+
+‘FC ’
+front center
+
+‘LFE ’
+low frequency
+
+‘BL ’
+back left
+
+‘BR ’
+back right
+
+‘FLC ’
+front left-of-center
+
+‘FRC ’
+front right-of-center
+
+‘BC ’
+back center
+
+‘SL ’
+side left
+
+‘SR ’
+side right
+
+‘TC ’
+top center
+
+‘TFL ’
+top front left
+
+‘TFC ’
+top front center
+
+‘TFR ’
+top front right
+
+‘TBL ’
+top back left
+
+‘TBC ’
+top back center
+
+‘TBR ’
+top back right
+
+‘DL ’
+downmix left
+
+‘DR ’
+downmix right
+
+‘WL ’
+wide left
+
+‘WR ’
+wide right
+
+‘SDL ’
+surround direct left
+
+‘SDR ’
+surround direct right
+
+‘LFE2 ’
+low frequency 2
+
+
+
+
Standard channel layout compositions can be specified by using the
+following identifiers:
+
+‘mono ’
+FC
+
+‘stereo ’
+FL+FR
+
+‘2.1 ’
+FL+FR+LFE
+
+‘3.0 ’
+FL+FR+FC
+
+‘3.0(back) ’
+FL+FR+BC
+
+‘4.0 ’
+FL+FR+FC+BC
+
+‘quad ’
+FL+FR+BL+BR
+
+‘quad(side) ’
+FL+FR+SL+SR
+
+‘3.1 ’
+FL+FR+FC+LFE
+
+‘5.0 ’
+FL+FR+FC+BL+BR
+
+‘5.0(side) ’
+FL+FR+FC+SL+SR
+
+‘4.1 ’
+FL+FR+FC+LFE+BC
+
+‘5.1 ’
+FL+FR+FC+LFE+BL+BR
+
+‘5.1(side) ’
+FL+FR+FC+LFE+SL+SR
+
+‘6.0 ’
+FL+FR+FC+BC+SL+SR
+
+‘6.0(front) ’
+FL+FR+FLC+FRC+SL+SR
+
+‘hexagonal ’
+FL+FR+FC+BL+BR+BC
+
+‘6.1 ’
+FL+FR+FC+LFE+BC+SL+SR
+
+‘6.1(back) ’
+FL+FR+FC+LFE+BL+BR+BC
+
+‘6.1(front) ’
+FL+FR+LFE+FLC+FRC+SL+SR
+
+‘7.0 ’
+FL+FR+FC+BL+BR+SL+SR
+
+‘7.0(front) ’
+FL+FR+FC+FLC+FRC+SL+SR
+
+‘7.1 ’
+FL+FR+FC+LFE+BL+BR+SL+SR
+
+‘7.1(wide) ’
+FL+FR+FC+LFE+BL+BR+FLC+FRC
+
+‘7.1(wide-side) ’
+FL+FR+FC+LFE+FLC+FRC+SL+SR
+
+‘octagonal ’
+FL+FR+FC+BL+BR+BC+SL+SR
+
+‘downmix ’
+DL+DR
+
+
+
+
A custom channel layout can be specified as a sequence of terms, separated by
+’+’ or ’|’. Each term can be:
+
+ the name of a standard channel layout (e.g. ‘mono ’,
+‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
+
+ the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
+
+ a number of channels, in decimal, optionally followed by ’c’, yielding
+the default channel layout for that number of channels (see the
+function av_get_default_channel_layout
)
+
+ a channel layout mask, in hexadecimal starting with "0x" (see the
+AV_CH_*
macros in libavutil/channel_layout.h ).
+
+
+
Starting from libavutil version 53 the trailing character "c" to
+specify a number of channels will be required, while a channel layout
+mask could also be specified as a decimal number (if and only if not
+followed by "c").
+
+
See also the function av_get_channel_layout
defined in
+libavutil/channel_layout.h .
+
+
+
9 Expression Evaluation# TOC
+
+
When evaluating an arithmetic expression, FFmpeg uses an internal
+formula evaluator, implemented through the libavutil/eval.h
+interface.
+
+
An expression may contain unary, binary operators, constants, and
+functions.
+
+
Two expressions expr1 and expr2 can be combined to form
+another expression "expr1 ;expr2 ".
+expr1 and expr2 are evaluated in turn, and the new
+expression evaluates to the value of expr2 .
+
+
The following binary operators are available: +
, -
,
+*
, /
, ^
.
+
+
The following unary operators are available: +
, -
.
+
+
The following functions are available:
+
+abs(x)
+Compute absolute value of x .
+
+
+acos(x)
+Compute arccosine of x .
+
+
+asin(x)
+Compute arcsine of x .
+
+
+atan(x)
+Compute arctangent of x .
+
+
+between(x, min, max)
+Return 1 if x is greater than or equal to min and lesser than or
+equal to max , 0 otherwise.
+
+
+bitand(x, y)
+bitor(x, y)
+Compute bitwise and/or operation on x and y .
+
+The results of the evaluation of x and y are converted to
+integers before executing the bitwise operation.
+
+Note that both the conversion to integer and the conversion back to
+floating point can lose precision. Beware of unexpected results for
+large numbers (usually 2^53 and larger).
+
+
+ceil(expr)
+Round the value of expression expr upwards to the nearest
+integer. For example, "ceil(1.5)" is "2.0".
+
+
+clip(x, min, max)
+Return the value of x clipped between min and max .
+
+
+cos(x)
+Compute cosine of x .
+
+
+cosh(x)
+Compute hyperbolic cosine of x .
+
+
+eq(x, y)
+Return 1 if x and y are equivalent, 0 otherwise.
+
+
+exp(x)
+Compute exponential of x (with base e
, the Euler’s number).
+
+
+floor(expr)
+Round the value of expression expr downwards to the nearest
+integer. For example, "floor(-1.5)" is "-2.0".
+
+
+gauss(x)
+Compute Gauss function of x , corresponding to
+exp(-x*x/2) / sqrt(2*PI)
.
+
+
+gcd(x, y)
+Return the greatest common divisor of x and y . If both x and
+y are 0 or either or both are less than zero then behavior is undefined.
+
+
+gt(x, y)
+Return 1 if x is greater than y , 0 otherwise.
+
+
+gte(x, y)
+Return 1 if x is greater than or equal to y , 0 otherwise.
+
+
+hypot(x, y)
+This function is similar to the C function with the same name; it returns
+"sqrt(x *x + y *y )", the length of the hypotenuse of a
+right triangle with sides of length x and y , or the distance of the
+point (x , y ) from the origin.
+
+
+if(x, y)
+Evaluate x , and if the result is non-zero return the result of
+the evaluation of y , return 0 otherwise.
+
+
+if(x, y, z)
+Evaluate x , and if the result is non-zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+ifnot(x, y)
+Evaluate x , and if the result is zero return the result of the
+evaluation of y , return 0 otherwise.
+
+
+ifnot(x, y, z)
+Evaluate x , and if the result is zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+isinf(x)
+Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
+
+
+isnan(x)
+Return 1.0 if x is NAN, 0.0 otherwise.
+
+
+ld(var)
+Load the value of the internal variable with number
+var , which was previously stored with st(var , expr ).
+The function returns the loaded value.
+
+
+log(x)
+Compute natural logarithm of x .
+
+
+lt(x, y)
+Return 1 if x is lesser than y , 0 otherwise.
+
+
+lte(x, y)
+Return 1 if x is lesser than or equal to y , 0 otherwise.
+
+
+max(x, y)
+Return the maximum between x and y .
+
+
+min(x, y)
+Return the minimum between x and y .
+
+
+mod(x, y)
+Compute the remainder of division of x by y .
+
+
+not(expr)
+Return 1.0 if expr is zero, 0.0 otherwise.
+
+
+pow(x, y)
+Compute x raised to the power of y ; it is equivalent to
+"(x )^(y )".
+
+
+print(t)
+print(t, l)
+Print the value of expression t with loglevel l . If
+l is not specified then a default log level is used.
+Returns the value of the expression printed.
+
+Prints t with loglevel l
+
+
+random(x)
+Return a pseudo random value between 0.0 and 1.0. x is the index of the
+internal variable which will be used to save the seed/state.
+
+
+root(expr, max)
+Find an input value for which the function represented by expr
+with argument ld(0) is 0 in the interval 0..max .
+
+The expression in expr must denote a continuous function or the
+result is undefined.
+
+ld(0) is used to represent the function input value, which means
+that the given expression will be evaluated multiple times with
+various input values that the expression can access through
+ld(0)
. When the expression evaluates to 0 then the
+corresponding input value will be returned.
+
+
+sin(x)
+Compute sine of x .
+
+
+sinh(x)
+Compute hyperbolic sine of x .
+
+
+sqrt(expr)
+Compute the square root of expr . This is equivalent to
+"(expr )^.5".
+
+
+squish(x)
+Compute expression 1/(1 + exp(4*x))
.
+
+
+st(var, expr)
+Store the value of the expression expr in an internal
+variable. var specifies the number of the variable where to
+store the value, and it is a value ranging from 0 to 9. The function
+returns the value stored in the internal variable.
+Note, Variables are currently not shared between expressions.
+
+
+tan(x)
+Compute tangent of x .
+
+
+tanh(x)
+Compute hyperbolic tangent of x .
+
+
+taylor(expr, x)
+taylor(expr, x, id)
+Evaluate a Taylor series at x , given an expression representing
+the ld(id)
-th derivative of a function at 0.
+
+When the series does not converge the result is undefined.
+
+ld(id) is used to represent the derivative order in expr ,
+which means that the given expression will be evaluated multiple times
+with various input values that the expression can access through
+ld(id)
. If id is not specified then 0 is assumed.
+
+Note, when you have the derivatives at y instead of 0,
+taylor(expr, x-y)
can be used.
+
+
+time(0)
+Return the current (wallclock) time in seconds.
+
+
+trunc(expr)
+Round the value of expression expr towards zero to the nearest
+integer. For example, "trunc(-1.5)" is "-1.0".
+
+
+while(cond, expr)
+Evaluate expression expr while the expression cond is
+non-zero, and returns the value of the last expr evaluation, or
+NAN if cond was always false.
+
+
+
+
The following constants are available:
+
+PI
+area of the unit disc, approximately 3.14
+
+E
+exp(1) (Euler’s number), approximately 2.718
+
+PHI
+golden ratio (1+sqrt(5))/2, approximately 1.618
+
+
+
+
Assuming that an expression is considered "true" if it has a non-zero
+value, note that:
+
+
*
works like AND
+
+
+
works like OR
+
+
For example the construct:
+
+
is equivalent to:
+
+
+
In your C code, you can extend the list of unary and binary functions,
+and define recognized constants, so that they are available for your
+expressions.
+
+
The evaluator also recognizes the International System unit prefixes.
+If ’i’ is appended after the prefix, binary prefixes are used, which
+are based on powers of 1024 instead of powers of 1000.
+The ’B’ postfix multiplies the value by 8, and can be appended after a
+unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
+’G’ and ’B’ as number postfix.
+
+
The list of available International System prefixes follows, with
+indication of the corresponding powers of 10 and of 2.
+
+y
+10^-24 / 2^-80
+
+z
+10^-21 / 2^-70
+
+a
+10^-18 / 2^-60
+
+f
+10^-15 / 2^-50
+
+p
+10^-12 / 2^-40
+
+n
+10^-9 / 2^-30
+
+u
+10^-6 / 2^-20
+
+m
+10^-3 / 2^-10
+
+c
+10^-2
+
+d
+10^-1
+
+h
+10^2
+
+k
+10^3 / 2^10
+
+K
+10^3 / 2^10
+
+M
+10^6 / 2^20
+
+G
+10^9 / 2^30
+
+T
+10^12 / 2^40
+
+P
+10^15 / 2^50
+
+E
+10^18 / 2^60
+
+Z
+10^21 / 2^70
+
+Y
+10^24 / 2^80
+
+
+
+
+
+
10 OpenCL Options# TOC
+
+
When FFmpeg is configured with --enable-opencl
, it is possible
+to set the options for the global OpenCL context.
+
+
The list of supported options follows:
+
+
+build_options
+Set build options used to compile the registered kernels.
+
+See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
+
+
+platform_idx
+Select the index of the platform to run OpenCL code.
+
+The specified index must be one of the indexes in the device list
+which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+device_idx
+Select the index of the device used to run OpenCL code.
+
+The specified index must be one of the indexes in the device list which
+can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+
+
+
+
11 Codec Options# TOC
+
+
libavcodec provides some generic global options, which can be set on
+all the encoders and decoders. In addition each codec may support
+so-called private options, which are specific for a given codec.
+
+
Sometimes, a global option may only affect a specific kind of codec,
+and may be nonsensical or ignored by another, so you need to be aware
+of the meaning of the specified options. Also some options are
+meant only for decoding or encoding.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVCodecContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
+The list of supported options follows:
+
+
+b integer (encoding,audio,video )
+Set bitrate in bits/s. Default value is 200K.
+
+
+ab integer (encoding,audio )
+Set audio bitrate (in bits/s). Default value is 128K.
+
+
+bt integer (encoding,video )
+Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
+tolerance specifies how far ratecontrol is willing to deviate from the
+target average bitrate value. This is not related to min/max
+bitrate. Lowering tolerance too much has an adverse effect on quality.
+
+
+flags flags (decoding/encoding,audio,video,subtitles )
+Set generic flags.
+
+Possible values:
+
+‘mv4 ’
+Use four motion vector by macroblock (mpeg4).
+
+‘qpel ’
+Use 1/4 pel motion compensation.
+
+‘loop ’
+Use loop filter.
+
+‘qscale ’
+Use fixed qscale.
+
+‘gmc ’
+Use gmc.
+
+‘mv0 ’
+Always try a mb with mv=<0,0>.
+
+‘input_preserved ’
+‘pass1 ’
+Use internal 2pass ratecontrol in first pass mode.
+
+‘pass2 ’
+Use internal 2pass ratecontrol in second pass mode.
+
+‘gray ’
+Only decode/encode grayscale.
+
+‘emu_edge ’
+Do not draw edges.
+
+‘psnr ’
+Set error[?] variables during encoding.
+
+‘truncated ’
+‘naq ’
+Normalize adaptive quantization.
+
+‘ildct ’
+Use interlaced DCT.
+
+‘low_delay ’
+Force low delay.
+
+‘global_header ’
+Place global headers in extradata instead of every keyframe.
+
+‘bitexact ’
+Only write platform-, build- and time-independent data. (except (I)DCT).
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+‘aic ’
+Apply H263 advanced intra coding / mpeg4 ac prediction.
+
+‘cbp ’
+Deprecated, use mpegvideo private options instead.
+
+‘qprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘ilme ’
+Apply interlaced motion estimation.
+
+‘cgop ’
+Use closed gop.
+
+
+
+
+me_method integer (encoding,video )
+Set motion estimation method.
+
+Possible values:
+
+‘zero ’
+zero motion estimation (fastest)
+
+‘full ’
+full motion estimation (slowest)
+
+‘epzs ’
+EPZS motion estimation (default)
+
+‘esa ’
+esa motion estimation (alias for full)
+
+‘tesa ’
+tesa motion estimation
+
+‘dia ’
+dia motion estimation (alias for epzs)
+
+‘log ’
+log motion estimation
+
+‘phods ’
+phods motion estimation
+
+‘x1 ’
+X1 motion estimation
+
+‘hex ’
+hex motion estimation
+
+‘umh ’
+umh motion estimation
+
+‘iter ’
+iter motion estimation
+
+
+
+
+extradata_size integer
+Set extradata size.
+
+
+time_base rational number
+Set codec time base.
+
+It is the fundamental unit of time (in seconds) in terms of which
+frame timestamps are represented. For fixed-fps content, timebase
+should be 1 / frame_rate
and timestamp increments should be
+identically 1.
+
+
+g integer (encoding,video )
+Set the group of picture size. Default value is 12.
+
+
+ar integer (decoding/encoding,audio )
+Set audio sampling rate (in Hz).
+
+
+ac integer (decoding/encoding,audio )
+Set number of audio channels.
+
+
+cutoff integer (encoding,audio )
+Set cutoff bandwidth.
+
+
+frame_size integer (encoding,audio )
+Set audio frame size.
+
+Each submitted frame except the last must contain exactly frame_size
+samples per channel. May be 0 when the codec has
+CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
+restricted. It is set by some decoders to indicate constant frame
+size.
+
+
+frame_number integer
+Set the frame number.
+
+
+delay integer
+qcomp float (encoding,video )
+Set video quantizer scale compression (VBR). It is used as a constant
+in the ratecontrol equation. Recommended range for default rc_eq:
+0.0-1.0.
+
+
+qblur float (encoding,video )
+Set video quantizer scale blur (VBR).
+
+
+qmin integer (encoding,video )
+Set min video quantizer scale (VBR). Must be included between -1 and
+69, default value is 2.
+
+
+qmax integer (encoding,video )
+Set max video quantizer scale (VBR). Must be included between -1 and
+1024, default value is 31.
+
+
+qdiff integer (encoding,video )
+Set max difference between the quantizer scale (VBR).
+
+
+bf integer (encoding,video )
+Set max number of B frames between non-B-frames.
+
+Must be an integer between -1 and 16. 0 means that B-frames are
+disabled. If a value of -1 is used, it will choose an automatic value
+depending on the encoder.
+
+Default value is 0.
+
+
+b_qfactor float (encoding,video )
+Set qp factor between P and B frames.
+
+
+rc_strategy integer (encoding,video )
+Set ratecontrol method.
+
+
+b_strategy integer (encoding,video )
+Set strategy to choose between I/P/B-frames.
+
+
+ps integer (encoding,video )
+Set RTP payload size in bytes.
+
+
+mv_bits integer
+header_bits integer
+i_tex_bits integer
+p_tex_bits integer
+i_count integer
+p_count integer
+skip_count integer
+misc_bits integer
+frame_bits integer
+codec_tag integer
+bug flags (decoding,video )
+Work around encoder bugs that are not auto-detected.
+
+Possible values:
+
+‘autodetect ’
+‘old_msmpeg4 ’
+some old lavc generated msmpeg4v3 files (no autodetection)
+
+‘xvid_ilace ’
+Xvid interlacing bug (autodetected if fourcc==XVIX)
+
+‘ump4 ’
+(autodetected if fourcc==UMP4)
+
+‘no_padding ’
+padding bug (autodetected)
+
+‘amv ’
+‘ac_vlc ’
+illegal vlc bug (autodetected per fourcc)
+
+‘qpel_chroma ’
+‘std_qpel ’
+old standard qpel (autodetected per fourcc/version)
+
+‘qpel_chroma2 ’
+‘direct_blocksize ’
+direct-qpel-blocksize bug (autodetected per fourcc/version)
+
+‘edge ’
+edge padding bug (autodetected per fourcc/version)
+
+‘hpel_chroma ’
+‘dc_clip ’
+‘ms ’
+Work around various bugs in broken Microsoft decoders.
+
+‘trunc ’
+truncated frames
+
+
+
+
+lelim integer (encoding,video )
+Set single coefficient elimination threshold for luminance (negative
+values also consider DC coefficient).
+
+
+celim integer (encoding,video )
+Set single coefficient elimination threshold for chrominance (negative
+values also consider dc coefficient)
+
+
+strict integer (decoding/encoding,audio,video )
+Specify how strictly to follow the standards.
+
+Possible values:
+
+‘very ’
+strictly conform to an older, more strict version of the spec or reference software
+
+‘strict ’
+strictly conform to all the things in the spec no matter what consequences
+
+‘normal ’
+‘unofficial ’
+allow unofficial extensions
+
+‘experimental ’
+allow non standardized experimental things, experimental
+(unfinished/work in progress/not well tested) decoders and encoders.
+Note: experimental decoders can pose a security risk, do not use this for
+decoding untrusted input.
+
+
+
+
+b_qoffset float (encoding,video )
+Set QP offset between P and B frames.
+
+
+err_detect flags (decoding,audio,video )
+Set error detection flags.
+
+Possible values:
+
+‘crccheck ’
+verify embedded CRCs
+
+‘bitstream ’
+detect bitstream specification deviations
+
+‘buffer ’
+detect improper bitstream length
+
+‘explode ’
+abort decoding on minor error detection
+
+‘ignore_err ’
+ignore decoding errors, and continue decoding.
+This is useful if you want to analyze the content of a video and thus want
+everything to be decoded no matter what. This option will not result in a video
+that is pleasing to watch in case of errors.
+
+‘careful ’
+consider things that violate the spec and have not been seen in the wild as errors
+
+‘compliant ’
+consider all spec non compliancies as errors
+
+‘aggressive ’
+consider things that a sane encoder should not do as an error
+
+
+
+
+has_b_frames integer
+block_align integer
+mpeg_quant integer (encoding,video )
+Use MPEG quantizers instead of H.263.
+
+
+qsquish float (encoding,video )
+How to keep quantizer between qmin and qmax (0 = clip, 1 = use
+differentiable function).
+
+
+rc_qmod_amp float (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_qmod_freq integer (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_override_count integer
+rc_eq string (encoding,video )
+Set rate control equation. When computing the expression, besides the
+standard functions defined in the section ’Expression Evaluation’, the
+following functions are available: bits2qp(bits), qp2bits(qp). Also
+the following constants are available: iTex pTex tex mv fCode iCount
+mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
+avgTex.
+
+
+maxrate integer (encoding,audio,video )
+Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
+
+
+minrate integer (encoding,audio,video )
+Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
+encode. It is of little use elsewise.
+
+
+bufsize integer (encoding,audio,video )
+Set ratecontrol buffer size (in bits).
+
+
+rc_buf_aggressivity float (encoding,video )
+Currently useless.
+
+
+i_qfactor float (encoding,video )
+Set QP factor between P and I frames.
+
+
+i_qoffset float (encoding,video )
+Set QP offset between P and I frames.
+
+
+rc_init_cplx float (encoding,video )
+Set initial complexity for 1-pass encoding.
+
+
+dct integer (encoding,video )
+Set DCT algorithm.
+
+Possible values:
+
+‘auto ’
+autoselect a good one (default)
+
+‘fastint ’
+fast integer
+
+‘int ’
+accurate integer
+
+‘mmx ’
+‘altivec ’
+‘faan ’
+floating point AAN DCT
+
+
+
+
+lumi_mask float (encoding,video )
+Compress bright areas stronger than medium ones.
+
+
+tcplx_mask float (encoding,video )
+Set temporal complexity masking.
+
+
+scplx_mask float (encoding,video )
+Set spatial complexity masking.
+
+
+p_mask float (encoding,video )
+Set inter masking.
+
+
+dark_mask float (encoding,video )
+Compress dark areas stronger than medium ones.
+
+
+idct integer (decoding/encoding,video )
+Select IDCT implementation.
+
+Possible values:
+
+‘auto ’
+‘int ’
+‘simple ’
+‘simplemmx ’
+‘simpleauto ’
+Automatically pick an IDCT compatible with the simple one
+
+
+‘arm ’
+‘altivec ’
+‘sh4 ’
+‘simplearm ’
+‘simplearmv5te ’
+‘simplearmv6 ’
+‘simpleneon ’
+‘simplealpha ’
+‘ipp ’
+‘xvidmmx ’
+‘faani ’
+floating point AAN IDCT
+
+
+
+
+slice_count integer
+ec flags (decoding,video )
+Set error concealment strategy.
+
+Possible values:
+
+‘guess_mvs ’
+iterative motion vector (MV) search (slow)
+
+‘deblock ’
+use strong deblock filter for damaged MBs
+
+‘favor_inter ’
+favor predicting from the previous frame instead of the current
+
+
+
+
+bits_per_coded_sample integer
+pred integer (encoding,video )
+Set prediction method.
+
+Possible values:
+
+‘left ’
+‘plane ’
+‘median ’
+
+
+
+aspect rational number (encoding,video )
+Set sample aspect ratio.
+
+
+debug flags (decoding/encoding,audio,video,subtitles )
+Print specific debug info.
+
+Possible values:
+
+‘pict ’
+picture info
+
+‘rc ’
+rate control
+
+‘bitstream ’
+‘mb_type ’
+macroblock (MB) type
+
+‘qp ’
+per-block quantization parameter (QP)
+
+‘mv ’
+motion vector
+
+‘dct_coeff ’
+‘skip ’
+‘startcode ’
+‘pts ’
+‘er ’
+error recognition
+
+‘mmco ’
+memory management control operations (H.264)
+
+‘bugs ’
+‘vis_qp ’
+visualize quantization parameter (QP), lower QP are tinted greener
+
+‘vis_mb_type ’
+visualize block types
+
+‘buffers ’
+picture buffer allocations
+
+‘thread_ops ’
+threading operations
+
+‘nomc ’
+skip motion compensation
+
+
+
+
+vismv integer (decoding,video )
+Visualize motion vectors (MVs).
+
+This option is deprecated, see the codecview filter instead.
+
+Possible values:
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+cmp integer (encoding,video )
+Set full pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+subcmp integer (encoding,video )
+Set sub pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+mbcmp integer (encoding,video )
+Set macroblock compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+ildctcmp integer (encoding,video )
+Set interlaced dct compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+dia_size integer (encoding,video )
+Set diamond type & size for motion estimation.
+
+
+last_pred integer (encoding,video )
+Set amount of motion predictors from the previous frame.
+
+
+preme integer (encoding,video )
+Set pre motion estimation.
+
+
+precmp integer (encoding,video )
+Set pre motion estimation compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+pre_dia_size integer (encoding,video )
+Set diamond type & size for motion estimation pre-pass.
+
+
+subq integer (encoding,video )
+Set sub pel motion estimation quality.
+
+
+dtg_active_format integer
+me_range integer (encoding,video )
+Set limit motion vectors range (1023 for DivX player).
+
+
+ibias integer (encoding,video )
+Set intra quant bias.
+
+
+pbias integer (encoding,video )
+Set inter quant bias.
+
+
+color_table_id integer
+global_quality integer (encoding,audio,video )
+coder integer (encoding,video )
+
+Possible values:
+
+‘vlc ’
+variable length coder / huffman coder
+
+‘ac ’
+arithmetic coder
+
+‘raw ’
+raw (no encoding)
+
+‘rle ’
+run-length coder
+
+‘deflate ’
+deflate-based coder
+
+
+
+
+context integer (encoding,video )
+Set context model.
+
+
+slice_flags integer
+xvmc_acceleration integer
+mbd integer (encoding,video )
+Set macroblock decision algorithm (high quality mode).
+
+Possible values:
+
+‘simple ’
+use mbcmp (default)
+
+‘bits ’
+use fewest bits
+
+‘rd ’
+use best rate distortion
+
+
+
+
+stream_codec_tag integer
+sc_threshold integer (encoding,video )
+Set scene change threshold.
+
+
+lmin integer (encoding,video )
+Set min lagrange factor (VBR).
+
+
+lmax integer (encoding,video )
+Set max lagrange factor (VBR).
+
+
+nr integer (encoding,video )
+Set noise reduction.
+
+
+rc_init_occupancy integer (encoding,video )
+Set number of bits which should be loaded into the rc buffer before
+decoding starts.
+
+
+flags2 flags (decoding/encoding,audio,video )
+
+Possible values:
+
+‘fast ’
+Allow non spec compliant speedup tricks.
+
+‘sgop ’
+Deprecated, use mpegvideo private options instead.
+
+‘noout ’
+Skip bitstream encoding.
+
+‘ignorecrop ’
+Ignore cropping information from sps.
+
+‘local_header ’
+Place global headers at every keyframe instead of in extradata.
+
+‘chunks ’
+Frame data might be split into multiple chunks.
+
+‘showall ’
+Show all frames before the first keyframe.
+
+‘skiprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘export_mvs ’
+Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
+for codecs that support it. See also doc/examples/export_mvs.c .
+
+
+
+
+error integer (encoding,video )
+qns integer (encoding,video )
+Deprecated, use mpegvideo private options instead.
+
+
+threads integer (decoding/encoding,video )
+
+Possible values:
+
+‘auto ’
+detect a good number of threads
+
+
+
+
+me_threshold integer (encoding,video )
+Set motion estimation threshold.
+
+
+mb_threshold integer (encoding,video )
+Set macroblock threshold.
+
+
+dc integer (encoding,video )
+Set intra_dc_precision.
+
+
+nssew integer (encoding,video )
+Set nsse weight.
+
+
+skip_top integer (decoding,video )
+Set number of macroblock rows at the top which are skipped.
+
+
+skip_bottom integer (decoding,video )
+Set number of macroblock rows at the bottom which are skipped.
+
+
+profile integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+‘aac_main ’
+‘aac_low ’
+‘aac_ssr ’
+‘aac_ltp ’
+‘aac_he ’
+‘aac_he_v2 ’
+‘aac_ld ’
+‘aac_eld ’
+‘mpeg2_aac_low ’
+‘mpeg2_aac_he ’
+‘mpeg4_sp ’
+‘mpeg4_core ’
+‘mpeg4_main ’
+‘mpeg4_asp ’
+‘dts ’
+‘dts_es ’
+‘dts_96_24 ’
+‘dts_hd_hra ’
+‘dts_hd_ma ’
+
+
+
+level integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+
+
+
+lowres integer (decoding,audio,video )
+Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
+
+
+skip_threshold integer (encoding,video )
+Set frame skip threshold.
+
+
+skip_factor integer (encoding,video )
+Set frame skip factor.
+
+
+skip_exp integer (encoding,video )
+Set frame skip exponent.
+Negative values behave identical to the corresponding positive ones, except
+that the score is normalized.
+Positive values exist primarily for compatibility reasons and are not so useful.
+
+
+skipcmp integer (encoding,video )
+Set frame skip compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+border_mask float (encoding,video )
+Increase the quantizer for macroblocks close to borders.
+
+
+mblmin integer (encoding,video )
+Set min macroblock lagrange factor (VBR).
+
+
+mblmax integer (encoding,video )
+Set max macroblock lagrange factor (VBR).
+
+
+mepc integer (encoding,video )
+Set motion estimation bitrate penalty compensation (1.0 = 256).
+
+
+skip_loop_filter integer (decoding,video )
+skip_idct integer (decoding,video )
+skip_frame integer (decoding,video )
+
+Make decoder discard processing depending on the frame type selected
+by the option value.
+
+skip_loop_filter skips frame loop filtering, skip_idct
+skips frame IDCT/dequantization, skip_frame skips decoding.
+
+Possible values:
+
+‘none ’
+Discard no frame.
+
+
+‘default ’
+Discard useless frames like 0-sized frames.
+
+
+‘noref ’
+Discard all non-reference frames.
+
+
+‘bidir ’
+Discard all bidirectional frames.
+
+
+‘nokey ’
+Discard all frames except keyframes.
+
+
+‘all ’
+Discard all frames.
+
+
+
+Default value is ‘default ’.
+
+
+bidir_refine integer (encoding,video )
+Refine the two motion vectors used in bidirectional macroblocks.
+
+
+brd_scale integer (encoding,video )
+Downscale frames for dynamic B-frame decision.
+
+
+keyint_min integer (encoding,video )
+Set minimum interval between IDR-frames.
+
+
+refs integer (encoding,video )
+Set reference frames to consider for motion compensation.
+
+
+chromaoffset integer (encoding,video )
+Set chroma qp offset from luma.
+
+
+trellis integer (encoding,audio,video )
+Set rate-distortion optimal quantization.
+
+
+sc_factor integer (encoding,video )
+Set value multiplied by qscale for each frame and added to
+scene_change_score.
+
+
+mv0_threshold integer (encoding,video )
+b_sensitivity integer (encoding,video )
+Adjust sensitivity of b_frame_strategy 1.
+
+
+compression_level integer (encoding,audio,video )
+min_prediction_order integer (encoding,audio )
+max_prediction_order integer (encoding,audio )
+timecode_frame_start integer (encoding,video )
+Set GOP timecode frame start number, in non drop frame format.
+
+
+request_channels integer (decoding,audio )
+Set desired number of audio channels.
+
+
+bits_per_raw_sample integer
+channel_layout integer (decoding/encoding,audio )
+
+Possible values:
+
+request_channel_layout integer (decoding,audio )
+
+Possible values:
+
+rc_max_vbv_use float (encoding,video )
+rc_min_vbv_use float (encoding,video )
+ticks_per_frame integer (decoding/encoding,audio,video )
+color_primaries integer (decoding/encoding,video )
+color_trc integer (decoding/encoding,video )
+colorspace integer (decoding/encoding,video )
+color_range integer (decoding/encoding,video )
+chroma_sample_location integer (decoding/encoding,video )
+log_level_offset integer
+Set the log level offset.
+
+
+slices integer (encoding,video )
+Number of slices, used in parallelized encoding.
+
+
+thread_type flags (decoding/encoding,video )
+Select which multithreading methods to use.
+
+Use of ‘frame ’ will increase decoding delay by one frame per
+thread, so clients which cannot provide future frames should not use
+it.
+
+Possible values:
+
+‘slice ’
+Decode more than one part of a single frame at once.
+
+Multithreading using slices works only when the video was encoded with
+slices.
+
+
+‘frame ’
+Decode more than one frame at once.
+
+
+
+Default value is ‘slice+frame ’.
+
+
+audio_service_type integer (encoding,audio )
+Set audio service type.
+
+Possible values:
+
+‘ma ’
+Main Audio Service
+
+‘ef ’
+Effects
+
+‘vi ’
+Visually Impaired
+
+‘hi ’
+Hearing Impaired
+
+‘di ’
+Dialogue
+
+‘co ’
+Commentary
+
+‘em ’
+Emergency
+
+‘vo ’
+Voice Over
+
+‘ka ’
+Karaoke
+
+
+
+
+request_sample_fmt sample_fmt (decoding,audio )
+Set sample format audio decoders should prefer. Default value is
+none
.
+
+
+pkt_timebase rational number
+sub_charenc encoding (decoding,subtitles )
+Set the input subtitles character encoding.
+
+
+field_order field_order (video )
+Set/override the field order of the video.
+Possible values:
+
+‘progressive ’
+Progressive video
+
+‘tt ’
+Interlaced video, top field coded and displayed first
+
+‘bb ’
+Interlaced video, bottom field coded and displayed first
+
+‘tb ’
+Interlaced video, top coded first, bottom displayed first
+
+‘bt ’
+Interlaced video, bottom coded first, top displayed first
+
+
+
+
+skip_alpha integer (decoding,video )
+Set to 1 to disable processing alpha (transparency). This works like the
+‘gray ’ flag in the flags option which skips chroma information
+instead of alpha. Default is 0.
+
+
+codec_whitelist list (input )
+"," separated list of allowed decoders. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+stream parameters.
+For example, to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
+
12 Decoders# TOC
+
+
Decoders are configured elements in FFmpeg which allow the decoding of
+multimedia streams.
+
+
When you configure your FFmpeg build, all the supported native decoders
+are enabled by default. Decoders requiring an external library must be enabled
+manually via the corresponding --enable-lib
option. You can list all
+available decoders using the configure option --list-decoders
.
+
+
You can disable all the decoders with the configure option
+--disable-decoders
and selectively enable / disable single decoders
+with the options --enable-decoder=DECODER
/
+--disable-decoder=DECODER
.
+
+
The option -decoders
of the ff* tools will display the list of
+enabled decoders.
+
+
+
+
13 Video Decoders# TOC
+
+
A description of some of the currently available video decoders
+follows.
+
+
+
13.1 rawvideo# TOC
+
+
Raw video decoder.
+
+
This decoder decodes rawvideo streams.
+
+
+
13.1.1 Options# TOC
+
+
+top top_field_first
+Specify the assumed field type of the input video.
+
+-1
+the video is assumed to be progressive (default)
+
+0
+bottom-field-first is assumed
+
+1
+top-field-first is assumed
+
+
+
+
+
+
+
+
+
14 Audio Decoders# TOC
+
+
A description of some of the currently available audio decoders
+follows.
+
+
+
14.1 ac3# TOC
+
+
AC-3 audio decoder.
+
+
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
+the undocumented RealAudio 3 (a.k.a. dnet).
+
+
+
14.1.1 AC-3 Decoder Options# TOC
+
+
+-drc_scale value
+Dynamic Range Scale Factor. The factor to apply to dynamic range values
+from the AC-3 stream. This factor is applied exponentially.
+There are 3 notable scale factor ranges:
+
+drc_scale == 0
+DRC disabled. Produces full range audio.
+
+0 < drc_scale <= 1
+DRC enabled. Applies a fraction of the stream DRC value.
+Audio reproduction is between full range and full compression.
+
+drc_scale > 1
+DRC enabled. Applies drc_scale asymmetrically.
+Loud sounds are fully compressed. Soft sounds are enhanced.
+
+
+
+
+
+
+
+
14.2 ffwavesynth# TOC
+
+
+Internal wave synthesizer.
+
+
This decoder generates wave patterns according to predefined sequences. Its
+use is purely internal and the format of the data it accepts is not publicly
+documented.
+
+
+
14.3 libcelt# TOC
+
+
libcelt decoder wrapper.
+
+
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
+Requires the presence of the libcelt headers and library during configuration.
+You need to explicitly configure the build with --enable-libcelt
.
+
+
+
14.4 libgsm# TOC
+
+
libgsm decoder wrapper.
+
+
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
+the presence of the libgsm headers and library during configuration. You need
+to explicitly configure the build with --enable-libgsm
.
+
+
This decoder supports both the ordinary GSM and the Microsoft variant.
+
+
+
14.5 libilbc# TOC
+
+
libilbc decoder wrapper.
+
+
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
+audio codec. Requires the presence of the libilbc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libilbc
.
+
+
+
14.5.1 Options# TOC
+
+
The following option is supported by the libilbc wrapper.
+
+
+enhance
+
+Enable the enhancement of the decoded audio when set to 1. The default
+value is 0 (disabled).
+
+
+
+
+
+
14.6 libopencore-amrnb# TOC
+
+
libopencore-amrnb decoder wrapper.
+
+
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
+Narrowband audio codec. Using it requires the presence of the
+libopencore-amrnb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrnb
.
+
+
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
+without this library.
+
+
+
14.7 libopencore-amrwb# TOC
+
+
libopencore-amrwb decoder wrapper.
+
+
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
+Wideband audio codec. Using it requires the presence of the
+libopencore-amrwb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrwb
.
+
+
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
+without this library.
+
+
+
14.8 libopus# TOC
+
+
libopus decoder wrapper.
+
+
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
+Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopus
.
+
+
An FFmpeg native decoder for Opus exists, so users can decode Opus
+without this library.
+
+
+
+
15 Subtitles Decoders# TOC
+
+
+
15.1 dvdsub# TOC
+
+
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
+also be found in VobSub file pairs and in some Matroska files.
+
+
+
15.1.1 Options# TOC
+
+
+palette
+Specify the global palette used by the bitmaps. When stored in VobSub, the
+palette is normally specified in the index file; in Matroska, the palette is
+stored in the codec extra-data in the same format as in VobSub. In DVDs, the
+palette is stored in the IFO file, and therefore not available when reading
+from dumped VOB files.
+
+The format for this option is a string containing 16 24-bits hexadecimal
+numbers (without 0x prefix) separated by commas, for example 0d00ee,
+ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
+7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
+
+
+ifo_palette
+Specify the IFO file from which the global palette is obtained.
+(experimental)
+
+
+forced_subs_only
+Only decode subtitle entries marked as forced. Some titles have forced
+and non-forced subtitles in the same track. Setting this flag to 1
+will only keep the forced subtitles. Default value is 0
.
+
+
+
+
+
15.2 libzvbi-teletext# TOC
+
+
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
+subtitles. Requires the presence of the libzvbi headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libzvbi
.
+
+
+
15.2.1 Options# TOC
+
+
+txt_page
+List of teletext page numbers to decode. You may use the special * string to
+match all pages. Pages that do not match the specified list are dropped.
+Default value is *.
+
+txt_chop_top
+Discards the top teletext line. Default value is 1.
+
+txt_format
+Specifies the format of the decoded subtitles. The teletext decoder is capable
+of decoding the teletext pages to bitmaps or to simple text, you should use
+"bitmap" for teletext pages, because certain graphics and colors cannot be
+expressed in simple text. You might use "text" for teletext based subtitles if
+your application can handle simple text based subtitles. Default value is
+bitmap.
+
+txt_left
+X offset of generated bitmaps, default is 0.
+
+txt_top
+Y offset of generated bitmaps, default is 0.
+
+txt_chop_spaces
+Chops leading and trailing spaces and removes empty lines from the generated
+text. This option is useful for teletext based subtitles where empty spaces may
+be present at the start or at the end of the lines or empty lines may be
+present between the subtitle lines because of double-sized teletext characters.
+Default value is 1.
+
+txt_duration
+Sets the display duration of the decoded teletext pages or subtitles in
+milliseconds. Default value is 30000 which is 30 seconds.
+
+txt_transparent
+Force transparent background of the generated teletext bitmaps. Default value
+is 0 which means an opaque (black) background.
+
+
+
+
+
16 Encoders# TOC
+
+
Encoders are configured elements in FFmpeg which allow the encoding of
+multimedia streams.
+
+
When you configure your FFmpeg build, all the supported native encoders
+are enabled by default. Encoders requiring an external library must be enabled
+manually via the corresponding --enable-lib
option. You can list all
+available encoders using the configure option --list-encoders
.
+
+
You can disable all the encoders with the configure option
+--disable-encoders
and selectively enable / disable single encoders
+with the options --enable-encoder=ENCODER
/
+--disable-encoder=ENCODER
.
+
+
The option -encoders
of the ff* tools will display the list of
+enabled encoders.
+
+
+
+
17 Audio Encoders# TOC
+
+
A description of some of the currently available audio encoders
+follows.
+
+
+
17.1 aac# TOC
+
+
Advanced Audio Coding (AAC) encoder.
+
+
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
+low complexity (AAC-LC) profile is supported. To use this encoder, you must set
+strict option to ‘experimental ’ or lower.
+
+
As this encoder is experimental, unexpected behavior may exist from time to
+time. For a more stable AAC encoder, see libvo-aacenc . However, be warned
+that it has a worse quality reported by some users.
+
+
See also libfdk_aac and libfaac .
+
+
+
17.1.1 Options# TOC
+
+
+b
+Set bit rate in bits/s. Setting this automatically activates constant bit rate
+(CBR) mode.
+
+
+q
+Set quality for variable bit rate (VBR) mode. This option is valid only using
+the ffmpeg
command-line tool. For library interface users, use
+global_quality .
+
+
+stereo_mode
+Set stereo encoding mode. Possible values:
+
+
+‘auto ’
+Automatically selected by the encoder.
+
+
+‘ms_off ’
+Disable middle/side encoding. This is the default.
+
+
+‘ms_force ’
+Force middle/side encoding.
+
+
+
+
+aac_coder
+Set AAC encoder coding method. Possible values:
+
+
+‘faac ’
+FAAC-inspired method.
+
+This method is a simplified reimplementation of the method used in FAAC, which
+sets thresholds proportional to the band energies, and then decreases all the
+thresholds with quantizer steps to find the appropriate quantization with
+distortion below threshold band by band.
+
+The quality of this method is comparable to the two loop searching method
+described below, but somewhat better and slower.
+
+
+‘anmr ’
+Average noise to mask ratio (ANMR) trellis-based solution.
+
+This has a theoretic best quality out of all the coding methods, but at the
+cost of the slowest speed.
+
+
+‘twoloop ’
+Two loop searching (TLS) method.
+
+This method first sets quantizers depending on band thresholds and then tries
+to find an optimal combination by adding or subtracting a specific value from
+all quantizers and adjusting some individual quantizer a little.
+
+This method produces similar quality with the FAAC method and is the default.
+
+
+‘fast ’
+Constant quantizer method.
+
+This method sets a constant quantizer for all bands. This is the fastest of all
+the methods, yet produces the worst quality.
+
+
+
+
+
+
+
+
+
17.2 ac3 and ac3_fixed# TOC
+
+
AC-3 audio encoders.
+
+
These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as
+the undocumented RealAudio 3 (a.k.a. dnet).
+
+
The ac3 encoder uses floating-point math, while the ac3_fixed
+encoder only uses fixed-point integer math. This does not mean that one is
+always faster, just that one or the other may be better suited to a
+particular system. The floating-point encoder will generally produce better
+quality audio for a given bitrate. The ac3_fixed encoder is not the
+default codec for any of the output formats, so it must be specified explicitly
+using the option -acodec ac3_fixed
in order to use it.
+
+
+
17.2.1 AC-3 Metadata# TOC
+
+
The AC-3 metadata options are used to set parameters that describe the audio,
+but in most cases do not affect the audio encoding itself. Some of the options
+do directly affect or influence the decoding and playback of the resulting
+bitstream, while others are just for informational purposes. A few of the
+options will add bits to the output stream that could otherwise be used for
+audio data, and will thus affect the quality of the output. Those will be
+indicated accordingly with a note in the option list below.
+
+
These parameters are described in detail in several publicly-available
+documents.
+
+
+
+
17.2.1.1 Metadata Control Options# TOC
+
+
+-per_frame_metadata boolean
+Allow Per-Frame Metadata. Specifies if the encoder should check for changing
+metadata for each frame.
+
+0
+The metadata values set at initialization will be used for every frame in the
+stream. (default)
+
+1
+Metadata values can be changed before encoding each frame.
+
+
+
+
+
+
+
+
17.2.1.2 Downmix Levels# TOC
+
+
+-center_mixlev level
+Center Mix Level. The amount of gain the decoder should apply to the center
+channel when downmixing to stereo. This field will only be written to the
+bitstream if a center channel is present. The value is specified as a scale
+factor. There are 3 valid values:
+
+0.707
+Apply -3dB gain
+
+0.595
+Apply -4.5dB gain (default)
+
+0.500
+Apply -6dB gain
+
+
+
+
+-surround_mixlev level
+Surround Mix Level. The amount of gain the decoder should apply to the surround
+channel(s) when downmixing to stereo. This field will only be written to the
+bitstream if one or more surround channels are present. The value is specified
+as a scale factor. There are 3 valid values:
+
+0.707
+Apply -3dB gain
+
+0.500
+Apply -6dB gain (default)
+
+0.000
+Silence Surround Channel(s)
+
+
+
+
+
+
+
+
17.2.1.3 Audio Production Information# TOC
+
Audio Production Information is optional information describing the mixing
+environment. Either none or both of the fields are written to the bitstream.
+
+
+-mixing_level number
+Mixing Level. Specifies peak sound pressure level (SPL) in the production
+environment when the mix was mastered. Valid values are 80 to 111, or -1 for
+unknown or not indicated. The default value is -1, but that value cannot be
+used if the Audio Production Information is written to the bitstream. Therefore,
+if the room_type
option is not the default value, the mixing_level
+option must not be -1.
+
+
+-room_type type
+Room Type. Describes the equalization used during the final mixing session at
+the studio or on the dubbing stage. A large room is a dubbing stage with the
+industry standard X-curve equalization; a small room has flat equalization.
+This field will not be written to the bitstream if both the mixing_level
+option and the room_type
option have the default values.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+large
+Large Room
+
+2
+small
+Small Room
+
+
+
+
+
+
+
+
17.2.1.4 Other Metadata Options# TOC
+
+
+-copyright boolean
+Copyright Indicator. Specifies whether a copyright exists for this audio.
+
+0
+off
+No Copyright Exists (default)
+
+1
+on
+Copyright Exists
+
+
+
+
+-dialnorm value
+Dialogue Normalization. Indicates how far the average dialogue level of the
+program is below digital 100% full scale (0 dBFS). This parameter determines a
+level shift during audio reproduction that sets the average volume of the
+dialogue to a preset level. The goal is to match volume level between program
+sources. A value of -31dB will result in no volume level change, relative to
+the source volume, during audio reproduction. Valid values are whole numbers in
+the range -31 to -1, with -31 being the default.
+
+
+-dsur_mode mode
+Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround
+(Pro Logic). This field will only be written to the bitstream if the audio
+stream is stereo. Using this option does NOT mean the encoder will actually
+apply Dolby Surround processing.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+off
+Not Dolby Surround Encoded
+
+2
+on
+Dolby Surround Encoded
+
+
+
+
+-original boolean
+Original Bit Stream Indicator. Specifies whether this audio is from the
+original source and not a copy.
+
+0
+off
+Not Original Source
+
+1
+on
+Original Source (default)
+
+
+
+
+
+
+
+
17.2.2 Extended Bitstream Information# TOC
+
The extended bitstream options are part of the Alternate Bit Stream Syntax as
+specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts.
+If any one parameter in a group is specified, all values in that group will be
+written to the bitstream. Default values are used for those that are written
+but have not been specified. If the mixing levels are written, the decoder
+will use these values instead of the ones specified in the center_mixlev
+and surround_mixlev
options if it supports the Alternate Bit Stream
+Syntax.
+
+
+
17.2.2.1 Extended Bitstream Information - Part 1# TOC
+
+
+-dmix_mode mode
+Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt
+(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+ltrt
+Lt/Rt Downmix Preferred
+
+2
+loro
+Lo/Ro Downmix Preferred
+
+
+
+
+-ltrt_cmixlev level
+Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the
+center channel when downmixing to stereo in Lt/Rt mode.
+
+1.414
+Apply +3dB gain
+
+1.189
+Apply +1.5dB gain
+
+1.000
+Apply 0dB gain
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain (default)
+
+0.500
+Apply -6.0dB gain
+
+0.000
+Silence Center Channel
+
+
+
+
+-ltrt_surmixlev level
+Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the
+surround channel(s) when downmixing to stereo in Lt/Rt mode.
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain
+
+0.500
+Apply -6.0dB gain (default)
+
+0.000
+Silence Surround Channel(s)
+
+
+
+
+-loro_cmixlev level
+Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the
+center channel when downmixing to stereo in Lo/Ro mode.
+
+1.414
+Apply +3dB gain
+
+1.189
+Apply +1.5dB gain
+
+1.000
+Apply 0dB gain
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain (default)
+
+0.500
+Apply -6.0dB gain
+
+0.000
+Silence Center Channel
+
+
+
+
+-loro_surmixlev level
+Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the
+surround channel(s) when downmixing to stereo in Lo/Ro mode.
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain
+
+0.500
+Apply -6.0dB gain (default)
+
+0.000
+Silence Surround Channel(s)
+
+
+
+
+
+
+
+
17.2.2.2 Extended Bitstream Information - Part 2# TOC
+
+
+-dsurex_mode mode
+Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX
+(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually
+apply Dolby Surround EX processing.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+on
+Dolby Surround EX Off
+
+2
+off
+Dolby Surround EX On
+
+
+
+
+-dheadphone_mode mode
+Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone
+encoding (multi-channel matrixed to 2.0 for use with headphones). Using this
+option does NOT mean the encoder will actually apply Dolby Headphone
+processing.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+on
+Dolby Headphone On
+
+2
+off
+Dolby Headphone Off
+
+
+
+
+-ad_conv_type type
+A/D Converter Type. Indicates whether the audio has passed through HDCD A/D
+conversion.
+
+0
+standard
+Standard A/D Converter (default)
+
+1
+hdcd
+HDCD A/D Converter
+
+
+
+
+
+
+
+
17.2.3 Other AC-3 Encoding Options# TOC
+
+
+-stereo_rematrixing boolean
+Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
+is an optional AC-3 feature that increases quality by selectively encoding
+the left/right channels as mid/side. This option is enabled by default, and it
+is highly recommended that it be left as enabled except for testing purposes.
+
+
+
+
+
+
17.2.4 Floating-Point-Only AC-3 Encoding Options# TOC
+
+
These options are only valid for the floating-point encoder and do not exist
+for the fixed-point encoder due to the corresponding features not being
+implemented in fixed-point.
+
+
+-channel_coupling boolean
+Enables/Disables use of channel coupling, which is an optional AC-3 feature
+that increases quality by combining high frequency information from multiple
+channels into a single channel. The per-channel high frequency information is
+sent with less accuracy in both the frequency and time domains. This allows
+more bits to be used for lower frequencies while preserving enough information
+to reconstruct the high frequencies. This option is enabled by default for the
+floating-point encoder and should generally be left as enabled except for
+testing purposes or to increase encoding speed.
+
+-1
+auto
+Selected by Encoder (default)
+
+0
+off
+Disable Channel Coupling
+
+1
+on
+Enable Channel Coupling
+
+
+
+
+-cpl_start_band number
+Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a
+value higher than the bandwidth is used, it will be reduced to 1 less than the
+coupling end band. If auto is used, the start band will be determined by
+the encoder based on the bit rate, sample rate, and channel layout. This option
+has no effect if channel coupling is disabled.
+
+-1
+auto
+Selected by Encoder (default)
+
+
+
+
+
+
+
+
17.3 libfaac# TOC
+
+
libfaac AAC (Advanced Audio Coding) encoder wrapper.
+
+
Requires the presence of the libfaac headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libfaac --enable-nonfree
.
+
+
This encoder is considered to be of higher quality with respect to
+the native experimental FFmpeg AAC encoder .
+
+
For more information see the libfaac project at
+http://www.audiocoding.com/faac.html/ .
+
+
+
17.3.1 Options# TOC
+
+
The following shared FFmpeg codec options are recognized.
+
+
The following options are supported by the libfaac wrapper. The
+faac
-equivalent of the options are listed in parentheses.
+
+
+b (-b )
+Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
+is not explicitly specified, it is automatically set to a suitable
+value depending on the selected profile. faac
bitrate is
+expressed in kilobits/s.
+
+Note that libfaac does not support CBR (Constant Bit Rate) but only
+ABR (Average Bit Rate).
+
+If VBR mode is enabled this option is ignored.
+
+
+ar (-R )
+Set audio sampling rate (in Hz).
+
+
+ac (-c )
+Set the number of audio channels.
+
+
+cutoff (-C )
+Set cutoff frequency. If not specified (or explicitly set to 0) it
+will use a value automatically computed by the library. Default value
+is 0.
+
+
+profile
+Set audio profile.
+
+The following profiles are recognized:
+
+‘aac_main ’
+Main AAC (Main)
+
+
+‘aac_low ’
+Low Complexity AAC (LC)
+
+
+‘aac_ssr ’
+Scalable Sample Rate (SSR)
+
+
+‘aac_ltp ’
+Long Term Prediction (LTP)
+
+
+
+If not specified it is set to ‘aac_low ’.
+
+
+flags +qscale
+Set constant quality VBR (Variable Bit Rate) mode.
+
+
+global_quality
+Set quality in VBR mode as an integer number of lambda units.
+
+Only relevant when VBR mode is enabled with flags +qscale
. The
+value is converted to QP units by dividing it by FF_QP2LAMBDA
,
+and used to set the quality value used by libfaac. A reasonable range
+for the option value in QP units is [10-500], the higher the value the
+higher the quality.
+
+
+q (-q )
+Enable VBR mode when set to a non-negative value, and set constant
+quality value as a double floating point value in QP units.
+
+The value sets the quality value used by libfaac. A reasonable range
+for the option value is [10-500], the higher the value the higher the
+quality.
+
+This option is valid only using the ffmpeg
command-line
+tool. For library interface users, use global_quality .
+
+
+
+
+
17.3.2 Examples# TOC
+
+
+ Use ffmpeg
to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
+container:
+
+
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
+
+
+ Use ffmpeg
to convert an audio file to VBR AAC, using the
+LTP AAC profile:
+
+
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
+
+
+
+
+
17.4 libfdk_aac# TOC
+
+
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
+
+
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
+the Android project.
+
+
Requires the presence of the libfdk-aac headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libfdk-aac
. The library is also incompatible with GPL,
+so if you allow the use of GPL, you should configure with
+--enable-gpl --enable-nonfree --enable-libfdk-aac
.
+
+
This encoder is considered to be of higher quality with respect to
+both the native experimental FFmpeg AAC encoder and
+libfaac .
+
+
VBR encoding, enabled through the vbr or flags
++qscale options, is experimental and only works with some
+combinations of parameters.
+
+
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
+higher.
+
+
For more information see the fdk-aac project at
+http://sourceforge.net/p/opencore-amr/fdk-aac/ .
+
+
+
17.4.1 Options# TOC
+
+
The following options are mapped on the shared FFmpeg codec options.
+
+
+b
+Set bit rate in bits/s. If the bitrate is not explicitly specified, it
+is automatically set to a suitable value depending on the selected
+profile.
+
+In case VBR mode is enabled the option is ignored.
+
+
+ar
+Set audio sampling rate (in Hz).
+
+
+channels
+Set the number of audio channels.
+
+
+flags +qscale
+Enable fixed quality, VBR (Variable Bit Rate) mode.
+Note that VBR is implicitly enabled when the vbr value is
+positive.
+
+
+cutoff
+Set cutoff frequency. If not specified (or explicitly set to 0) it
+will use a value automatically computed by the library. Default value
+is 0.
+
+
+profile
+Set audio profile.
+
+The following profiles are recognized:
+
+‘aac_low ’
+Low Complexity AAC (LC)
+
+
+‘aac_he ’
+High Efficiency AAC (HE-AAC)
+
+
+‘aac_he_v2 ’
+High Efficiency AAC version 2 (HE-AACv2)
+
+
+‘aac_ld ’
+Low Delay AAC (LD)
+
+
+‘aac_eld ’
+Enhanced Low Delay AAC (ELD)
+
+
+
+If not specified it is set to ‘aac_low ’.
+
+
+
+
The following are private options of the libfdk_aac encoder.
+
+
+afterburner
+Enable afterburner feature if set to 1, disabled if set to 0. This
+improves the quality but also the required processing power.
+
+Default value is 1.
+
+
+eld_sbr
+Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
+if set to 0.
+
+Default value is 0.
+
+
+signaling
+Set SBR/PS signaling style.
+
+It can assume one of the following values:
+
+‘default ’
+choose signaling implicitly (explicit hierarchical by default,
+implicit if global header is disabled)
+
+
+‘implicit ’
+implicit backwards compatible signaling
+
+
+‘explicit_sbr ’
+explicit SBR, implicit PS signaling
+
+
+‘explicit_hierarchical ’
+explicit hierarchical signaling
+
+
+
+Default value is ‘default ’.
+
+
+latm
+Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
+
+Default value is 0.
+
+
+header_period
+Set StreamMuxConfig and PCE repetition period (in frames) for sending
+in-band configuration buffers within LATM/LOAS transport layer.
+
+Must be a 16-bits non-negative integer.
+
+Default value is 0.
+
+
+vbr
+Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
+good) and 5 is highest quality. A value of 0 will disable VBR, and CBR
+(Constant Bit Rate) is enabled.
+
+Currently only the ‘aac_low ’ profile supports VBR encoding.
+
+VBR modes 1-5 correspond to roughly the following average bit rates:
+
+
+‘1 ’
+32 kbps/channel
+
+‘2 ’
+40 kbps/channel
+
+‘3 ’
+48-56 kbps/channel
+
+‘4 ’
+64 kbps/channel
+
+‘5 ’
+about 80-96 kbps/channel
+
+
+
+Default value is 0.
+
+
+
+
+
17.4.2 Examples# TOC
+
+
+ Use ffmpeg
to convert an audio file to VBR AAC in an M4A (MP4)
+container:
+
+
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
+
+
+ Use ffmpeg
to convert an audio file to CBR 64 kbps AAC, using the
+High-Efficiency AAC profile:
+
+
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
+
+
+
+
+
17.5 libmp3lame# TOC
+
+
LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper.
+
+
Requires the presence of the libmp3lame headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libmp3lame
.
+
+
See libshine for a fixed-point MP3 encoder, although with a
+lower quality.
+
+
+
17.5.1 Options# TOC
+
+
The following options are supported by the libmp3lame wrapper. The
+lame
-equivalent of the options are listed in parentheses.
+
+
+b (-b )
+Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate
is
+expressed in kilobits/s.
+
+
+q (-V )
+Set constant quality setting for VBR. This option is valid only
+using the ffmpeg
command-line tool. For library interface
+users, use global_quality .
+
+
+compression_level (-q )
+Set algorithm quality. Valid arguments are integers in the 0-9 range,
+with 0 meaning highest quality but slowest, and 9 meaning fastest
+while producing the worst quality.
+
+
+reservoir
+Enable use of bit reservoir when set to 1. Default value is 1. LAME
+has this enabled by default, but it can be overridden by using the
+--nores option.
+
+
+joint_stereo (-m j )
+Enable the encoder to use (on a frame by frame basis) either L/R
+stereo or mid/side stereo. Default value is 1.
+
+
+abr (--abr )
+Enable the encoder to use ABR when set to 1. The lame
+--abr sets the target bitrate, while this option only
+tells FFmpeg to use ABR; it still relies on b to set the bitrate.
+
+
+
+
+
+
17.6 libopencore-amrnb# TOC
+
+
OpenCORE Adaptive Multi-Rate Narrowband encoder.
+
+
Requires the presence of the libopencore-amrnb headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopencore-amrnb --enable-version3
.
+
+
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
+but you can override it by setting strict to ‘unofficial ’ or
+lower.
+
+
+
17.6.1 Options# TOC
+
+
+b
+Set bitrate in bits per second. Only the following bitrates are supported,
+otherwise libavcodec will round to the nearest valid bitrate.
+
+
+4750
+5150
+5900
+6700
+7400
+7950
+10200
+12200
+
+
+
+dtx
+Allow discontinuous transmission (generate comfort noise) when set to 1. The
+default value is 0 (disabled).
+
+
+
+
+
+
17.7 libshine# TOC
+
+
Shine Fixed-Point MP3 encoder wrapper.
+
+
Shine is a fixed-point MP3 encoder. It has a far better performance on
+platforms without an FPU, e.g. armel CPUs, and some phones and tablets.
+However, as it is more targeted on performance than quality, it is not on par
+with LAME and other production-grade encoders quality-wise. Also, according to
+the project’s homepage, this encoder may not be free of bugs as the code was
+written a long time ago and the project was dead for at least 5 years.
+
+
This encoder only supports stereo and mono input. This is also CBR-only.
+
+
The original project (last updated in early 2007) is at
+http://sourceforge.net/projects/libshine-fxp/ . We only support the
+updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine .
+
+
Requires the presence of the libshine headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libshine
.
+
+
See also libmp3lame .
+
+
+
17.7.1 Options# TOC
+
+
The following options are supported by the libshine wrapper. The
+shineenc
-equivalent of the options are listed in parentheses.
+
+
+b (-b )
+Set bitrate expressed in bits/s for CBR. shineenc
-b option
+is expressed in kilobits/s.
+
+
+
+
+
+
17.8 libtwolame# TOC
+
+
TwoLAME MP2 encoder wrapper.
+
+
Requires the presence of the libtwolame headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libtwolame
.
+
+
+
17.8.1 Options# TOC
+
+
The following options are supported by the libtwolame wrapper. The
+twolame
-equivalent options follow the FFmpeg ones and are in
+parentheses.
+
+
+b (-b )
+Set bitrate expressed in bits/s for CBR. twolame
b
+option is expressed in kilobits/s. Default value is 128k.
+
+
+q (-V )
+Set quality for experimental VBR support. Maximum value range is
+from -50 to 50, useful range is from -10 to 10. The higher the
+value, the better the quality. This option is valid only using the
+ffmpeg
command-line tool. For library interface users,
+use global_quality .
+
+
+mode (--mode )
+Set the mode of the resulting audio. Possible values:
+
+
+‘auto ’
+Choose mode automatically based on the input. This is the default.
+
+‘stereo ’
+Stereo
+
+‘joint_stereo ’
+Joint stereo
+
+‘dual_channel ’
+Dual channel
+
+‘mono ’
+Mono
+
+
+
+
+psymodel (--psyc-mode )
+Set psychoacoustic model to use in encoding. The argument must be
+an integer between -1 and 4, inclusive. The higher the value, the
+better the quality. The default value is 3.
+
+
+energy_levels (--energy )
+Enable energy levels extensions when set to 1. The default value is
+0 (disabled).
+
+
+error_protection (--protect )
+Enable CRC error protection when set to 1. The default value is 0
+(disabled).
+
+
+copyright (--copyright )
+Set MPEG audio copyright flag when set to 1. The default value is 0
+(disabled).
+
+
+original (--original )
+Set MPEG audio original flag when set to 1. The default value is 0
+(disabled).
+
+
+
+
+
+
17.9 libvo-aacenc# TOC
+
+
VisualOn AAC encoder.
+
+
Requires the presence of the libvo-aacenc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libvo-aacenc --enable-version3
.
+
+
This encoder is considered to be worse than the
+native experimental FFmpeg AAC encoder , according to
+multiple sources.
+
+
+
17.9.1 Options# TOC
+
+
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
+channels. It is also CBR-only.
+
+
+b
+Set bit rate in bits/s.
+
+
+
+
+
+
17.10 libvo-amrwbenc# TOC
+
+
VisualOn Adaptive Multi-Rate Wideband encoder.
+
+
Requires the presence of the libvo-amrwbenc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libvo-amrwbenc --enable-version3
.
+
+
This is a mono-only encoder. Officially it only supports 16000Hz sample
+rate, but you can override it by setting strict to
+‘unofficial ’ or lower.
+
+
+
17.10.1 Options# TOC
+
+
+b
+Set bitrate in bits/s. Only the following bitrates are supported, otherwise
+libavcodec will round to the nearest valid bitrate.
+
+
+‘6600 ’
+‘8850 ’
+‘12650 ’
+‘14250 ’
+‘15850 ’
+‘18250 ’
+‘19850 ’
+‘23050 ’
+‘23850 ’
+
+
+
+dtx
+Allow discontinuous transmission (generate comfort noise) when set to 1. The
+default value is 0 (disabled).
+
+
+
+
+
+
17.11 libopus# TOC
+
+
libopus Opus Interactive Audio Codec encoder wrapper.
+
+
Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopus
.
+
+
+
17.11.1 Option Mapping# TOC
+
+
Most libopus options are modelled after the opusenc
utility from
+opus-tools. The following is an option mapping chart describing options
+supported by the libopus wrapper, and their opusenc
-equivalent
+in parentheses.
+
+
+b (bitrate )
+Set the bit rate in bits/s. FFmpeg’s b option is
+expressed in bits/s, while opusenc
’s bitrate in
+kilobits/s.
+
+
+vbr (vbr , hard-cbr , and cvbr )
+Set VBR mode. The FFmpeg vbr option has the following
+valid arguments, with their opusenc
equivalent options
+in parentheses:
+
+
+‘off (hard-cbr ) ’
+Use constant bit rate encoding.
+
+
+‘on (vbr ) ’
+Use variable bit rate encoding (the default).
+
+
+‘constrained (cvbr ) ’
+Use constrained variable bit rate encoding.
+
+
+
+
+compression_level (comp )
+Set encoding algorithm complexity. Valid options are integers in
+the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
+gives the highest quality but slowest encoding. The default is 10.
+
+
+frame_duration (framesize )
+Set maximum frame size, or duration of a frame in milliseconds. The
+argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
+frame sizes achieve lower latency but less quality at a given bitrate.
+Sizes greater than 20ms are only interesting at fairly low bitrates.
+The default is 20ms.
+
+
+packet_loss (expect-loss )
+Set expected packet loss percentage. The default is 0.
+
+
+application (N.A.)
+Set intended application type. Valid options are listed below:
+
+
+‘voip ’
+Favor improved speech intelligibility.
+
+‘audio ’
+Favor faithfulness to the input (the default).
+
+‘lowdelay ’
+Restrict to only the lowest delay modes.
+
+
+
+
+cutoff (N.A.)
+Set cutoff bandwidth in Hz. The argument must be exactly one of the
+following: 4000, 6000, 8000, 12000, or 20000, corresponding to
+narrowband, mediumband, wideband, super wideband, and fullband
+respectively. The default is 0 (cutoff disabled).
+
+
+
+
+
+
17.12 libvorbis# TOC
+
+
libvorbis encoder wrapper.
+
+
Requires the presence of the libvorbisenc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libvorbis
.
+
+
+
17.12.1 Options# TOC
+
+
The following options are supported by the libvorbis wrapper. The
+oggenc
-equivalent of the options are listed in parentheses.
+
+
To get a more accurate and extensive documentation of the libvorbis
+options, consult the libvorbisenc’s and oggenc
’s documentations.
+See http://xiph.org/vorbis/ ,
+http://wiki.xiph.org/Vorbis-tools , and oggenc(1).
+
+
+b (-b )
+Set bitrate expressed in bits/s for ABR. oggenc
-b is
+expressed in kilobits/s.
+
+
+q (-q )
+Set constant quality setting for VBR. The value should be a float
+number in the range of -1.0 to 10.0. The higher the value, the better
+the quality. The default value is ‘3.0 ’.
+
+This option is valid only using the ffmpeg
command-line tool.
+For library interface users, use global_quality .
+
+
+cutoff (--advanced-encode-option lowpass_frequency=N )
+Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc
’s
+related option is expressed in kHz. The default value is ‘0 ’ (cutoff
+disabled).
+
+
+minrate (-m )
+Set minimum bitrate expressed in bits/s. oggenc
-m is
+expressed in kilobits/s.
+
+
+maxrate (-M )
+Set maximum bitrate expressed in bits/s. oggenc
-M is
+expressed in kilobits/s. This only has effect on ABR mode.
+
+
+iblock (--advanced-encode-option impulse_noisetune=N )
+Set noise floor bias for impulse blocks. The value is a float number from
+-15.0 to 0.0. A negative bias instructs the encoder to pay special attention
+to the crispness of transients in the encoded audio. The tradeoff for better
+transient response is a higher bitrate.
+
+
+
+
+
+
17.13 libwavpack# TOC
+
+
A wrapper providing WavPack encoding through libwavpack.
+
+
Only lossless mode using 32-bit integer samples is supported currently.
+
+
Requires the presence of the libwavpack headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libwavpack
.
+
+
Note that a libavcodec-native encoder for the WavPack codec exists so users can
+encode audios with this codec without using this encoder. See wavpackenc .
+
+
+
17.13.1 Options# TOC
+
+
wavpack
command line utility’s corresponding options are listed in
+parentheses, if any.
+
+
+frame_size (--blocksize )
+Default is 32768.
+
+
+compression_level
+Set speed vs. compression tradeoff. Acceptable arguments are listed below:
+
+
+‘0 (-f ) ’
+Fast mode.
+
+
+‘1 ’
+Normal (default) settings.
+
+
+‘2 (-h ) ’
+High quality.
+
+
+‘3 (-hh ) ’
+Very high quality.
+
+
+‘4-8 (-hh -x EXTRAPROC ) ’
+Same as ‘3 ’, but with extra processing enabled.
+
+‘4 ’ is the same as -x2 and ‘8 ’ is the same as -x6 .
+
+
+
+
+
+
+
+
17.14 wavpack# TOC
+
+
WavPack lossless audio encoder.
+
+
This is a libavcodec-native WavPack encoder. There is also an encoder based on
+libwavpack, but there is virtually no reason to use that encoder.
+
+
See also libwavpack .
+
+
+
17.14.1 Options# TOC
+
+
The equivalent options for wavpack
command line utility are listed in
+parentheses.
+
+
+
17.14.1.1 Shared options# TOC
+
+
The following shared options are effective for this encoder. Only special notes
+about this particular encoder will be documented here. For the general meaning
+of the options, see the Codec Options chapter .
+
+
+frame_size (--blocksize )
+For this encoder, the range for this option is between 128 and 131072. Default
+is automatically decided based on sample rate and number of channels.
+
+For the complete formula of calculating default, see
+libavcodec/wavpackenc.c .
+
+
+compression_level (-f , -h , -hh , and -x )
+This option’s syntax is consistent with libwavpack ’s.
+
+
+
+
+
17.14.1.2 Private options# TOC
+
+
+joint_stereo (-j )
+Set whether to enable joint stereo. Valid values are:
+
+
+‘on (1 ) ’
+Force mid/side audio encoding.
+
+‘off (0 ) ’
+Force left/right audio encoding.
+
+‘auto ’
+Let the encoder decide automatically.
+
+
+
+
+optimize_mono
+Set whether to enable optimization for mono. This option is only effective for
+non-mono streams. Available values:
+
+
+‘on ’
+enabled
+
+‘off ’
+disabled
+
+
+
+
+
+
+
+
+
18 Video Encoders# TOC
+
+
A description of some of the currently available video encoders
+follows.
+
+
+
18.1 libtheora# TOC
+
+
libtheora Theora encoder wrapper.
+
+
Requires the presence of the libtheora headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libtheora
.
+
+
For more information about the libtheora project see
+http://www.theora.org/ .
+
+
+
18.1.1 Options# TOC
+
+
The following global options are mapped to internal libtheora options
+which affect the quality and the bitrate of the encoded stream.
+
+
+b
+Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
+case VBR (Variable Bit Rate) mode is enabled this option is ignored.
+
+
+flags
+Used to enable constant quality mode (VBR) encoding through the
+qscale flag, and to enable the pass1
and pass2
+modes.
+
+
+g
+Set the GOP size.
+
+
+global_quality
+Set the global quality as an integer in lambda units.
+
+Only relevant when VBR mode is enabled with flags +qscale
. The
+value is converted to QP units by dividing it by FF_QP2LAMBDA
,
+clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
+value in the native libtheora range [0-63]. A higher value corresponds
+to a higher quality.
+
+
+q
+Enable VBR mode when set to a non-negative value, and set constant
+quality value as a double floating point value in QP units.
+
+The value is clipped in the [0-10] range, and then multiplied by 6.3
+to get a value in the native libtheora range [0-63].
+
+This option is valid only using the ffmpeg
command-line
+tool. For library interface users, use global_quality .
+
+
+
+
+
18.1.2 Examples# TOC
+
+
+ Set maximum constant quality (VBR) encoding with ffmpeg
:
+
+
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
+
+
+ Use ffmpeg
to convert a CBR 1000 kbps Theora video stream:
+
+
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
+
+
+
+
+
18.2 libvpx# TOC
+
+
VP8/VP9 format supported through libvpx.
+
+
Requires the presence of the libvpx headers and library during configuration.
+You need to explicitly configure the build with --enable-libvpx
.
+
+
+
18.2.1 Options# TOC
+
+
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
+
+
+threads
+g_threads
+
+
+profile
+g_profile
+
+
+vb
+rc_target_bitrate
+
+
+g
+kf_max_dist
+
+
+keyint_min
+kf_min_dist
+
+
+qmin
+rc_min_quantizer
+
+
+qmax
+rc_max_quantizer
+
+
+bufsize, vb
+rc_buf_sz
+(bufsize * 1000 / vb)
+
+rc_buf_optimal_sz
+(bufsize * 1000 / vb * 5 / 6)
+
+
+rc_init_occupancy, vb
+rc_buf_initial_sz
+(rc_init_occupancy * 1000 / vb)
+
+
+rc_buffer_aggressivity
+rc_undershoot_pct
+
+
+skip_threshold
+rc_dropframe_thresh
+
+
+qcomp
+rc_2pass_vbr_bias_pct
+
+
+maxrate, vb
+rc_2pass_vbr_maxsection_pct
+(maxrate * 100 / vb)
+
+
+minrate, vb
+rc_2pass_vbr_minsection_pct
+(minrate * 100 / vb)
+
+
+minrate, maxrate, vb
+VPX_CBR
+(minrate == maxrate == vb)
+
+
+crf
+VPX_CQ
, VP8E_SET_CQ_LEVEL
+
+
+quality
+
+best
+VPX_DL_BEST_QUALITY
+
+good
+VPX_DL_GOOD_QUALITY
+
+realtime
+VPX_DL_REALTIME
+
+
+
+
+speed
+VP8E_SET_CPUUSED
+
+
+nr
+VP8E_SET_NOISE_SENSITIVITY
+
+
+mb_threshold
+VP8E_SET_STATIC_THRESHOLD
+
+
+slices
+VP8E_SET_TOKEN_PARTITIONS
+
+
+max-intra-rate
+VP8E_SET_MAX_INTRA_BITRATE_PCT
+
+
+force_key_frames
+VPX_EFLAG_FORCE_KF
+
+
+Alternate reference frame related
+
+vp8flags altref
+VP8E_SET_ENABLEAUTOALTREF
+
+arnr_max_frames
+VP8E_SET_ARNR_MAXFRAMES
+
+arnr_type
+VP8E_SET_ARNR_TYPE
+
+arnr_strength
+VP8E_SET_ARNR_STRENGTH
+
+rc_lookahead
+g_lag_in_frames
+
+
+
+
+vp8flags error_resilient
+g_error_resilient
+
+
+aq_mode
+VP9E_SET_AQ_MODE
+
+
+
+
+
For more information about libvpx see:
+http://www.webmproject.org/
+
+
+
+
18.3 libwebp# TOC
+
+
libwebp WebP Image encoder wrapper
+
+
libwebp is Google’s official encoder for WebP images. It can encode in either
+lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
+frame. Lossless images are a separate codec developed by Google.
+
+
+
18.3.1 Pixel Format# TOC
+
+
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
+to limitations of the format and libwebp. Alpha is supported for either mode.
+Because of API limitations, if RGB is passed in when encoding lossy or YUV is
+passed in for encoding lossless, the pixel format will automatically be
+converted using functions from libwebp. This is not ideal and is done only for
+convenience.
+
+
+
18.3.2 Options# TOC
+
+
+-lossless boolean
+Enables/Disables use of lossless mode. Default is 0.
+
+
+-compression_level integer
+For lossy, this is a quality/speed tradeoff. Higher values give better quality
+for a given size at the cost of increased encoding time. For lossless, this is
+a size/speed tradeoff. Higher values give smaller size at the cost of increased
+encoding time. More specifically, it controls the number of extra algorithms
+and compression tools used, and varies the combination of these tools. This
+maps to the method option in libwebp. The valid range is 0 to 6.
+Default is 4.
+
+
+-qscale float
+For lossy encoding, this controls image quality, 0 to 100. For lossless
+encoding, this controls the effort and time spent at compressing more. The
+default value is 75. Note that for usage via libavcodec, this option is called
+global_quality and must be multiplied by FF_QP2LAMBDA .
+
+
+-preset type
+Configuration preset. This does some automatic settings based on the general
+type of the image.
+
+none
+Do not use a preset.
+
+default
+Use the encoder default.
+
+picture
+Digital picture, like portrait, inner shot
+
+photo
+Outdoor photograph, with natural lighting
+
+drawing
+Hand or line drawing, with high-contrast details
+
+icon
+Small-sized colorful images
+
+text
+Text-like
+
+
+
+
+
+
+
+
18.4 libx264, libx264rgb# TOC
+
+
x264 H.264/MPEG-4 AVC encoder wrapper.
+
+
This encoder requires the presence of the libx264 headers and library
+during configuration. You need to explicitly configure the build with
+--enable-libx264
.
+
+
libx264 supports an impressive number of features, including 8x8 and
+4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
+entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
+for detail retention (adaptive quantization, psy-RD, psy-trellis).
+
+
Many libx264 encoder options are mapped to FFmpeg global codec
+options, while unique encoder options are provided through private
+options. Additionally the x264opts and x264-params
+private options allows one to pass a list of key=value tuples as accepted
+by the libx264 x264_param_parse
function.
+
+
The x264 project website is at
+http://www.videolan.org/developers/x264.html .
+
+
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
+pixel formats as input instead of YUV.
+
+
+
18.4.1 Supported Pixel Formats# TOC
+
+
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
+x264’s configure time. FFmpeg only supports one bit depth in one particular
+build. In other words, it is not possible to build one FFmpeg with multiple
+versions of x264 with different bit depths.
+
+
+
18.4.2 Options# TOC
+
+
The following options are supported by the libx264 wrapper. The
+x264
-equivalent options or values are listed in parentheses
+for easy migration.
+
+
To reduce the duplication of documentation, only the private options
+and some others requiring special attention are documented here. For
+the documentation of the undocumented generic options, see
+the Codec Options chapter .
+
+
To get a more accurate and extensive documentation of the libx264
+options, invoke the command x264 --full-help
or consult
+the libx264 documentation.
+
+
+b (bitrate )
+Set bitrate in bits/s. Note that FFmpeg’s b option is
+expressed in bits/s, while x264
’s bitrate is in
+kilobits/s.
+
+
+bf (bframes )
+g (keyint )
+qmin (qpmin )
+Minimum quantizer scale.
+
+
+qmax (qpmax )
+Maximum quantizer scale.
+
+
+qdiff (qpstep )
+Maximum difference between quantizer scales.
+
+
+qblur (qblur )
+Quantizer curve blur
+
+
+qcomp (qcomp )
+Quantizer curve compression factor
+
+
+refs (ref )
+Number of reference frames each P-frame can use. The range is from 0-16 .
+
+
+sc_threshold (scenecut )
+Sets the threshold for the scene change detection.
+
+
+trellis (trellis )
+Performs Trellis quantization to increase efficiency. Enabled by default.
+
+
+nr (nr )
+me_range (merange )
+Maximum range of the motion search in pixels.
+
+
+me_method (me )
+Set motion estimation method. Possible values in the decreasing order
+of speed:
+
+
+‘dia (dia ) ’
+‘epzs (dia ) ’
+Diamond search with radius 1 (fastest). ‘epzs ’ is an alias for
+‘dia ’.
+
+‘hex (hex ) ’
+Hexagonal search with radius 2.
+
+‘umh (umh ) ’
+Uneven multi-hexagon search.
+
+‘esa (esa ) ’
+Exhaustive search.
+
+‘tesa (tesa ) ’
+Hadamard exhaustive search (slowest).
+
+
+
+
+subq (subme )
+Sub-pixel motion estimation method.
+
+
+b_strategy (b-adapt )
+Adaptive B-frame placement decision algorithm. Use only on first-pass.
+
+
+keyint_min (min-keyint )
+Minimum GOP size.
+
+
+coder
+Set entropy encoder. Possible values:
+
+
+‘ac ’
+Enable CABAC.
+
+
+‘vlc ’
+Enable CAVLC and disable CABAC. It generates the same effect as
+x264
’s --no-cabac option.
+
+
+
+
+cmp
+Set full pixel motion estimation comparison algorithm. Possible values:
+
+
+‘chroma ’
+Enable chroma in motion estimation.
+
+
+‘sad ’
+Ignore chroma in motion estimation. It generates the same effect as
+x264
’s --no-chroma-me option.
+
+
+
+
+threads (threads )
+Number of encoding threads.
+
+
+thread_type
+Set multithreading technique. Possible values:
+
+
+‘slice ’
+Slice-based multithreading. It generates the same effect as
+x264
’s --sliced-threads option.
+
+‘frame ’
+Frame-based multithreading.
+
+
+
+
+flags
+Set encoding flags. It can be used to disable closed GOP and enable
+open GOP by setting it to -cgop
. The result is similar to
+the behavior of x264
’s --open-gop option.
+
+
+rc_init_occupancy (vbv-init )
+preset (preset )
+Set the encoding preset.
+
+
+tune (tune )
+Set tuning of the encoding params.
+
+
+profile (profile )
+Set profile restrictions.
+
+
+fastfirstpass
+Enable fast settings when encoding first pass, when set to 1. When set
+to 0, it has the same effect of x264
’s
+--slow-firstpass option.
+
+
+crf (crf )
+Set the quality for constant quality mode.
+
+
+crf_max (crf-max )
+In CRF mode, prevents VBV from lowering quality beyond this point.
+
+
+qp (qp )
+Set constant quantization rate control method parameter.
+
+
+aq-mode (aq-mode )
+Set AQ method. Possible values:
+
+
+‘none (0 ) ’
+Disabled.
+
+
+‘variance (1 ) ’
+Variance AQ (complexity mask).
+
+
+‘autovariance (2 ) ’
+Auto-variance AQ (experimental).
+
+
+
+
+aq-strength (aq-strength )
+Set AQ strength, reduce blocking and blurring in flat and textured areas.
+
+
+psy
+Use psychovisual optimizations when set to 1. When set to 0, it has the
+same effect as x264
’s --no-psy option.
+
+
+psy-rd (psy-rd )
+Set strength of psychovisual optimization, in
+psy-rd :psy-trellis format.
+
+
+rc-lookahead (rc-lookahead )
+Set number of frames to look ahead for frametype and ratecontrol.
+
+
+weightb
+Enable weighted prediction for B-frames when set to 1. When set to 0,
+it has the same effect as x264
’s --no-weightb option.
+
+
+weightp (weightp )
+Set weighted prediction method for P-frames. Possible values:
+
+
+‘none (0 ) ’
+Disabled
+
+‘simple (1 ) ’
+Enable only weighted refs
+
+‘smart (2 ) ’
+Enable both weighted refs and duplicates
+
+
+
+
+ssim (ssim )
+Enable calculation and printing SSIM stats after the encoding.
+
+
+intra-refresh (intra-refresh )
+Enable the use of Periodic Intra Refresh instead of IDR frames when set
+to 1.
+
+
+avcintra-class (class )
+Configure the encoder to generate AVC-Intra.
+Valid values are 50,100 and 200
+
+
+bluray-compat (bluray-compat )
+Configure the encoder to be compatible with the bluray standard.
+It is a shorthand for setting "bluray-compat=1 force-cfr=1".
+
+
+b-bias (b-bias )
+Set the influence on how often B-frames are used.
+
+
+b-pyramid (b-pyramid )
+Set method for keeping of some B-frames as references. Possible values:
+
+
+‘none (none ) ’
+Disabled.
+
+‘strict (strict ) ’
+Strictly hierarchical pyramid.
+
+‘normal (normal ) ’
+Non-strict (not Blu-ray compatible).
+
+
+
+
+mixed-refs
+Enable the use of one reference per partition, as opposed to one
+reference per macroblock when set to 1. When set to 0, it has the
+same effect as x264
’s --no-mixed-refs option.
+
+
+8x8dct
+Enable adaptive spatial transform (high profile 8x8 transform)
+when set to 1. When set to 0, it has the same effect as
+x264
’s --no-8x8dct option.
+
+
+fast-pskip
+Enable early SKIP detection on P-frames when set to 1. When set
+to 0, it has the same effect as x264
’s
+--no-fast-pskip option.
+
+
+aud (aud )
+Enable use of access unit delimiters when set to 1.
+
+
+mbtree
+Enable use macroblock tree ratecontrol when set to 1. When set
+to 0, it has the same effect as x264
’s
+--no-mbtree option.
+
+
+deblock (deblock )
+Set loop filter parameters, in alpha :beta form.
+
+
+cplxblur (cplxblur )
+Set fluctuations reduction in QP (before curve compression).
+
+
+partitions (partitions )
+Set partitions to consider, as a comma-separated list. Possible
+values in the list:
+
+
+‘p8x8 ’
+8x8 P-frame partition.
+
+‘p4x4 ’
+4x4 P-frame partition.
+
+‘b8x8 ’
+4x4 B-frame partition.
+
+‘i8x8 ’
+8x8 I-frame partition.
+
+‘i4x4 ’
+4x4 I-frame partition.
+(Enabling ‘p4x4 ’ requires ‘p8x8 ’ to be enabled. Enabling
+‘i8x8 ’ requires adaptive spatial transform (8x8dct
+option) to be enabled.)
+
+‘none (none ) ’
+Do not consider any partitions.
+
+‘all (all ) ’
+Consider every partition.
+
+
+
+
+direct-pred (direct )
+Set direct MV prediction mode. Possible values:
+
+
+‘none (none ) ’
+Disable MV prediction.
+
+‘spatial (spatial ) ’
+Enable spatial predicting.
+
+‘temporal (temporal ) ’
+Enable temporal predicting.
+
+‘auto (auto ) ’
+Automatically decided.
+
+
+
+
+slice-max-size (slice-max-size )
+Set the limit of the size of each slice in bytes. If not specified
+but RTP payload size (ps ) is specified, that is used.
+
+
+stats (stats )
+Set the file name for multi-pass stats.
+
+
+nal-hrd (nal-hrd )
+Set signal HRD information (requires vbv-bufsize to be set).
+Possible values:
+
+
+‘none (none ) ’
+Disable HRD information signaling.
+
+‘vbr (vbr ) ’
+Variable bit rate.
+
+‘cbr (cbr ) ’
+Constant bit rate (not allowed in MP4 container).
+
+
+
+
+x264opts (N.A.)
+Set any x264 option, see x264 --fullhelp
for a list.
+
+Argument is a list of key =value couples separated by
+":". In filter and psy-rd options that use ":" as a separator
+themselves, use "," instead. They accept it as well since long ago but this
+is kept undocumented for some reason.
+
+For example to specify libx264 encoding options with ffmpeg
:
+
+
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
+
+
+
+x264-params (N.A.)
+Override the x264 configuration using a :-separated list of key=value
+parameters.
+
+This option is functionally the same as the x264opts , but is
+duplicated for compatibility with the Libav fork.
+
+For example to specify libx264 encoding options with ffmpeg
:
+
+
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
+cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
+no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
+
+
+
+
+
Encoding ffpresets for common usages are provided so they can be used with the
+general presets system (e.g. passing the pre option).
+
+
+
18.5 libx265# TOC
+
+
x265 H.265/HEVC encoder wrapper.
+
+
This encoder requires the presence of the libx265 headers and library
+during configuration. You need to explicitly configure the build with
+--enable-libx265 .
+
+
+
18.5.1 Options# TOC
+
+
+preset
+Set the x265 preset.
+
+
+tune
+Set the x265 tune parameter.
+
+
+x265-params
+Set x265 options using a list of key =value couples separated
+by ":". See x265 --help
for a list of options.
+
+For example to specify libx265 encoding options with -x265-params :
+
+
+
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
+
+
+
+
+
+
18.6 libxvid# TOC
+
+
Xvid MPEG-4 Part 2 encoder wrapper.
+
+
This encoder requires the presence of the libxvidcore headers and library
+during configuration. You need to explicitly configure the build with
+--enable-libxvid --enable-gpl
.
+
+
The native mpeg4
encoder supports the MPEG-4 Part 2 format, so
+users can encode to this format without this library.
+
+
+
18.6.1 Options# TOC
+
+
The following options are supported by the libxvid wrapper. Some of
+the following options are listed but are not documented, and
+correspond to shared codec options. See the Codec
+Options chapter for their documentation. The other shared options
+which are not listed have no effect for the libxvid encoder.
+
+
+b
+g
+qmin
+qmax
+mpeg_quant
+threads
+bf
+b_qfactor
+b_qoffset
+flags
+Set specific encoding flags. Possible values:
+
+
+‘mv4 ’
+Use four motion vector by macroblock.
+
+
+‘aic ’
+Enable high quality AC prediction.
+
+
+‘gray ’
+Only encode grayscale.
+
+
+‘gmc ’
+Enable the use of global motion compensation (GMC).
+
+
+‘qpel ’
+Enable quarter-pixel motion compensation.
+
+
+‘cgop ’
+Enable closed GOP.
+
+
+‘global_header ’
+Place global headers in extradata instead of every keyframe.
+
+
+
+
+
+trellis
+me_method
+Set motion estimation method. Possible values in decreasing order of
+speed and increasing order of quality:
+
+
+‘zero ’
+Use no motion estimation (default).
+
+
+‘phods ’
+‘x1 ’
+‘log ’
+Enable advanced diamond zonal search for 16x16 blocks and half-pixel
+refinement for 16x16 blocks. ‘x1 ’ and ‘log ’ are aliases for
+‘phods ’.
+
+
+‘epzs ’
+Enable all of the things described above, plus advanced diamond zonal
+search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
+estimation on chroma planes.
+
+
+‘full ’
+Enable all of the things described above, plus extended 16x16 and 8x8
+blocks search.
+
+
+
+
+mbd
+Set macroblock decision algorithm. Possible values in the increasing
+order of quality:
+
+
+‘simple ’
+Use macroblock comparing function algorithm (default).
+
+
+‘bits ’
+Enable rate distortion-based half pixel and quarter pixel refinement for
+16x16 blocks.
+
+
+‘rd ’
+Enable all of the things described above, plus rate distortion-based
+half pixel and quarter pixel refinement for 8x8 blocks, and rate
+distortion-based search using square pattern.
+
+
+
+
+lumi_aq
+Enable lumi masking adaptive quantization when set to 1. Default is 0
+(disabled).
+
+
+variance_aq
+Enable variance adaptive quantization when set to 1. Default is 0
+(disabled).
+
+When combined with lumi_aq , the resulting quality will not
+be better than any of the two specified individually. In other
+words, the resulting quality will be the worse one of the two
+effects.
+
+
+ssim
+Set structural similarity (SSIM) displaying method. Possible values:
+
+
+‘off ’
+Disable displaying of SSIM information.
+
+
+‘avg ’
+Output average SSIM at the end of encoding to stdout. The format of
+showing the average SSIM is:
+
+
+
+For users who are not familiar with C, %f means a float number, or
+a decimal (e.g. 0.939232).
+
+
+‘frame ’
+Output both per-frame SSIM data during encoding and average SSIM at
+the end of encoding to stdout. The format of per-frame information
+is:
+
+
+
SSIM: avg: %1.3f min: %1.3f max: %1.3f
+
+
+For users who are not familiar with C, %1.3f means a float number
+rounded to 3 digits after the dot (e.g. 0.932).
+
+
+
+
+
+ssim_acc
+Set SSIM accuracy. Valid options are integers within the range of
+0-4, while 0 gives the most accurate result and 4 computes the
+fastest.
+
+
+
+
+
+
18.7 mpeg2# TOC
+
+
MPEG-2 video encoder.
+
+
+
18.7.1 Options# TOC
+
+
+seq_disp_ext integer
+Specifies if the encoder should write a sequence_display_extension to the
+output.
+
+-1
+auto
+Decide automatically to write it or not (this is the default) by checking if
+the data to be written is different from the default or unspecified values.
+
+0
+never
+Never write it.
+
+1
+always
+Always write it.
+
+
+
+
+
+
+
18.8 png# TOC
+
+
PNG image encoder.
+
+
+
18.8.1 Private options# TOC
+
+
+dpi integer
+Set physical density of pixels, in dots per inch, unset by default
+
+dpm integer
+Set physical density of pixels, in dots per meter, unset by default
+
+
+
+
+
18.9 ProRes# TOC
+
+
Apple ProRes encoder.
+
+
FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
+The used encoder can be chosen with the -vcodec
option.
+
+
+
18.9.1 Private Options for prores-ks# TOC
+
+
+profile integer
+Select the ProRes profile to encode
+
+‘proxy ’
+‘lt ’
+‘standard ’
+‘hq ’
+‘4444 ’
+
+
+
+quant_mat integer
+Select quantization matrix.
+
+‘auto ’
+‘default ’
+‘proxy ’
+‘lt ’
+‘standard ’
+‘hq ’
+
+If set to auto , the matrix matching the profile will be picked.
+If not set, the matrix providing the highest quality, default , will be
+picked.
+
+
+bits_per_mb integer
+How many bits to allot for coding one macroblock. Different profiles use
+between 200 and 2400 bits per macroblock, the maximum is 8000.
+
+
+mbs_per_slice integer
+Number of macroblocks in each slice (1-8); the default value (8)
+should be good in almost all situations.
+
+
+vendor string
+Override the 4-byte vendor ID.
+A custom vendor ID like apl0 would claim the stream was produced by
+the Apple encoder.
+
+
+alpha_bits integer
+Specify number of bits for alpha component.
+Possible values are 0 , 8 and 16 .
+Use 0 to disable alpha plane coding.
+
+
+
+
+
+
18.9.2 Speed considerations# TOC
+
+
In the default mode of operation the encoder has to honor frame constraints
+(i.e. not produce frames with size bigger than requested) while still making
+output picture as good as possible.
+A frame containing a lot of small details is harder to compress and the encoder
+would spend more time searching for appropriate quantizers for each slice.
+
+
Setting a higher bits_per_mb limit will improve the speed.
+
+
For the fastest encoding speed set the qscale parameter (4 is the
+recommended value) and do not set a size constraint.
+
+
+
+
19 Subtitles Encoders# TOC
+
+
+
19.1 dvdsub# TOC
+
+
This codec encodes the bitmap subtitle format that is used in DVDs.
+Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
+and they can also be used in Matroska files.
+
+
+
19.1.1 Options# TOC
+
+
+even_rows_fix
+When set to 1, enable a work-around that makes the number of pixel rows
+even in all subtitles. This fixes a problem with some players that
+cut off the bottom row if the number is odd. The work-around just adds
+a fully transparent row if needed. The overhead is low, typically
+one byte per subtitle on average.
+
+By default, this work-around is disabled.
+
+
+
+
+
20 Bitstream Filters# TOC
+
+
When you configure your FFmpeg build, all the supported bitstream
+filters are enabled by default. You can list all available ones using
+the configure option --list-bsfs
.
+
+
You can disable all the bitstream filters using the configure option
+--disable-bsfs
, and selectively enable any bitstream filter using
+the option --enable-bsf=BSF
, or you can disable a particular
+bitstream filter using the option --disable-bsf=BSF
.
+
+
The option -bsfs
of the ff* tools will display the list of
+all the supported bitstream filters included in your build.
+
+
The ff* tools have a -bsf option applied per stream, taking a
+comma-separated list of filters, whose parameters follow the filter
+name after a ’=’.
+
+
+
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
+
+
+
Below is a description of the currently available bitstream filters,
+with their parameters, if any.
+
+
+
20.1 aac_adtstoasc# TOC
+
+
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
+bitstream filter.
+
+
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
+ADTS header and removes the ADTS header.
+
+
This is required for example when copying an AAC stream from a raw
+ADTS AAC container to a FLV or a MOV/MP4 file.
+
+
+
20.2 chomp# TOC
+
+
Remove zero padding at the end of a packet.
+
+
+
20.3 dump_extra# TOC
+
+
Add extradata to the beginning of the filtered packets.
+
+
The additional argument specifies which packets should be filtered.
+It accepts the values:
+
+‘a ’
+add extradata to all key packets, but only if local_header is
+set in the flags2 codec context field
+
+
+‘k ’
+add extradata to all key packets
+
+
+‘e ’
+add extradata to all packets
+
+
+
+
If not specified it is assumed ‘k ’.
+
+
For example the following ffmpeg
command forces a global
+header (thus disabling individual packet headers) in the H.264 packets
+generated by the libx264
encoder, but corrects them by adding
+the header stored in extradata to the key packets:
+
+
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+
+
+
20.4 h264_mp4toannexb# TOC
+
+
Convert an H.264 bitstream from length prefixed mode to start code
+prefixed mode (as defined in the Annex B of the ITU-T H.264
+specification).
+
+
This is required by some streaming formats, typically the MPEG-2
+transport stream format ("mpegts").
+
+
For example to remux an MP4 file containing an H.264 stream to mpegts
+format with ffmpeg
, you can use the command:
+
+
+
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+
+
+
20.5 imxdump# TOC
+
+
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
+Pro decoder. This filter only applies to the mpeg2video codec, and is
+likely not needed for Final Cut Pro 7 and newer with the appropriate
+-tag:v .
+
+
For example, to remux 30 MB/sec NTSC IMX to MOV:
+
+
+
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
+
+
+
+
20.6 mjpeg2jpeg# TOC
+
+
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
+
+
MJPEG is a video codec wherein each video frame is essentially a
+JPEG image. The individual frames can be extracted without loss,
+e.g. by
+
+
+
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+
+
Unfortunately, these chunks are incomplete JPEG images, because
+they lack the DHT segment required for decoding. Quoting from
+http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
+
+
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
+commented that "MJPEG, or at least the MJPEG in AVIs having the
+MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
+Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
+and it must use basic Huffman encoding, not arithmetic or
+progressive. . . . You can indeed extract the MJPEG frames and
+decode them with a regular JPEG decoder, but you have to prepend
+the DHT segment to them, or else the decoder won’t have any idea
+how to decompress the data. The exact table necessary is given in
+the OpenDML spec."
+
+
This bitstream filter patches the header of frames extracted from an MJPEG
+stream (carrying the AVI1 header ID and lacking a DHT segment) to
+produce fully qualified JPEG images.
+
+
+
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+
+
+
20.7 mjpega_dump_header# TOC
+
+
+
20.8 movsub# TOC
+
+
+
20.9 mp3_header_decompress# TOC
+
+
+
20.10 noise# TOC
+
+
Damages the contents of packets without damaging the container. Can be
+used for fuzzing or testing error resilience/concealment.
+
+
Parameters:
+A numeral string, whose value is related to how often output bytes will
+be modified. Therefore, values below or equal to 0 are forbidden, and
+the lower the more frequent bytes will be modified, with 1 meaning
+every byte is modified.
+
+
+
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
+
+
applies the modification to every byte.
+
+
+
20.11 remove_extra# TOC
+
+
+
21 Format Options# TOC
+
+
The libavformat library provides some generic global options, which
+can be set on all the muxers and demuxers. In addition each muxer or
+demuxer may support so-called private options, which are specific for
+that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
The list of supported options follows:
+
+
+avioflags flags (input/output )
+Possible values:
+
+‘direct ’
+Reduce buffering.
+
+
+
+
+probesize integer (input )
+Set probing size in bytes, i.e. the size of the data to analyze to get
+stream information. A higher value allows more information to be
+detected in case it is dispersed into the stream, but will increase
+latency. Must be an integer not less than 32. It is 5000000 by default.
+
+
+packetsize integer (output )
+Set packet size.
+
+
+fflags flags (input/output )
+Set format flags.
+
+Possible values:
+
+‘ignidx ’
+Ignore index.
+
+‘genpts ’
+Generate PTS.
+
+‘nofillin ’
+Do not fill in missing values that can be exactly calculated.
+
+‘noparse ’
+Disable AVParsers, this needs +nofillin
too.
+
+‘igndts ’
+Ignore DTS.
+
+‘discardcorrupt ’
+Discard corrupted frames.
+
+‘sortdts ’
+Try to interleave output packets by DTS.
+
+‘keepside ’
+Do not merge side data.
+
+‘latm ’
+Enable RTP MP4A-LATM payload.
+
+‘nobuffer ’
+Reduce the latency introduced by optional buffering
+
+‘bitexact ’
+Only write platform-, build- and time-independent data.
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+
+
+
+seek2any integer (input )
+Allow seeking to non-keyframes on demuxer level when supported if set to 1.
+Default is 0.
+
+
+analyzeduration integer (input )
+Specify how many microseconds are analyzed to probe the input. A
+higher value allows more accurate information to be detected, but will
+increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
+
+
+cryptokey hexadecimal string (input )
+Set decryption key.
+
+
+indexmem integer (input )
+Set max memory used for timestamp index (per stream).
+
+
+rtbufsize integer (input )
+Set max memory used for buffering real-time frames.
+
+
+fdebug flags (input/output )
+Print specific debug info.
+
+Possible values:
+
+‘ts ’
+
+
+
+max_delay integer (input/output )
+Set maximum muxing or demuxing delay in microseconds.
+
+
+fpsprobesize integer (input )
+Set number of frames used to probe fps.
+
+
+audio_preload integer (output )
+Set microseconds by which audio packets should be interleaved earlier.
+
+
+chunk_duration integer (output )
+Set microseconds for each chunk.
+
+
+chunk_size integer (output )
+Set size in bytes for each chunk.
+
+
+err_detect, f_err_detect flags (input )
+Set error detection flags. f_err_detect
is deprecated and
+should be used only via the ffmpeg
tool.
+
+Possible values:
+
+‘crccheck ’
+Verify embedded CRCs.
+
+‘bitstream ’
+Detect bitstream specification deviations.
+
+‘buffer ’
+Detect improper bitstream length.
+
+‘explode ’
+Abort decoding on minor error detection.
+
+‘careful ’
+Consider things that violate the spec and have not been seen in the
+wild as errors.
+
+‘compliant ’
+Consider all spec non compliancies as errors.
+
+‘aggressive ’
+Consider things that a sane encoder should not do as an error.
+
+
+
+
+use_wallclock_as_timestamps integer (input )
+Use wallclock as timestamps.
+
+
+avoid_negative_ts integer (output )
+
+Possible values:
+
+‘make_non_negative ’
+Shift timestamps to make them non-negative.
+Also note that this affects only leading negative timestamps, and not
+non-monotonic negative timestamps.
+
+‘make_zero ’
+Shift timestamps so that the first timestamp is 0.
+
+‘auto (default) ’
+Enables shifting when required by the target format.
+
+‘disabled ’
+Disables shifting of timestamp.
+
+
+
+When shifting is enabled, all output timestamps are shifted by the
+same amount. Audio, video, and subtitles desynching and relative
+timestamp differences are preserved compared to how they would have
+been without shifting.
+
+
+skip_initial_bytes integer (input )
+Set number of bytes to skip before reading header and frames.
+Default is 0.
+
+
+correct_ts_overflow integer (input )
+Correct single timestamp overflows if set to 1. Default is 1.
+
+
+flush_packets integer (output )
+Flush the underlying I/O stream after each packet. Default 1 enables it, and
+has the effect of reducing the latency; 0 disables it and may slightly
+increase performance in some cases.
+
+
+output_ts_offset offset (output )
+Set the output time offset.
+
+offset must be a time duration specification,
+see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+The offset is added by the muxer to the output timestamps.
+
+Specifying a positive offset means that the corresponding streams are
+delayed by the time duration specified in offset . Default value
+is 0
(meaning that no offset is applied).
+
+
+format_whitelist list (input )
+"," separated list of allowed demuxers. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+Stream parameters.
+For example to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
21.1 Format stream specifiers# TOC
+
+
Format stream specifiers allow selection of one or more streams that
+match specific properties.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index.
+
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio,
+’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
+stream_index is given, then it matches the stream number
+stream_index of this type. Otherwise, it matches all streams of
+this type.
+
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number
+stream_index in the program with the id
+program_id . Otherwise, it matches all streams in the program.
+
+
+#stream_id
+Matches the stream by a format-specific ID.
+
+
+
+
The exact semantics of stream specifiers is defined by the
+avformat_match_stream_specifier()
function declared in the
+libavformat/avformat.h header.
+
+
+
22 Demuxers# TOC
+
+
Demuxers are configured elements in FFmpeg that can read the
+multimedia streams from a particular type of file.
+
+
When you configure your FFmpeg build, all the supported demuxers
+are enabled by default. You can list all available ones using the
+configure option --list-demuxers
.
+
+
You can disable all the demuxers using the configure option
+--disable-demuxers
, and selectively enable a single demuxer with
+the option --enable-demuxer=DEMUXER
, or disable it
+with the option --disable-demuxer=DEMUXER
.
+
+
The option -formats
of the ff* tools will display the list of
+enabled demuxers.
+
+
The description of some of the currently available demuxers follows.
+
+
+
22.1 applehttp# TOC
+
+
Apple HTTP Live Streaming demuxer.
+
+
This demuxer presents all AVStreams from all variant streams.
+The id field is set to the bitrate variant index number. By setting
+the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
+the caller can decide which variant streams to actually receive.
+The total bitrate of the variant that the stream belongs to is
+available in a metadata key named "variant_bitrate".
+
+
+
22.2 apng# TOC
+
+
Animated Portable Network Graphics demuxer.
+
+
This demuxer is used to demux APNG files.
+All headers, but the PNG signature, up to (but not including) the first
+fcTL chunk are transmitted as extradata.
+Frames are then split as being all the chunks between two fcTL ones, or
+between the last fcTL and IEND chunks.
+
+
+-ignore_loop bool
+Ignore the loop variable in the file if set.
+
+-max_fps int
+Maximum framerate in frames per second (0 for no limit).
+
+-default_fps int
+Default framerate in frames per second when none is specified in the file
+(0 meaning as fast as possible).
+
+
+
+
+
22.3 asf# TOC
+
+
Advanced Systems Format demuxer.
+
+
This demuxer is used to demux ASF files and MMS network streams.
+
+
+-no_resync_search bool
+Do not try to resynchronize by looking for a certain optional start code.
+
+
+
+
+
22.4 concat# TOC
+
+
Virtual concatenation script demuxer.
+
+
This demuxer reads a list of files and other directives from a text file and
+demuxes them one after the other, as if all their packets had been muxed
+together.
+
+
The timestamps in the files are adjusted so that the first file starts at 0
+and each next file starts where the previous one finishes. Note that it is
+done globally and may cause gaps if all streams do not have exactly the same
+length.
+
+
All files must have the same streams (same codecs, same time base, etc.).
+
+
The duration of each file is used to adjust the timestamps of the next file:
+if the duration is incorrect (because it was computed using the bit-rate or
+because the file is truncated, for example), it can cause artifacts. The
+duration
directive can be used to override the duration stored in
+each file.
+
+
+
22.4.1 Syntax# TOC
+
+
The script is a text file in extended-ASCII, with one directive per line.
+Empty lines, leading spaces and lines starting with ’#’ are ignored. The
+following directive is recognized:
+
+
+file path
+Path to a file to read; special characters and spaces must be escaped with
+backslash or single quotes.
+
+All subsequent file-related directives apply to that file.
+
+
+ffconcat version 1.0
+Identify the script type and version. It also sets the safe option
+to 1 if it was set to its default -1.
+
+To make FFmpeg recognize the format automatically, this directive must
+appear exactly as is (no extra space or byte-order-mark) on the very first
+line of the script.
+
+
+duration dur
+Duration of the file. This information can be specified from the file;
+specifying it here may be more efficient or help if the information from the
+file is not available or accurate.
+
+If the duration is set for all files, then it is possible to seek in the
+whole concatenated video.
+
+
+stream
+Introduce a stream in the virtual file.
+All subsequent stream-related directives apply to the last introduced
+stream.
+Some streams properties must be set in order to allow identifying the
+matching streams in the subfiles.
+If no streams are defined in the script, the streams from the first file are
+copied.
+
+
+exact_stream_id id
+Set the id of the stream.
+If this directive is given, the string with the corresponding id in the
+subfiles will be used.
+This is especially useful for MPEG-PS (VOB) files, where the order of the
+streams is not reliable.
+
+
+
+
+
+
22.4.2 Options# TOC
+
+
This demuxer accepts the following option:
+
+
+safe
+If set to 1, reject unsafe file paths. A file path is considered safe if it
+does not contain a protocol specification and is relative and all components
+only contain characters from the portable character set (letters, digits,
+period, underscore and hyphen) and have no period at the beginning of a
+component.
+
+If set to 0, any file name is accepted.
+
+The default is -1, it is equivalent to 1 if the format was automatically
+probed and 0 otherwise.
+
+
+auto_convert
+If set to 1, try to perform automatic conversions on packet data to make the
+streams concatenable.
+
+Currently, the only conversion is adding the h264_mp4toannexb bitstream
+filter to H.264 streams in MP4 format. This is necessary in particular if
+there are resolution changes.
+
+
+
+
+
+
22.5 flv# TOC
+
+
Adobe Flash Video Format demuxer.
+
+
This demuxer is used to demux FLV files and RTMP network streams.
+
+
+-flv_metadata bool
+Allocate the streams according to the onMetaData array content.
+
+
+
+
+
22.6 libgme# TOC
+
+
The Game Music Emu library is a collection of video game music file emulators.
+
+
See http://code.google.com/p/game-music-emu/ for more information.
+
+
Some files have multiple tracks. The demuxer will pick the first track by
+default. The track_index option can be used to select a different
+track. Track indexes start at 0. The demuxer exports the number of tracks as
+tracks meta data entry.
+
+
For very large files, the max_size option may have to be adjusted.
+
+
+
22.7 libquvi# TOC
+
+
Play media from Internet services using the quvi project.
+
+
The demuxer accepts a format option to request a specific quality. It
+is by default set to best .
+
+
See http://quvi.sourceforge.net/ for more information.
+
+
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
+enabled.
+
+
+
22.8 gif# TOC
+
+
Animated GIF demuxer.
+
+
It accepts the following options:
+
+
+min_delay
+Set the minimum valid delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 2.
+
+
+default_delay
+Set the default delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 10.
+
+
+ignore_loop
+GIF files can contain information to loop a certain number of times (or
+infinitely). If ignore_loop is set to 1, then the loop setting
+from the input will be ignored and looping will not occur. If set to 0,
+then looping will occur and will cycle the number of times according to
+the GIF. Default value is 1.
+
+
+
+
For example, with the overlay filter, place an infinitely looping GIF
+over another video:
+
+
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
+
+
+
Note that in the above example the shortest option for overlay filter is
+used to end the output video at the length of the shortest input file,
+which in this case is input.mp4 as the GIF in this example loops
+infinitely.
+
+
+
22.9 image2# TOC
+
+
Image file demuxer.
+
+
This demuxer reads from a list of image files specified by a pattern.
+The syntax and meaning of the pattern is specified by the
+option pattern_type .
+
+
The pattern may contain a suffix which is used to automatically
+determine the format of the images contained in the files.
+
+
The size, the pixel format, and the format of each image must be the
+same for all the files in the sequence.
+
+
This demuxer accepts the following options:
+
+framerate
+Set the frame rate for the video stream. It defaults to 25.
+
+loop
+If set to 1, loop over the input. Default value is 0.
+
+pattern_type
+Select the pattern type used to interpret the provided filename.
+
+pattern_type accepts one of the following values.
+
+sequence
+Select a sequence pattern type, used to specify a sequence of files
+indexed by sequential numbers.
+
+A sequence pattern may contain the string "%d" or "%0Nd", which
+specifies the position of the characters representing a sequential
+number in each filename matched by the pattern. If the form
+"%0Nd" is used, the string representing the number in each
+filename is 0-padded and N is the total number of 0-padded
+digits representing the number. The literal character ’%’ can be
+specified in the pattern with the string "%%".
+
+If the sequence pattern contains "%d" or "%0Nd", the first filename of
+the file list specified by the pattern must contain a number
+inclusively contained between start_number and
+start_number +start_number_range -1, and all the following
+numbers must be sequential.
+
+For example the pattern "img-%03d.bmp" will match a sequence of
+filenames of the form img-001.bmp , img-002.bmp , ...,
+img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
+sequence of filenames of the form i%m%g-1.jpg ,
+i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
+
+Note that the pattern does not necessarily have to contain "%d" or
+"%0Nd"; for example, to convert a single image file
+img.jpeg you can employ the command:
+
+
ffmpeg -i img.jpeg img.png
+
+
+
+glob
+Select a glob wildcard pattern type.
+
+The pattern is interpreted like a glob()
pattern. This is only
+selectable if libavformat was compiled with globbing support.
+
+
+glob_sequence (deprecated, will be removed)
+Select a mixed glob wildcard/sequence pattern.
+
+If your version of libavformat was compiled with globbing support, and
+the provided pattern contains at least one glob meta character among
+%*?[]{}
that is preceded by an unescaped "%", the pattern is
+interpreted like a glob()
pattern, otherwise it is interpreted
+like a sequence pattern.
+
+All glob special characters %*?[]{}
must be prefixed
+with "%". To escape a literal "%" you shall use "%%".
+
+For example the pattern foo-%*.jpeg
will match all the
+filenames prefixed by "foo-" and terminating with ".jpeg", and
+foo-%?%?%?.jpeg
will match all the filenames prefixed with
+"foo-", followed by a sequence of three characters, and terminating
+with ".jpeg".
+
+This pattern type is deprecated in favor of glob and
+sequence .
+
+
+
+Default value is glob_sequence .
+
+pixel_format
+Set the pixel format of the images to read. If not specified the pixel
+format is guessed from the first image file in the sequence.
+
+start_number
+Set the index of the file matched by the image file pattern to start
+to read from. Default value is 0.
+
+start_number_range
+Set the index interval range to check when looking for the first image
+file in the sequence, starting from start_number . Default value
+is 5.
+
+ts_from_file
+If set to 1, will set frame timestamp to modification time of image file. Note
+that monotonicity of timestamps is not provided: images go in the same order as
+without this option. Default value is 0.
+If set to 2, will set frame timestamp to the modification time of the image file in
+nanosecond precision.
+
+video_size
+Set the video size of the images to read. If not specified the video
+size is guessed from the first image file in the sequence.
+
+
+
+
+
22.9.1 Examples# TOC
+
+
+ Use ffmpeg
for creating a video from the images in the file
+sequence img-001.jpeg , img-002.jpeg , ..., assuming an
+input frame rate of 10 frames per second:
+
+
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
+
+
+ As above, but start by reading from a file with index 100 in the sequence:
+
+
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
+
+
+ Read images matching the "*.png" glob pattern , that is all the files
+terminating with the ".png" suffix:
+
+
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
+
+
+
+
+
22.10 mpegts# TOC
+
+
MPEG-2 transport stream demuxer.
+
+
+fix_teletext_pts
+Overrides teletext packet PTS and DTS values with the timestamps calculated
+from the PCR of the first program which the teletext stream is part of and is
+not discarded. Default value is 1, set this option to 0 if you want your
+teletext packet PTS and DTS values untouched.
+
+
+
+
+
22.11 rawvideo# TOC
+
+
Raw video demuxer.
+
+
This demuxer allows one to read raw video data. Since there is no header
+specifying the assumed video parameters, the user must specify them
+in order to be able to decode the data correctly.
+
+
This demuxer accepts the following options:
+
+framerate
+Set input video frame rate. Default value is 25.
+
+
+pixel_format
+Set the input video pixel format. Default value is yuv420p
.
+
+
+video_size
+Set the input video size. This value must be specified explicitly.
+
+
+
+
For example to read a rawvideo file input.raw with
+ffplay
, assuming a pixel format of rgb24
, a video
+size of 320x240
, and a frame rate of 10 images per second, use
+the command:
+
+
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+
+
+
22.12 sbg# TOC
+
+
SBaGen script demuxer.
+
+
This demuxer reads the script language used by SBaGen
+http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG
+script looks like that:
+
+
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00 off
+
+
+
A SBG script can mix absolute and relative timestamps. If the script uses
+either only absolute timestamps (including the script start time) or only
+relative ones, then its layout is fixed, and the conversion is
+straightforward. On the other hand, if the script mixes both kind of
+timestamps, then the NOW reference for relative timestamps will be
+taken from the current time of day at the time the script is read, and the
+script layout will be frozen according to that reference. That means that if
+the script is directly played, the actual times will match the absolute
+timestamps up to the sound controller’s clock accuracy, but if the user
+somehow pauses the playback or seeks, all times will be shifted accordingly.
+
+
+
22.13 tedcaptions# TOC
+
+
JSON captions used for TED Talks .
+
+
TED does not provide links to the captions, but they can be guessed from the
+page. The file tools/bookmarklets.html from the FFmpeg source tree
+contains a bookmarklet to expose them.
+
+
This demuxer accepts the following option:
+
+start_time
+Set the start time of the TED talk, in milliseconds. The default is 15000
+(15s). It is used to sync the captions with the downloadable videos, because
+they include a 15s intro.
+
+
+
+
Example: convert the captions to a format most players understand:
+
+
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+
+
+
23 Muxers# TOC
+
+
Muxers are configured elements in FFmpeg which allow writing
+multimedia streams to a particular type of file.
+
+
When you configure your FFmpeg build, all the supported muxers
+are enabled by default. You can list all available muxers using the
+configure option --list-muxers
.
+
+
You can disable all the muxers with the configure option
+--disable-muxers
and selectively enable / disable single muxers
+with the options --enable-muxer=MUXER
/
+--disable-muxer=MUXER
.
+
+
The option -formats
of the ff* tools will display the list of
+enabled muxers.
+
+
A description of some of the currently available muxers follows.
+
+
+
23.1 aiff# TOC
+
+
Audio Interchange File Format muxer.
+
+
+
23.1.1 Options# TOC
+
+
It accepts the following options:
+
+
+write_id3v2
+Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
+
+
+id3v2_version
+Select ID3v2 version to write. Currently only version 3 and 4 (aka.
+ID3v2.3 and ID3v2.4) are supported. The default is version 4.
+
+
+
+
+
+
23.2 crc# TOC
+
+
CRC (Cyclic Redundancy Check) testing format.
+
+
This muxer computes and prints the Adler-32 CRC of all the input audio
+and video frames. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+CRC.
+
+
The output of the muxer consists of a single line of the form:
+CRC=0xCRC , where CRC is a hexadecimal number 0-padded to
+8 digits containing the CRC for all the decoded input frames.
+
+
See also the framecrc muxer.
+
+
+
23.2.1 Examples# TOC
+
+
For example to compute the CRC of the input, and store it in the file
+out.crc :
+
+
ffmpeg -i INPUT -f crc out.crc
+
+
+
You can print the CRC to stdout with the command:
+
+
ffmpeg -i INPUT -f crc -
+
+
+
You can select the output format of each frame with ffmpeg
by
+specifying the audio and video codec and format. For example to
+compute the CRC of the input audio converted to PCM unsigned 8-bit
+and the input video converted to MPEG-2 video, use the command:
+
+
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
+
+
+
+
23.3 framecrc# TOC
+
+
Per-packet CRC (Cyclic Redundancy Check) testing format.
+
+
This muxer computes and prints the Adler-32 CRC for each audio
+and video packet. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+CRC.
+
+
The output of the muxer consists of a line for each audio and video
+packet of the form:
+
+
stream_index , packet_dts , packet_pts , packet_duration , packet_size , 0xCRC
+
+
+
CRC is a hexadecimal number 0-padded to 8 digits containing the
+CRC of the packet.
+
+
+
23.3.1 Examples# TOC
+
+
For example to compute the CRC of the audio and video frames in
+INPUT , converted to raw audio and video packets, and store it
+in the file out.crc :
+
+
ffmpeg -i INPUT -f framecrc out.crc
+
+
+
To print the information to stdout, use the command:
+
+
ffmpeg -i INPUT -f framecrc -
+
+
+
With ffmpeg
, you can select the output format to which the
+audio and video frames are encoded before computing the CRC for each
+packet by specifying the audio and video codec. For example, to
+compute the CRC of each decoded input audio frame converted to PCM
+unsigned 8-bit and of each decoded input video frame converted to
+MPEG-2 video, use the command:
+
+
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
+
+
+
See also the crc muxer.
+
+
+
23.4 framemd5# TOC
+
+
Per-packet MD5 testing format.
+
+
This muxer computes and prints the MD5 hash for each audio
+and video packet. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+hash.
+
+
The output of the muxer consists of a line for each audio and video
+packet of the form:
+
+
stream_index , packet_dts , packet_pts , packet_duration , packet_size , MD5
+
+
+
MD5 is a hexadecimal number representing the computed MD5 hash
+for the packet.
+
+
+
23.4.1 Examples# TOC
+
+
For example to compute the MD5 of the audio and video frames in
+INPUT , converted to raw audio and video packets, and store it
+in the file out.md5 :
+
+
ffmpeg -i INPUT -f framemd5 out.md5
+
+
+
To print the information to stdout, use the command:
+
+
ffmpeg -i INPUT -f framemd5 -
+
+
+
See also the md5 muxer.
+
+
+
23.5 gif# TOC
+
+
Animated GIF muxer.
+
+
It accepts the following options:
+
+
+loop
+Set the number of times to loop the output. Use -1
for no loop, 0
+for looping indefinitely (default).
+
+
+final_delay
+Force the delay (expressed in centiseconds) after the last frame. Each frame
+ends with a delay until the next frame. The default is -1
, which is a
+special value to tell the muxer to re-use the previous delay. In case of a
+loop, you might want to customize this value to mark a pause for instance.
+
+
+
+
For example, to encode a gif looping 10 times, with a 5 seconds delay between
+the loops:
+
+
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
+
+
+
Note 1: if you wish to extract the frames in separate GIF files, you need to
+force the image2 muxer:
+
+
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
+
+
+
Note 2: the GIF format has a very small time base: the delay between two frames
+cannot be smaller than one centisecond.
+
+
+
23.6 hls# TOC
+
+
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
+the HTTP Live Streaming (HLS) specification.
+
+
It creates a playlist file, and one or more segment files. The output filename
+specifies the playlist filename.
+
+
By default, the muxer creates a file for each segment produced. These files
+have the same name as the playlist, followed by a sequential number and a
+.ts extension.
+
+
For example, to convert an input file with ffmpeg
:
+
+
ffmpeg -i in.nut out.m3u8
+
+
This example will produce the playlist, out.m3u8 , and segment files:
+out0.ts , out1.ts , out2.ts , etc.
+
+
See also the segment muxer, which provides a more generic and
+flexible implementation of a segmenter, and can be used to perform HLS
+segmentation.
+
+
+
23.6.1 Options# TOC
+
+
This muxer supports the following options:
+
+
+hls_time seconds
+Set the segment length in seconds. Default value is 2.
+
+
+hls_list_size size
+Set the maximum number of playlist entries. If set to 0 the list file
+will contain all the segments. Default value is 5.
+
+
+hls_ts_options options_list
+Set output format options using a :-separated list of key=value
+parameters. Values containing :
special characters must be
+escaped.
+
+
+hls_wrap wrap
+Set the number after which the segment filename number (the number
+specified in each segment file) wraps. If set to 0 the number will be
+never wrapped. Default value is 0.
+
+This option is useful to avoid to fill the disk with many segment
+files, and limits the maximum number of segment files written to disk
+to wrap .
+
+
+start_number number
+Start the playlist sequence number from number . Default value is
+0.
+
+
+hls_allow_cache allowcache
+Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
+
+
+hls_base_url baseurl
+Append baseurl to every entry in the playlist.
+Useful to generate playlists with absolute paths.
+
+Note that the playlist sequence number must be unique for each segment
+and it is not to be confused with the segment filename sequence number
+which can be cyclic, for example if the wrap option is
+specified.
+
+
+hls_segment_filename filename
+Set the segment filename. Unless hls_flags single_file is set filename
+is used as a string format with the segment number:
+
+
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
+
+This example will produce the playlist, out.m3u8 , and segment files:
+file000.ts , file001.ts , file002.ts , etc.
+
+
+hls_flags single_file
+If this flag is set, the muxer will store all segments in a single MPEG-TS
+file, and will use byte ranges in the playlist. HLS playlists generated with
+this way will have the version number 4.
+For example:
+
+
ffmpeg -i in.nut -hls_flags single_file out.m3u8
+
+Will produce the playlist, out.m3u8 , and a single segment file,
+out.ts .
+
+
+hls_flags delete_segments
+Segment files removed from the playlist are deleted after a period of time
+equal to the duration of the segment plus the duration of the playlist.
+
+
+
+
+
23.7 ico# TOC
+
+
ICO file muxer.
+
+
Microsoft’s icon file format (ICO) has some strict limitations that should be noted:
+
+
+ Size cannot exceed 256 pixels in any dimension
+
+ Only BMP and PNG images can be stored
+
+ If a BMP image is used, it must be one of the following pixel formats:
+
+
BMP Bit Depth FFmpeg Pixel Format
+1bit pal8
+4bit pal8
+8bit pal8
+16bit rgb555le
+24bit bgr24
+32bit bgra
+
+
+ If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
+
+ If a PNG image is used, it must use the rgba pixel format
+
+
+
+
23.8 image2# TOC
+
+
Image file muxer.
+
+
The image file muxer writes video frames to image files.
+
+
The output filenames are specified by a pattern, which can be used to
+produce sequentially numbered series of files.
+The pattern may contain the string "%d" or "%0Nd", which
+specifies the position of the characters representing a numbering in
+the filenames. If the form "%0Nd" is used, the string
+representing the number in each filename is 0-padded to N
+digits. The literal character ’%’ can be specified in the pattern with
+the string "%%".
+
+
+If the pattern contains "%d" or "%0Nd", the first filename of
+the file list specified will contain the number 1, all the following
+numbers will be sequential.
+
+
The pattern may contain a suffix which is used to automatically
+determine the format of the image files to write.
+
+
For example the pattern "img-%03d.bmp" will specify a sequence of
+filenames of the form img-001.bmp , img-002.bmp , ...,
+img-010.bmp , etc.
+The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
+form img%-1.jpg , img%-2.jpg , ..., img%-10.jpg ,
+etc.
+
+
+
23.8.1 Examples# TOC
+
+
The following example shows how to use ffmpeg
for creating a
+sequence of files img-001.jpeg , img-002.jpeg , ...,
+taking one image every second from the input video:
+
+
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
+
+
+
Note that with ffmpeg
, if the format is not specified with the
+-f
option and the output filename specifies an image file
+format, the image2 muxer is automatically selected, so the previous
+command can be written as:
+
+
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
+
+
+
+Note also that the pattern does not necessarily have to contain "%d" or
+"%0Nd"; for example, to create a single image file
+img.jpeg from the input video you can employ the command:
+
+
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
+
+
+
The strftime option allows you to expand the filename with
+date and time information. Check the documentation of
+the strftime()
function for the syntax.
+
+
For example to generate image files from the strftime()
+"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg
command
+can be used:
+
+
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
+
+
+
+
23.8.2 Options# TOC
+
+
+start_number
+Start the sequence from the specified number. Default value is 1. Must
+be a non-negative number.
+
+
+update
+If set to 1, the filename will always be interpreted as just a
+filename, not a pattern, and the corresponding file will be continuously
+overwritten with new images. Default value is 0.
+
+
+strftime
+If set to 1, expand the filename with date and time information from
+strftime()
. Default value is 0.
+
+
+
+
The image muxer supports the .Y.U.V image file format. This format is
+special in that each image frame consists of three files, one for
+each of the YUV420P components. To read or write this image file format,
+specify the name of the ’.Y’ file. The muxer will automatically open the
+’.U’ and ’.V’ files as required.
+
+
+
23.9 matroska# TOC
+
+
Matroska container muxer.
+
+
This muxer implements the matroska and webm container specs.
+
+
+
23.9.1 Metadata# TOC
+
+
The recognized metadata settings in this muxer are:
+
+
+title
+Set title name provided to a single track.
+
+
+language
+Specify the language of the track in the Matroska languages form.
+
+The language can be either the 3 letters bibliographic ISO-639-2 (ISO
+639-2/B) form (like "fre" for French), or a language code mixed with a
+country code for specialities in languages (like "fre-ca" for Canadian
+French).
+
+
+stereo_mode
+Set stereo 3D video layout of two views in a single video track.
+
+The following values are recognized:
+
+‘mono ’
+video is not stereo
+
+‘left_right ’
+Both views are arranged side by side, Left-eye view is on the left
+
+‘bottom_top ’
+Both views are arranged in top-bottom orientation, Left-eye view is at bottom
+
+‘top_bottom ’
+Both views are arranged in top-bottom orientation, Left-eye view is on top
+
+‘checkerboard_rl ’
+Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first
+
+‘checkerboard_lr ’
+Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first
+
+‘row_interleaved_rl ’
+Each view is constituted by a row based interleaving, Right-eye view is first row
+
+‘row_interleaved_lr ’
+Each view is constituted by a row based interleaving, Left-eye view is first row
+
+‘col_interleaved_rl ’
+Both views are arranged in a column based interleaving manner, Right-eye view is first column
+
+‘col_interleaved_lr ’
+Both views are arranged in a column based interleaving manner, Left-eye view is first column
+
+‘anaglyph_cyan_red ’
+All frames are in anaglyph format viewable through red-cyan filters
+
+‘right_left ’
+Both views are arranged side by side, Right-eye view is on the left
+
+‘anaglyph_green_magenta ’
+All frames are in anaglyph format viewable through green-magenta filters
+
+‘block_lr ’
+Both eyes laced in one Block, Left-eye view is first
+
+‘block_rl ’
+Both eyes laced in one Block, Right-eye view is first
+
+
+
+
+
+
For example a 3D WebM clip can be created using the following command line:
+
+
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
+
+
+
+
23.9.2 Options# TOC
+
+
This muxer supports the following options:
+
+
+reserve_index_space
+By default, this muxer writes the index for seeking (called cues in Matroska
+terms) at the end of the file, because it cannot know in advance how much space
+to leave for the index at the beginning of the file. However for some use cases
+– e.g. streaming where seeking is possible but slow – it is useful to put the
+index at the beginning of the file.
+
+If this option is set to a non-zero value, the muxer will reserve a given amount
+of space in the file header and then try to write the cues there when the muxing
+finishes. If the available space does not suffice, muxing will fail. A safe size
+for most use cases should be about 50kB per hour of video.
+
+Note that cues are only written if the output is seekable and this option will
+have no effect if it is not.
+
+
+
+
+
23.10 md5# TOC
+
+
MD5 testing format.
+
+
This muxer computes and prints the MD5 hash of all the input audio
+and video frames. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+hash.
+
+
The output of the muxer consists of a single line of the form:
+MD5=MD5 , where MD5 is a hexadecimal number representing
+the computed MD5 hash.
+
+
For example to compute the MD5 hash of the input converted to raw
+audio and video, and store it in the file out.md5 :
+
+
ffmpeg -i INPUT -f md5 out.md5
+
+
+
You can print the MD5 to stdout with the command:
+
+
ffmpeg -i INPUT -f md5 -
+
+
+
See also the framemd5 muxer.
+
+
+
23.11 mov, mp4, ismv# TOC
+
+
MOV/MP4/ISMV (Smooth Streaming) muxer.
+
+
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
+file has all the metadata about all packets stored in one location
+(written at the end of the file, it can be moved to the start for
+better playback by adding faststart to the movflags , or
+using the qt-faststart
tool). A fragmented
+file consists of a number of fragments, where packets and metadata
+about these packets are stored together. Writing a fragmented
+file has the advantage that the file is decodable even if the
+writing is interrupted (while a normal MOV/MP4 is undecodable if
+it is not properly finished), and it requires less memory when writing
+very long files (since writing normal MOV/MP4 files stores info about
+every single packet in memory until the file is closed). The downside
+is that it is less compatible with other applications.
+
+
+
23.11.1 Options# TOC
+
+
Fragmentation is enabled by setting one of the AVOptions that define
+how to cut the file into fragments:
+
+
+-moov_size bytes
+Reserves space for the moov atom at the beginning of the file instead of placing the
+moov atom at the end. If the space reserved is insufficient, muxing will fail.
+
+-movflags frag_keyframe
+Start a new fragment at each video keyframe.
+
+-frag_duration duration
+Create fragments that are duration microseconds long.
+
+-frag_size size
+Create fragments that contain up to size bytes of payload data.
+
+-movflags frag_custom
+Allow the caller to manually choose when to cut fragments, by
+calling av_write_frame(ctx, NULL)
to write a fragment with
+the packets written so far. (This is only useful with other
+applications integrating libavformat, not from ffmpeg
.)
+
+-min_frag_duration duration
+Don’t create fragments that are shorter than duration microseconds long.
+
+
+
+
If more than one condition is specified, fragments are cut when
+one of the specified conditions is fulfilled. The exception to this is
+-min_frag_duration
, which has to be fulfilled for any of the other
+conditions to apply.
+
+
Additionally, the way the output file is written can be adjusted
+through a few other options:
+
+
+-movflags empty_moov
+Write an initial moov atom directly at the start of the file, without
+describing any samples in it. Generally, an mdat/moov pair is written
+at the start of the file, as a normal MOV/MP4 file, containing only
+a short portion of the file. With this option set, there is no initial
+mdat atom, and the moov atom only describes the tracks but has
+a zero duration.
+
+This option is implicitly set when writing ismv (Smooth Streaming) files.
+
+-movflags separate_moof
+Write a separate moof (movie fragment) atom for each track. Normally,
+packets for all tracks are written in a moof atom (which is slightly
+more efficient), but with this option set, the muxer writes one moof/mdat
+pair for each track, making it easier to separate tracks.
+
+This option is implicitly set when writing ismv (Smooth Streaming) files.
+
+-movflags faststart
+Run a second pass moving the index (moov atom) to the beginning of the file.
+This operation can take a while, and will not work in various situations such
+as fragmented output, thus it is not enabled by default.
+
+-movflags rtphint
+Add RTP hinting tracks to the output file.
+
+-movflags disable_chpl
+Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
+and a QuickTime chapter track are written to the file. With this option
+set, only the QuickTime chapter track will be written. Nero chapters can
+cause failures when the file is reprocessed with certain tagging programs, like
+mp3Tag 2.61a and iTunes 11.3, most likely other versions are affected as well.
+
+-movflags omit_tfhd_offset
+Do not write any absolute base_data_offset in tfhd atoms. This avoids
+tying fragments to absolute byte positions in the file/streams.
+
+-movflags default_base_moof
+Similarly to the omit_tfhd_offset, this flag avoids writing the
+absolute base_data_offset field in tfhd atoms, but does so by using
+the new default-base-is-moof flag instead. This flag is new from
+14496-12:2012. This may make the fragments easier to parse in certain
+circumstances (avoiding basing track fragment location calculations
+on the implicit end of the previous track fragment).
+
+
+
+
+
23.11.2 Example# TOC
+
+
Smooth Streaming content can be pushed in real time to a publishing
+point on IIS with this muxer. Example:
+
+
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
+
+
+
+
23.12 mp3# TOC
+
+
The MP3 muxer writes a raw MP3 stream with the following optional features:
+
+ An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
+2.4 are supported, the id3v2_version
private option controls which one is
+used (3 or 4). Setting id3v2_version
to 0 disables the ID3v2 header
+completely.
+
+The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
+The pictures are supplied to the muxer in form of a video stream with a single
+packet. There can be any number of those streams, each will correspond to a
+single APIC frame. The stream metadata tags title and comment map
+to APIC description and picture type respectively. See
+http://id3.org/id3v2.4.0-frames for allowed picture types.
+
+Note that the APIC frames must be written at the beginning, so the muxer will
+buffer the audio frames until it gets all the pictures. It is therefore advised
+to provide the pictures as soon as possible to avoid excessive buffering.
+
+ A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
+default, but will be written only if the output is seekable. The
+write_xing
private option can be used to disable it. The frame contains
+various information that may be useful to the decoder, like the audio duration
+or encoder delay.
+
+ A legacy ID3v1 tag at the end of the file (disabled by default). It may be
+enabled with the write_id3v1
private option, but as its capabilities are
+very limited, its usage is not recommended.
+
+
+
Examples:
+
+
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
+
+
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
+
+
+
To attach a picture to an mp3 file select both the audio and the picture stream
+with map
:
+
+
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
+-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
+
+
+
Write a "clean" MP3 without any extra features:
+
+
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
+
+
+
+
23.13 mpegts# TOC
+
+
MPEG transport stream muxer.
+
+
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
+
+
The recognized metadata settings in mpegts muxer are service_provider
+and service_name
. If they are not set the default for
+service_provider
is "FFmpeg" and the default for
+service_name
is "Service01".
+
+
+
23.13.1 Options# TOC
+
+
The muxer options are:
+
+
+-mpegts_original_network_id number
+Set the original_network_id (default 0x0001). This is unique identifier
+of a network in DVB. Its main use is in the unique identification of a
+service through the path Original_Network_ID, Transport_Stream_ID.
+
+-mpegts_transport_stream_id number
+Set the transport_stream_id (default 0x0001). This identifies a
+transponder in DVB.
+
+-mpegts_service_id number
+Set the service_id (default 0x0001) also known as program in DVB.
+
+-mpegts_pmt_start_pid number
+Set the first PID for PMT (default 0x1000, max 0x1f00).
+
+-mpegts_start_pid number
+Set the first PID for data packets (default 0x0100, max 0x0f00).
+
+-mpegts_m2ts_mode number
+Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
+
+-muxrate number
+Set a constant muxrate (default VBR).
+
+-pcr_period number
+Override the default PCR retransmission time (default 20ms), ignored
+if variable muxrate is selected.
+
+-pes_payload_size number
+Set minimum PES packet payload in bytes.
+
+-mpegts_flags flags
+Set flags (see below).
+
+-mpegts_copyts number
+Preserve original timestamps, if value is set to 1. Default value is -1, which
+results in shifting timestamps so that they start from 0.
+
+-tables_version number
+Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively).
+This option allows updating stream structure so that standard consumer may
+detect the change. To do so, reopen output AVFormatContext (in case of API
+usage) or restart ffmpeg instance, cyclically changing tables_version value:
+
+
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
+ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+
+
+
+
+
Option mpegts_flags may take a set of such flags:
+
+
+resend_headers
+Reemit PAT/PMT before writing the next packet.
+
+latm
+Use LATM packetization for AAC.
+
+
+
+
+
23.13.2 Example# TOC
+
+
+
ffmpeg -i file.mpg -c copy \
+ -mpegts_original_network_id 0x1122 \
+ -mpegts_transport_stream_id 0x3344 \
+ -mpegts_service_id 0x5566 \
+ -mpegts_pmt_start_pid 0x1500 \
+ -mpegts_start_pid 0x150 \
+ -metadata service_provider="Some provider" \
+ -metadata service_name="Some Channel" \
+ -y out.ts
+
+
+
+
23.14 null# TOC
+
+
Null muxer.
+
+
This muxer does not generate any output file, it is mainly useful for
+testing or benchmarking purposes.
+
+
For example to benchmark decoding with ffmpeg
you can use the
+command:
+
+
ffmpeg -benchmark -i INPUT -f null out.null
+
+
+
Note that the above command does not read or write the out.null
+file, but specifying the output file is required by the ffmpeg
+syntax.
+
+
Alternatively you can write the command as:
+
+
ffmpeg -benchmark -i INPUT -f null -
+
+
+
+
23.15 nut# TOC
+
+
+-syncpoints flags
+Change the syncpoint usage in nut:
+
+default use the normal low-overhead seeking aids.
+none do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
+Use of this option is not recommended, as the resulting files are very damage
+ sensitive and seeking is not possible. Also in general the overhead from
+ syncpoints is negligible. Note, -write_index
0 can be used to disable
+ all growing data tables, allowing endless streams to be muxed with limited memory
+ and without these disadvantages.
+
+timestamped extend the syncpoint with a wallclock field.
+
+The none and timestamped flags are experimental.
+
+-write_index bool
+Write index at the end, the default is to write an index.
+
+
+
+
+
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
+
+
+
+
23.16 ogg# TOC
+
+
Ogg container muxer.
+
+
+-page_duration duration
+Preferred page duration, in microseconds. The muxer will attempt to create
+pages that are approximately duration microseconds long. This allows the
+user to compromise between seek granularity and container overhead. The default
+is 1 second. A value of 0 will fill all segments, making pages as large as
+possible. A value of 1 will effectively use 1 packet-per-page in most
+situations, giving a small seek granularity at the cost of additional container
+overhead.
+
+
+
+
+
23.17 segment, stream_segment, ssegment# TOC
+
+
Basic stream segmenter.
+
+
This muxer outputs streams to a number of separate files of nearly
+fixed duration. Output filename pattern can be set in a fashion similar to
+image2 .
+
+
stream_segment
is a variant of the muxer used to write to
+streaming output formats, i.e. which do not require global headers,
+and is recommended for outputting e.g. to MPEG transport stream segments.
+ssegment
is a shorter alias for stream_segment
.
+
+
Every segment starts with a keyframe of the selected reference stream,
+which is set through the reference_stream option.
+
+
Note that if you want accurate splitting for a video file, you need to
+make the input key frames correspond to the exact splitting times
+expected by the segmenter, or the segment muxer will start the new
+segment with the key frame found next after the specified start
+time.
+
+
The segment muxer works best with a single constant frame rate video.
+
+
Optionally it can generate a list of the created segments, by setting
+the option segment_list . The list type is specified by the
+segment_list_type option. The entry filenames in the segment
+list are set by default to the basename of the corresponding segment
+files.
+
+
See also the hls muxer, which provides a more specific
+implementation for HLS segmentation.
+
+
+
23.17.1 Options# TOC
+
+
The segment muxer supports the following options:
+
+
+reference_stream specifier
+Set the reference stream, as specified by the string specifier .
+If specifier is set to auto
, the reference is chosen
+automatically. Otherwise it must be a stream specifier (see the “Stream
+specifiers” chapter in the ffmpeg manual) which specifies the
+reference stream. The default value is auto
.
+
+
+segment_format format
+Override the inner container format, by default it is guessed by the filename
+extension.
+
+
+segment_format_options options_list
+Set output format options using a :-separated list of key=value
+parameters. Values containing the :
special character must be
+escaped.
+
+
+segment_list name
+Generate also a listfile named name . If not specified no
+listfile is generated.
+
+
+segment_list_flags flags
+Set flags affecting the segment list generation.
+
+It currently supports the following flags:
+
+‘cache ’
+Allow caching (only affects M3U8 list files).
+
+
+‘live ’
+Allow live-friendly file generation.
+
+
+
+
+segment_list_type type
+Select the listing format.
+
+flat use a simple flat list of entries.
+hls use a m3u8-like structure.
+
+
+
+segment_list_size size
+Update the list file so that it contains at most size
+segments. If 0 the list file will contain all the segments. Default
+value is 0.
+
+
+segment_list_entry_prefix prefix
+Prepend prefix to each entry. Useful to generate absolute paths.
+By default no prefix is applied.
+
+The following values are recognized:
+
+‘flat ’
+Generate a flat list for the created segments, one segment per line.
+
+
+‘csv, ext ’
+Generate a list for the created segments, one segment per line,
+each line matching the format (comma-separated values):
+
+
segment_filename ,segment_start_time ,segment_end_time
+
+
+segment_filename is the name of the output file generated by the
+muxer according to the provided pattern. CSV escaping (according to
+RFC4180) is applied if required.
+
+segment_start_time and segment_end_time specify
+the segment start and end time expressed in seconds.
+
+A list file with the suffix ".csv"
or ".ext"
will
+auto-select this format.
+
+‘ext ’ is deprecated in favor of ‘csv ’.
+
+
+‘ffconcat ’
+Generate an ffconcat file for the created segments. The resulting file
+can be read using the FFmpeg concat demuxer.
+
+A list file with the suffix ".ffcat"
or ".ffconcat"
will
+auto-select this format.
+
+
+‘m3u8 ’
+Generate an extended M3U8 file, version 3, compliant with
+http://tools.ietf.org/id/draft-pantos-http-live-streaming .
+
+A list file with the suffix ".m3u8"
will auto-select this format.
+
+
+
+If not specified the type is guessed from the list file name suffix.
+
+
+segment_time time
+Set segment duration to time , the value must be a duration
+specification. Default value is "2". See also the
+segment_times option.
+
+Note that splitting may not be accurate, unless you force the
+reference stream key-frames at the given time. See the introductory
+notice and the examples below.
+
+
+segment_atclocktime 1|0
+If set to "1" split at regular clock time intervals starting from 00:00
+o’clock. The time value specified in segment_time is
+used for setting the length of the splitting interval.
+
+For example with segment_time set to "900" this makes it possible
+to create files at 12:00 o’clock, 12:15, 12:30, etc.
+
+Default value is "0".
+
+
+segment_time_delta delta
+Specify the accuracy time when selecting the start time for a
+segment, expressed as a duration specification. Default value is "0".
+
+When delta is specified a key-frame will start a new segment if its
+PTS satisfies the relation:
+
+
PTS >= start_time - time_delta
+
+
+This option is useful when splitting video content, which is always
+split at GOP boundaries, in case a key frame is found just before the
+specified split time.
+
+In particular may be used in combination with the ffmpeg option
+force_key_frames . The key frame times specified by
+force_key_frames may not be set accurately because of rounding
+issues, with the consequence that a key frame time may result set just
+before the specified time. For constant frame rate videos a value of
+1/(2*frame_rate ) should address the worst case mismatch between
+the specified time and the time set by force_key_frames .
+
+
+segment_times times
+Specify a list of split points. times contains a list of comma
+separated duration specifications, in increasing order. See also
+the segment_time option.
+
+
+segment_frames frames
+Specify a list of split video frame numbers. frames contains a
+list of comma separated integer numbers, in increasing order.
+
+This option specifies to start a new segment whenever a reference
+stream key frame is found and the sequential number (starting from 0)
+of the frame is greater or equal to the next value in the list.
+
+
+segment_wrap limit
+Wrap around segment index once it reaches limit .
+
+
+segment_start_number number
+Set the sequence number of the first segment. Defaults to 0
.
+
+
+reset_timestamps 1|0
+Reset timestamps at the begin of each segment, so that each segment
+will start with near-zero timestamps. It is meant to ease the playback
+of the generated segments. May not work with some combinations of
+muxers/codecs. It is set to 0
by default.
+
+
+initial_offset offset
+Specify timestamp offset to apply to the output packet timestamps. The
+argument must be a time duration specification, and defaults to 0.
+
+
+
+
+
23.17.2 Examples# TOC
+
+
+
+
+
23.18 smoothstreaming# TOC
+
+
Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with conventional web server.
+
+
+window_size
+Specify the number of fragments kept in the manifest. Default 0 (keep all).
+
+
+extra_window_size
+Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
+
+
+lookahead_count
+Specify the number of lookahead fragments. Default 2.
+
+
+min_frag_duration
+Specify the minimum fragment duration (in microseconds). Default 5000000.
+
+
+remove_at_exit
+Specify whether to remove all fragments when finished. Default 0 (do not remove).
+
+
+
+
+
+
23.19 tee# TOC
+
+
The tee muxer can be used to write the same data to several files or any
+other kind of muxer. It can be used, for example, to both stream a video to
+the network and save it to disk at the same time.
+
+
It is different from specifying several outputs to the ffmpeg
+command-line tool because the audio and video data will be encoded only once
+with the tee muxer; encoding can be a very expensive process. It is not
+useful when using the libavformat API directly because it is then possible
+to feed the same packets to several muxers directly.
+
+
The slave outputs are specified in the file name given to the muxer,
+separated by ’|’. If any of the slave name contains the ’|’ separator,
+leading or trailing spaces or any special character, it must be
+escaped (see (ffmpeg-utils)the "Quoting and escaping"
+section in the ffmpeg-utils(1) manual ).
+
+
Muxer options can be specified for each slave by prepending them as a list of
+key =value pairs separated by ’:’, between square brackets. If
+the options values contain a special character or the ’:’ separator, they
+must be escaped; note that this is a second level escaping.
+
+
The following special options are also recognized:
+
+f
+Specify the format name. Useful if it cannot be guessed from the
+output name suffix.
+
+
+bsfs[/spec ]
+Specify a list of bitstream filters to apply to the specified
+output.
+
+It is possible to specify to which streams a given bitstream filter
+applies, by appending a stream specifier to the option separated by
+/
. spec must be a stream specifier (see Format stream specifiers ). If the stream specifier is not specified, the
+bitstream filters will be applied to all streams in the output.
+
+Several bitstream filters can be specified, separated by ",".
+
+
+select
+Select the streams that should be mapped to the slave output,
+specified by a stream specifier. If not specified, this defaults to
+all the input streams.
+
+
+
+
+
23.19.1 Examples# TOC
+
+
+ Encode something and both archive it in a WebM file and stream it
+as MPEG-TS over UDP (the streams need to be explicitly mapped):
+
+
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
+ "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
+
+
+ Use ffmpeg
to encode the input, and send the output
+to three different destinations. The dump_extra
bitstream
+filter is used to add extradata information to all the output video
+keyframes packets, as requested by the MPEG-TS format. The select
+option is applied to out.aac in order to make it contain only
+audio packets.
+
+
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
+ -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
+
+
+ As above, but select only stream a:1
for the audio output. Note
+that a second level escaping must be performed, as ":" is a special
+character used to separate options.
+
+
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
+ -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
+
+
+
+
Note: some codecs may need different options depending on the output format;
+the auto-detection of this can not work with the tee muxer. The main example
+is the global_header flag.
+
+
+
23.20 webm_dash_manifest# TOC
+
+
WebM DASH Manifest muxer.
+
+
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
+
+
+
23.20.1 Options# TOC
+
+
This muxer supports the following options:
+
+
+adaptation_sets
+This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
+unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
+audio and video streams. Any number of adaptation sets can be added using this option.
+
+
+
+
+
23.20.2 Example# TOC
+
+
ffmpeg -f webm_dash_manifest -i video1.webm \
+ -f webm_dash_manifest -i video2.webm \
+ -f webm_dash_manifest -i audio1.webm \
+ -f webm_dash_manifest -i audio2.webm \
+ -map 0 -map 1 -map 2 -map 3 \
+ -c copy \
+ -f webm_dash_manifest \
+ -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
+ manifest.xml
+
+
+
+
24 Metadata# TOC
+
+
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
+INI-like text file and then load it back using the metadata muxer/demuxer.
+
+
The file format is as follows:
+
+ A file consists of a header and a number of metadata tags divided into sections,
+each on its own line.
+
+ The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
+
+ Metadata tags are of the form ’key=value’
+
+ Immediately after header follows global metadata
+
+ After global metadata there may be sections with per-stream/per-chapter
+metadata.
+
+ A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
+brackets (’[’, ’]’) and ends with next section or end of file.
+
+ At the beginning of a chapter section there may be an optional timebase to be
+used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
+den are integers. If the timebase is missing then start/end times are assumed to
+be in milliseconds.
+Next a chapter section must contain chapter start and end times in form
+’START=num’, ’END=num’, where num is a positive integer.
+
+ Empty lines and lines starting with ’;’ or ’#’ are ignored.
+
+ Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
+newline) must be escaped with a backslash ’\’.
+
+ Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
+the tag (in the example above key is ’foo ’, value is ’ bar’).
+
+
+
A ffmetadata file might look like this:
+
+
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+
+
By using the ffmetadata muxer and demuxer it is possible to extract
+metadata from an input file to an ffmetadata file, and then transcode
+the file into an output file with the edited ffmetadata file.
+
+
Extracting an ffmetadata file with ffmpeg goes as follows:
+
+
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+
+
Reinserting edited metadata information from the FFMETADATAFILE file can
+be done as:
+
+
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+
+
+
25 Protocols# TOC
+
+
Protocols are configured elements in FFmpeg that enable access to
+resources that require specific protocols.
+
+
When you configure your FFmpeg build, all the supported protocols are
+enabled by default. You can list all available ones using the
+configure option "–list-protocols".
+
+
You can disable all the protocols using the configure option
+"–disable-protocols", and selectively enable a protocol using the
+option "–enable-protocol=PROTOCOL ", or you can disable a
+particular protocol using the option
+"–disable-protocol=PROTOCOL ".
+
+
The option "-protocols" of the ff* tools will display the list of
+supported protocols.
+
+
A description of the currently available protocols follows.
+
+
+
25.1 bluray# TOC
+
+
Read BluRay playlist.
+
+
The accepted options are:
+
+angle
+BluRay angle
+
+
+chapter
+Start chapter (1...N)
+
+
+playlist
+Playlist to read (BDMV/PLAYLIST/?????.mpls)
+
+
+
+
+
Examples:
+
+
Read longest playlist from BluRay mounted to /mnt/bluray:
+
+
+
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
+
+
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+
+
+
25.2 cache# TOC
+
+
Caching wrapper for input stream.
+
+
Cache the input stream to temporary file. It brings seeking capability to live streams.
+
+
+
+
+
25.3 concat# TOC
+
+
Physical concatenation protocol.
+
+
Allow to read and seek from many resources in sequence as if they were
+a unique resource.
+
+
A URL accepted by this protocol has the syntax:
+
+
concat:URL1 |URL2 |...|URLN
+
+
+
where URL1 , URL2 , ..., URLN are the urls of the
+resource to be concatenated, each one possibly specifying a distinct
+protocol.
+
+
For example to read a sequence of files split1.mpeg ,
+split2.mpeg , split3.mpeg with ffplay
use the
+command:
+
+
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+
+
Note that you may need to escape the character "|" which is special for
+many shells.
+
+
+
25.4 crypto# TOC
+
+
AES-encrypted stream reading protocol.
+
+
The accepted options are:
+
+key
+Set the AES decryption key binary block from given hexadecimal representation.
+
+
+iv
+Set the AES decryption initialization vector binary block from given hexadecimal representation.
+
+
+
+
Accepted URL formats:
+
+
crypto:URL
+crypto+URL
+
+
+
+
25.5 data# TOC
+
+
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
+
+
For example, to convert a GIF file given inline with ffmpeg
:
+
+
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+
+
+
25.6 file# TOC
+
+
File access protocol.
+
+
Allow to read from or write to a file.
+
+
A file URL can have the form:
+
+
+
where filename is the path of the file to read.
+
+
An URL that does not have a protocol prefix will be assumed to be a
+file URL. Depending on the build, an URL that looks like a Windows
+path with the drive letter at the beginning will also be assumed to be
+a file URL (usually not the case in builds for unix-like systems).
+
+
For example to read from a file input.mpeg with ffmpeg
+use the command:
+
+
ffmpeg -i file:input.mpeg output.mpeg
+
+
+
This protocol accepts the following options:
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable for files on slow medium.
+
+
+
+
+
25.7 ftp# TOC
+
+
FTP (File Transfer Protocol).
+
+
Allow to read from or write to remote resources using FTP protocol.
+
+
Following syntax is required.
+
+
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+ftp-anonymous-password
+Password used when logging in as an anonymous user. Typically an e-mail address
+should be used.
+
+
+ftp-write-seekable
+Control seekability of connection during encoding. If set to 1 the
+resource is supposed to be seekable, if set to 0 it is assumed not
+to be seekable. Default value is 0.
+
+
+
+
NOTE: Protocol can be used as output, but it is recommended to not do
+it, unless special care is taken (tests, customized server configuration
+etc.). Different FTP servers behave in different way during seek
+operation. ff* tools may produce incomplete content due to server limitations.
+
+
+
25.8 gopher# TOC
+
+
Gopher protocol.
+
+
+
25.9 hls# TOC
+
+
Read Apple HTTP Live Streaming compliant segmented stream as
+a uniform one. The M3U8 playlists describing the segments can be
+remote HTTP resources or local files, accessed using the standard
+file protocol.
+The nested protocol is declared by specifying
+"+proto " after the hls URI scheme name, where proto
+is either "file" or "http".
+
+
+
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+
+
Using this protocol is discouraged - the hls demuxer should work
+just as well (if not, please report the issues) and is more complete.
+To use the hls demuxer instead, simply use the direct URLs to the
+m3u8 files.
+
+
+
25.10 http# TOC
+
+
HTTP (Hyper Text Transfer Protocol).
+
+
This protocol accepts the following options:
+
+
+seekable
+Control seekability of connection. If set to 1 the resource is
+supposed to be seekable, if set to 0 it is assumed not to be seekable,
+if set to -1 it will try to autodetect if it is seekable. Default
+value is -1.
+
+
+chunked_post
+If set to 1 use chunked Transfer-Encoding for posts, default is 1.
+
+
+content_type
+Set a specific content type for the POST messages.
+
+
+headers
+Set custom HTTP headers, can override built in default headers. The
+value must be a string encoding the headers.
+
+
+multiple_requests
+Use persistent connections if set to 1, default is 0.
+
+
+post_data
+Set custom HTTP post data.
+
+
+user-agent
+user_agent
+Override the User-Agent header. If not specified the protocol will use a
+string describing the libavformat build. ("Lavf/<version>")
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+mime_type
+Export the MIME type.
+
+
+icy
+If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
+supports this, the metadata has to be retrieved by the application by reading
+the icy_metadata_headers and icy_metadata_packet options.
+The default is 1.
+
+
+icy_metadata_headers
+If the server supports ICY metadata, this contains the ICY-specific HTTP reply
+headers, separated by newline characters.
+
+
+icy_metadata_packet
+If the server supports ICY metadata, and icy was set to 1, this
+contains the last non-empty metadata packet sent by the server. It should be
+polled in regular intervals by applications interested in mid-stream metadata
+updates.
+
+
+cookies
+Set the cookies to be sent in future requests. The format of each cookie is the
+same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
+delimited by a newline character.
+
+
+offset
+Set initial byte offset.
+
+
+end_offset
+Try to limit the request to bytes preceding this offset.
+
+
+
+
+
25.10.1 HTTP Cookies# TOC
+
+
Some HTTP requests will be denied unless cookie values are passed in with the
+request. The cookies option allows these cookies to be specified. At
+the very least, each cookie must specify a value along with a path and domain.
+HTTP requests that match both the domain and path will automatically include the
+cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
+by a newline.
+
+
The required syntax to play a stream specifying a cookie is:
+
+
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+
+
+
25.11 Icecast# TOC
+
+
Icecast protocol (stream to Icecast servers)
+
+
This protocol accepts the following options:
+
+
+ice_genre
+Set the stream genre.
+
+
+ice_name
+Set the stream name.
+
+
+ice_description
+Set the stream description.
+
+
+ice_url
+Set the stream website URL.
+
+
+ice_public
+Set if the stream should be public.
+The default is 0 (not public).
+
+
+user_agent
+Override the User-Agent header. If not specified a string of the form
+"Lavf/<version>" will be used.
+
+
+password
+Set the Icecast mountpoint password.
+
+
+content_type
+Set the stream content type. This must be set if it is different from
+audio/mpeg.
+
+
+legacy_icecast
+This enables support for Icecast versions < 2.4.0, that do not support the
+HTTP PUT method but the SOURCE method.
+
+
+
+
+
+
icecast://[username [:password ]@]server :port /mountpoint
+
+
+
+
25.12 mmst# TOC
+
+
MMS (Microsoft Media Server) protocol over TCP.
+
+
+
25.13 mmsh# TOC
+
+
MMS (Microsoft Media Server) protocol over HTTP.
+
+
The required syntax is:
+
+
mmsh://server [:port ][/app ][/playpath ]
+
+
+
+
25.14 md5# TOC
+
+
MD5 output protocol.
+
+
Computes the MD5 hash of the data to be written, and on close writes
+this to the designated output or stdout if none is specified. It can
+be used to test muxers without writing an actual file.
+
+
Some examples follow.
+
+
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+
+
Note that some formats (typically MOV) require the output protocol to
+be seekable, so they will fail with the MD5 output protocol.
+
+
+
25.15 pipe# TOC
+
+
UNIX pipe access protocol.
+
+
Allow to read and write from UNIX pipes.
+
+
The accepted syntax is:
+
+
+
number is the number corresponding to the file descriptor of the
+pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
+is not specified, by default the stdout file descriptor will be used
+for writing, stdin for reading.
+
+
For example to read from stdin with ffmpeg
:
+
+
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+
+
For writing to stdout with ffmpeg
:
+
+
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+
+
This protocol accepts the following options:
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable if data transmission is slow.
+
+
+
+
Note that some formats (typically MOV), require the output protocol to
+be seekable, so they will fail with the pipe output protocol.
+
+
+
25.16 rtmp# TOC
+
+
Real-Time Messaging Protocol.
+
+
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
+content across a TCP/IP network.
+
+
The required syntax is:
+
+
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
+
+
+
The accepted parameters are:
+
+username
+An optional username (mostly for publishing).
+
+
+password
+An optional password (mostly for publishing).
+
+
+server
+The address of the RTMP server.
+
+
+port
+The number of the TCP port to use (1935 by default).
+
+
+app
+It is the name of the application to access. It usually corresponds to
+the path where the application is installed on the RTMP server
+(e.g. /ondemand/ , /flash/live/ , etc.). You can override
+the value parsed from the URI through the rtmp_app
option, too.
+
+
+playpath
+It is the path or name of the resource to play with reference to the
+application specified in app , may be prefixed by "mp4:". You
+can override the value parsed from the URI through the rtmp_playpath
+option, too.
+
+
+listen
+Act as a server, listening for an incoming connection.
+
+
+timeout
+Maximum time to wait for the incoming connection. Implies listen.
+
+
+
+
Additionally, the following parameters can be set via command line options
+(or in code via AVOption
s):
+
+rtmp_app
+Name of application to connect on the RTMP server. This option
+overrides the parameter specified in the URI.
+
+
+rtmp_buffer
+Set the client buffer time in milliseconds. The default is 3000.
+
+
+rtmp_conn
+Extra arbitrary AMF connection parameters, parsed from a string,
+e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
+Each value is prefixed by a single character denoting the type,
+B for Boolean, N for number, S for string, O for object, or Z for null,
+followed by a colon. For Booleans the data must be either 0 or 1 for
+FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
+1 to end or begin an object, respectively. Data items in subobjects may
+be named, by prefixing the type with ’N’ and specifying the name before
+the value (i.e. NB:myFlag:1
). This option may be used multiple
+times to construct arbitrary AMF sequences.
+
+
+rtmp_flashver
+Version of the Flash plugin used to run the SWF player. The default
+is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
+<libavformat version>).)
+
+
+rtmp_flush_interval
+Number of packets flushed in the same request (RTMPT only). The default
+is 10.
+
+
+rtmp_live
+Specify that the media is a live stream. No resuming or seeking in
+live streams is possible. The default value is any
, which means the
+subscriber first tries to play the live stream specified in the
+playpath. If a live stream of that name is not found, it plays the
+recorded stream. The other possible values are live
and
+recorded
.
+
+
+rtmp_pageurl
+URL of the web page in which the media was embedded. By default no
+value will be sent.
+
+
+rtmp_playpath
+Stream identifier to play or to publish. This option overrides the
+parameter specified in the URI.
+
+
+rtmp_subscribe
+Name of live stream to subscribe to. By default no value will be sent.
+It is only sent if the option is specified or if rtmp_live
+is set to live.
+
+
+rtmp_swfhash
+SHA256 hash of the decompressed SWF file (32 bytes).
+
+
+rtmp_swfsize
+Size of the decompressed SWF file, required for SWFVerification.
+
+
+rtmp_swfurl
+URL of the SWF player for the media. By default no value will be sent.
+
+
+rtmp_swfverify
+URL to player swf file, compute hash/size automatically.
+
+
+rtmp_tcurl
+URL of the target stream. Defaults to proto://host[:port]/app.
+
+
+
+
+
For example to read with ffplay
a multimedia resource named
+"sample" from the application "vod" from an RTMP server "myserver":
+
+
ffplay rtmp://myserver/vod/sample
+
+
+
To publish to a password protected server, passing the playpath and
+app names separately:
+
+
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+
+
+
25.17 rtmpe# TOC
+
+
Encrypted Real-Time Messaging Protocol.
+
+
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
+streaming multimedia content within standard cryptographic primitives,
+consisting of Diffie-Hellman key exchange and HMACSHA256, generating
+a pair of RC4 keys.
+
+
+
25.18 rtmps# TOC
+
+
Real-Time Messaging Protocol over a secure SSL connection.
+
+
The Real-Time Messaging Protocol (RTMPS) is used for streaming
+multimedia content across an encrypted connection.
+
+
+
25.19 rtmpt# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
+for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
25.20 rtmpte# TOC
+
+
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
+is used for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
25.21 rtmpts# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTPS.
+
+
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
+for streaming multimedia content within HTTPS requests to traverse
+firewalls.
+
+
+
25.22 libsmbclient# TOC
+
+
libsmbclient permits one to manipulate CIFS/SMB network resources.
+
+
Following syntax is required.
+
+
+
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in milliseconds of socket I/O operations used by the underlying
+low level operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+workgroup
+Set the workgroup used for making connections. By default workgroup is not specified.
+
+
+
+
+
For more information see: http://www.samba.org/ .
+
+
+
25.23 libssh# TOC
+
+
Secure File Transfer Protocol via libssh
+
+
Allows reading from or writing to remote resources using the SFTP protocol.
+
+
Following syntax is required.
+
+
+
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+private_key
+Specify the path of the file containing private key to use during authorization.
+By default libssh searches for keys in the ~/.ssh/ directory.
+
+
+
+
+
Example: Play a file stored on remote server.
+
+
+
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+
+
+
25.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
+
+
Real-Time Messaging Protocol and its variants supported through
+librtmp.
+
+
Requires the presence of the librtmp headers and library during
+configuration. You need to explicitly configure the build with
+"–enable-librtmp". If enabled this will replace the native RTMP
+protocol.
+
+
This protocol provides most client functions and a few server
+functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
+encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
+variants of these encrypted types (RTMPTE, RTMPTS).
+
+
The required syntax is:
+
+
rtmp_proto ://server [:port ][/app ][/playpath ] options
+
+
+
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
+"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
+server , port , app and playpath have the same
+meaning as specified for the RTMP native protocol.
+options contains a list of space-separated options of the form
+key =val .
+
+
See the librtmp manual page (man 3 librtmp) for more information.
+
+
For example, to stream a file in real-time to an RTMP server using
+ffmpeg
:
+
+
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+
+
To play the same stream using ffplay
:
+
+
ffplay "rtmp://myserver/live/mystream live=1"
+
+
+
+
25.25 rtp# TOC
+
+
Real-time Transport Protocol.
+
+
The required syntax for an RTP URL is:
+rtp://hostname [:port ][?option =val ...]
+
+
port specifies the RTP port to use.
+
+
The following URL options are supported:
+
+
+ttl=n
+Set the TTL (Time-To-Live) value (for multicast only).
+
+
+rtcpport=n
+Set the remote RTCP port to n .
+
+
+localrtpport=n
+Set the local RTP port to n .
+
+
+localrtcpport=n
+Set the local RTCP port to n .
+
+
+pkt_size=n
+Set max packet size (in bytes) to n .
+
+
+connect=0|1
+Do a connect()
on the UDP socket (if set to 1) or not (if set
+to 0).
+
+
+sources=ip [,ip ]
+List allowed source IP addresses.
+
+
+block=ip [,ip ]
+List disallowed (blocked) source IP addresses.
+
+
+write_to_source=0|1
+Send packets to the source address of the latest received packet (if
+set to 1) or to a default remote address (if set to 0).
+
+
+localport=n
+Set the local RTP port to n .
+
+This is a deprecated option. Instead, localrtpport should be
+used.
+
+
+
+
+
Important notes:
+
+
+ If rtcpport is not set the RTCP port will be set to the RTP
+port value plus 1.
+
+ If localrtpport (the local RTP port) is not set any available
+port will be used for the local RTP and RTCP ports.
+
+ If localrtcpport (the local RTCP port) is not set it will be
+set to the local RTP port value plus 1.
+
+
+
+
25.26 rtsp# TOC
+
+
Real-Time Streaming Protocol.
+
+
RTSP is not technically a protocol handler in libavformat, it is a demuxer
+and muxer. The demuxer supports both normal RTSP (with data transferred
+over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
+data transferred over RDT).
+
+
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
+supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
+RTSP server ).
+
+
The required syntax for a RTSP url is:
+
+
rtsp://hostname [:port ]/path
+
+
+
Options can be set on the ffmpeg
/ffplay
command
+line, or set in code via AVOption
s or in
+avformat_open_input
.
+
+
The following options are supported.
+
+
+initial_pause
+Do not start playing the stream immediately if set to 1. Default value
+is 0.
+
+
+rtsp_transport
+Set RTSP transport protocols.
+
+It accepts the following values:
+
+‘udp ’
+Use UDP as lower transport protocol.
+
+
+‘tcp ’
+Use TCP (interleaving within the RTSP control channel) as lower
+transport protocol.
+
+
+‘udp_multicast ’
+Use UDP multicast as lower transport protocol.
+
+
+‘http ’
+Use HTTP tunneling as lower transport protocol, which is useful for
+passing proxies.
+
+
+
+Multiple lower transport protocols may be specified, in that case they are
+tried one at a time (if the setup of one fails, the next one is tried).
+For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
+
+
+rtsp_flags
+Set RTSP flags.
+
+The following values are accepted:
+
+‘filter_src ’
+Accept packets only from negotiated peer address and port.
+
+‘listen ’
+Act as a server, listening for an incoming connection.
+
+‘prefer_tcp ’
+Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
+
+
+
+Default value is ‘none ’.
+
+
+allowed_media_types
+Set media types to accept from the server.
+
+The following flags are accepted:
+
+‘video ’
+‘audio ’
+‘data ’
+
+
+By default it accepts all media types.
+
+
+min_port
+Set minimum local UDP port. Default value is 5000.
+
+
+max_port
+Set maximum local UDP port. Default value is 65000.
+
+
+timeout
+Set maximum timeout (in seconds) to wait for incoming connections.
+
+A value of -1 means infinite (default). This option implies the
+rtsp_flags set to ‘listen ’.
+
+
+reorder_queue_size
+Set number of packets to buffer for handling of reordered packets.
+
+
+stimeout
+Set socket TCP I/O timeout in microseconds.
+
+
+user-agent
+Override User-Agent header. If not specified, it defaults to the
+libavformat identifier string.
+
+
+
+
When receiving data over UDP, the demuxer tries to reorder received packets
+(since they may arrive out of order, or packets may get lost totally). This
+can be disabled by setting the maximum demuxing delay to zero (via
+the max_delay
field of AVFormatContext).
+
+
When watching multi-bitrate Real-RTSP streams with ffplay
, the
+streams to display can be chosen with -vst
n and
+-ast
n for video and audio respectively, and can be switched
+on the fly by pressing v
and a
.
+
+
+
25.26.1 Examples# TOC
+
+
The following examples all make use of the ffplay
and
+ffmpeg
tools.
+
+
+ Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
+
+
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
+
+
+ Watch a stream tunneled over HTTP:
+
+
ffplay -rtsp_transport http rtsp://server/video.mp4
+
+
+ Send a stream in realtime to a RTSP server, for others to watch:
+
+
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
+
+
+ Receive a stream in realtime:
+
+
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
+
+
+
+
+
25.27 sap# TOC
+
+
Session Announcement Protocol (RFC 2974). This is not technically a
+protocol handler in libavformat, it is a muxer and demuxer.
+It is used for signalling of RTP streams, by announcing the SDP for the
+streams regularly on a separate port.
+
+
+
25.27.1 Muxer# TOC
+
+
The syntax for a SAP url given to the muxer is:
+
+
sap://destination [:port ][?options ]
+
+
+
The RTP packets are sent to destination on port port ,
+or to port 5004 if no port is specified.
+options is a &
-separated list. The following options
+are supported:
+
+
+announce_addr=address
+Specify the destination IP address for sending the announcements to.
+If omitted, the announcements are sent to the commonly used SAP
+announcement multicast address 224.2.127.254 (sap.mcast.net), or
+ff0e::2:7ffe if destination is an IPv6 address.
+
+
+announce_port=port
+Specify the port to send the announcements on, defaults to
+9875 if not specified.
+
+
+ttl=ttl
+Specify the time to live value for the announcements and RTP packets,
+defaults to 255.
+
+
+same_port=0|1
+If set to 1, send all RTP streams on the same port pair. If zero (the
+default), all streams are sent on unique ports, with each stream on a
+port 2 numbers higher than the previous.
+VLC/Live555 requires this to be set to 1, to be able to receive the stream.
+The RTP stack in libavformat for receiving requires all streams to be sent
+on unique ports.
+
+
+
+
Example command lines follow.
+
+
To broadcast a stream on the local subnet, for watching in VLC:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+
+
Similarly, for watching in ffplay
:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+
+
And for watching in ffplay
, over IPv6:
+
+
+
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+
+
+
25.27.2 Demuxer# TOC
+
+
The syntax for a SAP url given to the demuxer is:
+
+
sap://[address ][:port ]
+
+
+
address is the multicast address to listen for announcements on,
+if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
+is the port that is listened on, 9875 if omitted.
+
+
+The demuxer listens for announcements on the given address and port.
+Once an announcement is received, it tries to receive that particular stream.
+
+
Example command lines follow.
+
+
To play back the first stream announced on the normal SAP multicast address:
+
+
+
+
+To play back the first stream announced on the default IPv6 SAP multicast address:
+
+
+
ffplay sap://[ff0e::2:7ffe]
+
+
+
+
25.28 sctp# TOC
+
+
Stream Control Transmission Protocol.
+
+
The accepted URL syntax is:
+
+
sctp://host :port [?options ]
+
+
+
The protocol accepts the following options:
+
+listen
+If set to any value, listen for an incoming connection. Outgoing connection is done by default.
+
+
+max_streams
+Set the maximum number of streams. By default no limit is set.
+
+
+
+
+
25.29 srtp# TOC
+
+
Secure Real-time Transport Protocol.
+
+
The accepted options are:
+
+srtp_in_suite
+srtp_out_suite
+Select input and output encoding suites.
+
+Supported values:
+
+‘AES_CM_128_HMAC_SHA1_80 ’
+‘SRTP_AES128_CM_HMAC_SHA1_80 ’
+‘AES_CM_128_HMAC_SHA1_32 ’
+‘SRTP_AES128_CM_HMAC_SHA1_32 ’
+
+
+
+srtp_in_params
+srtp_out_params
+Set input and output encoding parameters, which are expressed by a
+base64-encoded representation of a binary block. The first 16 bytes of
+this binary block are used as master key, the following 14 bytes are
+used as master salt.
+
+
+
+
+
25.30 subfile# TOC
+
+
Virtually extract a segment of a file or another stream.
+The underlying stream must be seekable.
+
+
Accepted options:
+
+start
+Start offset of the extracted segment, in bytes.
+
+end
+End offset of the extracted segment, in bytes.
+
+
+
+
Examples:
+
+
Extract a chapter from a DVD VOB file (start and end sectors obtained
+externally and multiplied by 2048):
+
+
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
+
+
+
Play an AVI file directly from a TAR archive:
+subfile,,start,183241728,end,366490624,,:archive.tar
+
+
+
25.31 tcp# TOC
+
+
Transmission Control Protocol.
+
+
The required syntax for a TCP url is:
+
+
tcp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form
+key =val .
+
+
The list of supported options follows.
+
+
+listen=1|0
+Listen for an incoming connection. Default value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+listen_timeout=microseconds
+Set listen timeout, expressed in microseconds.
+
+
+
+
The following example shows how to setup a listening TCP connection
+with ffmpeg
, which is then accessed with ffplay
:
+
+
ffmpeg -i input -f format tcp://hostname :port ?listen
+ffplay tcp://hostname :port
+
+
+
+
25.32 tls# TOC
+
+
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
+
+
The required syntax for a TLS/SSL url is:
+
+
tls://hostname :port [?options ]
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+ca_file, cafile=filename
+A file containing certificate authority (CA) root certificates to treat
+as trusted. If the linked TLS library contains a default this might not
+need to be specified for verification to work, but not all libraries and
+setups have defaults built in.
+The file must be in OpenSSL PEM format.
+
+
+tls_verify=1|0
+If enabled, try to verify the peer that we are communicating with.
+Note, if using OpenSSL, this currently only makes sure that the
+peer certificate is signed by one of the root certificates in the CA
+database, but it does not validate that the certificate actually
+matches the host name we are trying to connect to. (With GnuTLS,
+the host name is validated as well.)
+
+This is disabled by default since it requires a CA database to be
+provided by the caller in many cases.
+
+
+cert_file, cert=filename
+A file containing a certificate to use in the handshake with the peer.
+(When operating as server, in listen mode, this is more often required
+by the peer, while client certificates only are mandated in certain
+setups.)
+
+
+key_file, key=filename
+A file containing the private key for the certificate.
+
+
+listen=1|0
+If enabled, listen for connections on the provided port, and assume
+the server role in the handshake instead of the client role.
+
+
+
+
+
Example command lines:
+
+
To create a TLS/SSL server that serves an input stream.
+
+
+
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
+
+
+
To play back a stream from the TLS/SSL server using ffplay
:
+
+
+
ffplay tls://hostname :port
+
+
+
+
25.33 udp# TOC
+
+
User Datagram Protocol.
+
+
The required syntax for a UDP URL is:
+
+
udp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form key =val .
+
+
In case threading is enabled on the system, a circular buffer is used
+to store the incoming data, which allows one to reduce loss of data due to
+UDP socket buffer overruns. The fifo_size and
+overrun_nonfatal options are related to this buffer.
+
+
The list of supported options follows.
+
+
+buffer_size=size
+Set the UDP maximum socket buffer size in bytes. This is used to set either
+the receive or send buffer size, depending on what the socket is used for.
+Default is 64KB. See also fifo_size .
+
+
+localport=port
+Override the local UDP port to bind with.
+
+
+localaddr=addr
+Choose the local IP address. This is useful e.g. if sending multicast
+and the host has multiple interfaces, where the user can choose
+which interface to send on by specifying the IP address of that interface.
+
+
+pkt_size=size
+Set the size in bytes of UDP packets.
+
+
+reuse=1|0
+Explicitly allow or disallow reusing UDP sockets.
+
+
+ttl=ttl
+Set the time to live value (for multicast only).
+
+
+connect=1|0
+Initialize the UDP socket with connect()
. In this case, the
+destination address can’t be changed with ff_udp_set_remote_url later.
+If the destination address isn’t known at the start, this option can
+be specified in ff_udp_set_remote_url, too.
+This allows finding out the source address for the packets with getsockname,
+and makes writes return with AVERROR(ECONNREFUSED) if "destination
+unreachable" is received.
+For receiving, this gives the benefit of only receiving packets from
+the specified peer address/port.
+
+
+sources=address [,address ]
+Only receive packets sent to the multicast group from one of the
+specified sender IP addresses.
+
+
+block=address [,address ]
+Ignore packets sent to the multicast group from the specified
+sender IP addresses.
+
+
+fifo_size=units
+Set the UDP receiving circular buffer size, expressed as a number of
+packets with size of 188 bytes. If not specified defaults to 7*4096.
+
+
+overrun_nonfatal=1|0
+Survive in case of UDP receiving circular buffer overrun. Default
+value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+broadcast=1|0
+Explicitly allow or disallow UDP broadcasting.
+
+Note that broadcasting may not work properly on networks having
+a broadcast storm protection.
+
+
+
+
+
25.33.1 Examples# TOC
+
+
+ Use ffmpeg
to stream over UDP to a remote endpoint:
+
+
ffmpeg -i input -f format udp://hostname :port
+
+
+ Use ffmpeg
to stream in mpegts format over UDP using 188
+sized UDP packets, using a large input buffer:
+
+
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
+
+
+ Use ffmpeg
to receive over UDP from a remote endpoint:
+
+
ffmpeg -i udp://[multicast-address ]:port ...
+
+
+
+
+
25.34 unix# TOC
+
+
Unix local socket
+
+
The required syntax for a Unix socket URL is:
+
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+timeout
+Timeout in ms.
+
+listen
+Create the Unix socket in listening mode.
+
+
+
+
+
26 Device Options# TOC
+
+
The libavdevice library provides the same interface as
+libavformat. Namely, an input device is considered like a demuxer, and
+an output device like a muxer, and the interface and generic device
+options are the same provided by libavformat (see the ffmpeg-formats
+manual).
+
+
In addition each input or output device may support so-called private
+options, which are specific for that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the device
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
+
+
27 Input Devices# TOC
+
+
Input devices are configured elements in FFmpeg which allow you to access
+the data coming from a multimedia device attached to your system.
+
+
When you configure your FFmpeg build, all the supported input devices
+are enabled by default. You can list all available ones using the
+configure option "–list-indevs".
+
+
You can disable all the input devices using the configure option
+"–disable-indevs", and selectively enable an input device using the
+option "–enable-indev=INDEV ", or you can disable a particular
+input device using the option "–disable-indev=INDEV ".
+
+
The option "-devices" of the ff* tools will display the list of
+supported input devices.
+
+
A description of the currently available input devices follows.
+
+
+
27.1 alsa# TOC
+
+
ALSA (Advanced Linux Sound Architecture) input device.
+
+
To enable this input device during configuration you need libasound
+installed on your system.
+
+
This device allows capturing from an ALSA device. The name of the
+device to capture has to be an ALSA card identifier.
+
+
An ALSA identifier has the syntax:
+
+
hw:CARD [,DEV [,SUBDEV ]]
+
+
+
where the DEV and SUBDEV components are optional.
+
+
The three arguments (in order: CARD ,DEV ,SUBDEV )
+specify card number or identifier, device number and subdevice number
+(-1 means any).
+
+
To see the list of cards currently recognized by your system check the
+files /proc/asound/cards and /proc/asound/devices .
+
+
For example to capture with ffmpeg
from an ALSA device with
+card id 0, you may run the command:
+
+
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+
+
For more information see:
+http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
+
+
+
27.2 avfoundation# TOC
+
+
AVFoundation input device.
+
+
AVFoundation is the currently recommended framework by Apple for streamgrabbing on OSX >= 10.7 as well as on iOS.
+The older QTKit framework has been marked deprecated since OSX version 10.7.
+
+
The input filename has to be given in the following syntax:
+
+
-i "[[VIDEO]:[AUDIO]]"
+
+
The first entry selects the video input while the latter selects the audio input.
+The stream has to be specified by the device name or the device index as shown by the device list.
+Alternatively, the video and/or audio input device can be chosen by index using the
+
+ -video_device_index <INDEX>
+
+and/or
+
+ -audio_device_index <INDEX>
+
+, overriding any
+device name or index given in the input filename.
+
+
All available devices can be enumerated by using -list_devices true , listing
+all device names and corresponding indices.
+
+
There are two device name aliases:
+
+default
+Select the AVFoundation default device of the corresponding type.
+
+
+none
+Do not record the corresponding media type.
+This is equivalent to specifying an empty device name or index.
+
+
+
+
+
+
27.2.1 Options# TOC
+
+
AVFoundation supports the following options:
+
+
+-list_devices <TRUE|FALSE>
+If set to true, a list of all available input devices is given showing all
+device names and indices.
+
+
+-video_device_index <INDEX>
+Specify the video device by its index. Overrides anything given in the input filename.
+
+
+-audio_device_index <INDEX>
+Specify the audio device by its index. Overrides anything given in the input filename.
+
+
+-pixel_format <FORMAT>
+Request the video device to use a specific pixel format.
+If the specified format is not supported, a list of available formats is given
+and the first one in this list is used instead. Available pixel formats are:
+monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
+ bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
+ yuv420p, nv12, yuyv422, gray
+
+
+
+
+
+
27.2.2 Examples# TOC
+
+
+ Print the list of AVFoundation supported devices and exit:
+
+
$ ffmpeg -f avfoundation -list_devices true -i ""
+
+
+ Record video from video device 0 and audio from audio device 0 into out.avi:
+
+
$ ffmpeg -f avfoundation -i "0:0" out.avi
+
+
+ Record video from video device 2 and audio from audio device 1 into out.avi:
+
+
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
+
+
+ Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
+
+
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
+
+
+
+
+
+
27.3 bktr# TOC
+
+
BSD video input device.
+
+
+
27.4 dshow# TOC
+
+
Windows DirectShow input device.
+
+
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
+Currently only audio and video devices are supported.
+
+
Multiple devices may be opened as separate inputs, but they may also be
+opened on the same input, which should improve synchronism between them.
+
+
The input name should be in the format:
+
+
+
+
where TYPE can be either audio or video ,
+and NAME is the device’s name.
+
+
+
27.4.1 Options# TOC
+
+
If no options are specified, the device’s defaults are used.
+If the device does not support the requested options, it will
+fail to open.
+
+
+video_size
+Set the video size in the captured video.
+
+
+framerate
+Set the frame rate in the captured video.
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+
+
+sample_size
+Set the sample size (in bits) of the captured audio.
+
+
+channels
+Set the number of channels in the captured audio.
+
+
+list_devices
+If set to true , print a list of devices and exit.
+
+
+list_options
+If set to true , print a list of selected device’s options
+and exit.
+
+
+video_device_number
+Set video device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+audio_device_number
+Set audio device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+pixel_format
+Select pixel format to be used by DirectShow. This may only be set when
+the video codec is not set or set to rawvideo.
+
+
+audio_buffer_size
+Set audio device buffer size in milliseconds (which can directly
+impact latency, depending on the device).
+Defaults to using the audio device’s
+default buffer size (typically some multiple of 500ms).
+Setting this value too low can degrade performance.
+See also
+http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
+
+
+
+
+
+
27.4.2 Examples# TOC
+
+
+ Print the list of DirectShow supported devices and exit:
+
+
$ ffmpeg -list_devices true -f dshow -i dummy
+
+
+ Open video device Camera :
+
+
$ ffmpeg -f dshow -i video="Camera"
+
+
+ Open second video device with name Camera :
+
+
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
+
+
+ Open video device Camera and audio device Microphone :
+
+
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
+
+
+ Print the list of supported options in selected device and exit:
+
+
$ ffmpeg -list_options true -f dshow -i video="Camera"
+
+
+
+
+
+
27.5 dv1394# TOC
+
+
Linux DV 1394 input device.
+
+
+
27.6 fbdev# TOC
+
+
Linux framebuffer input device.
+
+
The Linux framebuffer is a graphic hardware-independent abstraction
+layer to show graphics on a computer monitor, typically on the
+console. It is accessed through a file device node, usually
+/dev/fb0 .
+
+
For more detailed information read the file
+Documentation/fb/framebuffer.txt included in the Linux source tree.
+
+
To record from the framebuffer device /dev/fb0 with
+ffmpeg
:
+
+
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+
+
You can take a single screenshot image with the command:
+
+
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+
+
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
+
+
+
27.7 gdigrab# TOC
+
+
Win32 GDI-based screen capture device.
+
+
This device allows you to capture a region of the display on Windows.
+
+
There are two options for the input filename:
+
+
or
+
+
+
The first option will capture the entire desktop, or a fixed region of the
+desktop. The second option will instead capture the contents of a single
+window, regardless of its position on the screen.
+
+
For example, to grab the entire desktop using ffmpeg
:
+
+
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
+
+
+
Grab a 640x480 region at position 10,20
:
+
+
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
+
+
+
Grab the contents of the window named "Calculator"
+
+
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
+
+
+
+
27.7.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. Use the value 0
to
+not draw the pointer. Default value is 1
.
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+Note that show_region is incompatible with grabbing the contents
+of a single window.
+
+For example:
+
+
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
+
+
+
+video_size
+Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
+
+
+offset_x
+When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
+
+
+offset_y
+When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
+
+
+
+
+
+
27.8 iec61883# TOC
+
+
FireWire DV/HDV input device using libiec61883.
+
+
To enable this input device, you need libiec61883, libraw1394 and
+libavc1394 installed on your system. Use the configure option
+--enable-libiec61883
to compile with the device enabled.
+
+
The iec61883 capture device supports capturing from a video device
+connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
+FireWire stack (juju). This is the default DV/HDV input method in Linux
+Kernel 2.6.37 and later, since the old FireWire stack was removed.
+
+
Specify the FireWire port to be used as input file, or "auto"
+to choose the first port connected.
+
+
+
27.8.1 Options# TOC
+
+
+dvtype
+Override autodetection of DV/HDV. This should only be used if auto
+detection does not work, or if usage of a different device type
+should be prohibited. Treating a DV device as HDV (or vice versa) will
+not work and result in undefined behavior.
+The values auto , dv and hdv are supported.
+
+
+dvbuffer
+Set maximum size of buffer for incoming data, in frames. For DV, this
+is an exact value. For HDV, it is not frame exact, since HDV does
+not have a fixed frame size.
+
+
+dvguid
+Select the capture device by specifying its GUID. Capturing will only
+be performed from the specified device and fails if no device with the
+given GUID is found. This is useful to select the input if multiple
+devices are connected at the same time.
+Look at /sys/bus/firewire/devices to find out the GUIDs.
+
+
+
+
+
+
27.8.2 Examples# TOC
+
+
+ Grab and show the input of a FireWire DV/HDV device.
+
+
ffplay -f iec61883 -i auto
+
+
+ Grab and record the input of a FireWire DV/HDV device,
+using a packet buffer of 100000 packets if the source is HDV.
+
+
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
+
+
+
+
+
+
27.9 jack# TOC
+
+
JACK input device.
+
+
To enable this input device during configuration you need libjack
+installed on your system.
+
+
A JACK input device creates one or more JACK writable clients, one for
+each audio channel, with name client_name :input_N , where
+client_name is the name provided by the application, and N
+is a number which identifies the channel.
+Each writable client will send the acquired data to the FFmpeg input
+device.
+
+
Once you have created one or more JACK readable clients, you need to
+connect them to one or more JACK writable clients.
+
+
To connect or disconnect JACK clients you can use the jack_connect
+and jack_disconnect
programs, or do it through a graphical interface,
+for example with qjackctl
.
+
+
To list the JACK clients and their properties you can invoke the command
+jack_lsp
.
+
+
Follows an example which shows how to capture a JACK readable client
+with ffmpeg
.
+
+
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+
+
For more information read:
+http://jackaudio.org/
+
+
+
27.10 lavfi# TOC
+
+
Libavfilter input virtual device.
+
+
This input device reads data from the open output pads of a libavfilter
+filtergraph.
+
+
For each filtergraph open output, the input device will create a
+corresponding stream which is mapped to the generated output. Currently
+only video data is supported. The filtergraph is specified through the
+option graph .
+
+
+
27.10.1 Options# TOC
+
+
+graph
+Specify the filtergraph to use as input. Each video open output must be
+labelled by a unique string of the form "outN ", where N is a
+number starting from 0 corresponding to the mapped input stream
+generated by the device.
+The first unlabelled output is automatically assigned to the "out0"
+label, but all the others need to be specified explicitly.
+
+The suffix "+subcc" can be appended to the output label to create an extra
+stream with the closed captions packets attached to that output
+(experimental; only for EIA-608 / CEA-708 for now).
+The subcc streams are created after all the normal streams, in the order of
+the corresponding stream.
+For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
+stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
+
+If not specified defaults to the filename specified for the input
+device.
+
+
+graph_file
+Set the filename of the filtergraph to be read and sent to the other
+filters. Syntax of the filtergraph is the same as the one specified by
+the option graph .
+
+
+
+
+
+
27.10.2 Examples# TOC
+
+
+ Create a color video stream and play it back with ffplay
:
+
+
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
+
+
+ As the previous example, but use filename for specifying the graph
+description, and omit the "out0" label:
+
+
ffplay -f lavfi color=c=pink
+
+
+ Create three different video test filtered sources and play them:
+
+
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
+
+
+ Read an audio stream from a file using the amovie source and play it
+back with ffplay
:
+
+
ffplay -f lavfi "amovie=test.wav"
+
+
+ Read an audio stream and a video stream and play it back with
+ffplay
:
+
+
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
+
+
+ Dump decoded frames to images and closed captions to a file (experimental):
+
+
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
+
+
+
+
+
+
27.11 libcdio# TOC
+
+
Audio-CD input device based on cdio.
+
+
To enable this input device during configuration you need libcdio
+installed on your system. Requires the configure option
+--enable-libcdio
.
+
+
This device allows playing and grabbing from an Audio-CD.
+
+
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
+you may run the command:
+
+
ffmpeg -f libcdio -i /dev/sr0 cd.wav
+
+
+
+
27.12 libdc1394# TOC
+
+
IIDC1394 input device, based on libdc1394 and libraw1394.
+
+
Requires the configure option --enable-libdc1394
.
+
+
+
27.13 openal# TOC
+
+
The OpenAL input device provides audio capture on all systems with a
+working OpenAL 1.1 implementation.
+
+
To enable this input device during configuration, you need OpenAL
+headers and libraries installed on your system, and need to configure
+FFmpeg with --enable-openal
.
+
+
OpenAL headers and libraries should be provided as part of your OpenAL
+implementation, or as an additional download (an SDK). Depending on your
+installation you may need to specify additional flags via the
+--extra-cflags
and --extra-ldflags
for allowing the build
+system to locate the OpenAL headers and libraries.
+
+
An incomplete list of OpenAL implementations follows:
+
+
+Creative
+The official Windows implementation, providing hardware acceleration
+with supported devices and software fallback.
+See http://openal.org/ .
+
+OpenAL Soft
+Portable, open source (LGPL) software implementation. Includes
+backends for the most common sound APIs on the Windows, Linux,
+Solaris, and BSD operating systems.
+See http://kcat.strangesoft.net/openal.html .
+
+Apple
+OpenAL is part of Core Audio, the official Mac OS X Audio interface.
+See http://developer.apple.com/technologies/mac/audio-and-video.html
+
+
+
+
This device allows one to capture from an audio input device handled
+through OpenAL.
+
+
You need to specify the name of the device to capture in the provided
+filename. If the empty string is provided, the device will
+automatically select the default device. You can get the list of the
+supported devices by using the option list_devices .
+
+
+
27.13.1 Options# TOC
+
+
+channels
+Set the number of channels in the captured audio. Only the values
+1 (monaural) and 2 (stereo) are currently supported.
+Defaults to 2 .
+
+
+sample_size
+Set the sample size (in bits) of the captured audio. Only the values
+8 and 16 are currently supported. Defaults to
+16 .
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+Defaults to 44.1k .
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+
+
+
+
27.13.2 Examples# TOC
+
+
Print the list of OpenAL supported devices and exit:
+
+
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+
+
Capture from the OpenAL device DR-BT101 via PulseAudio :
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+
+
Capture from the default device (note the empty string '' as filename):
+
+
$ ffmpeg -f openal -i '' out.ogg
+
+
+
Capture from two devices simultaneously, writing to two different files,
+within the same ffmpeg
command:
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+
Note: not all OpenAL implementations support multiple simultaneous capture -
+try the latest OpenAL Soft if the above does not work.
+
+
+
27.14 oss# TOC
+
+
Open Sound System input device.
+
+
The filename to provide to the input device is the device node
+representing the OSS input device, and is usually set to
+/dev/dsp .
+
+
For example to grab from /dev/dsp using ffmpeg
use the
+command:
+
+
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+
+
For more information about OSS see:
+http://manuals.opensound.com/usersguide/dsp.html
+
+
+
27.15 pulse# TOC
+
+
PulseAudio input device.
+
+
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
+
+
The filename to provide to the input device is a source device or the
+string "default".
+
+
To list the PulseAudio source devices and their properties you can invoke
+the command pactl list sources
.
+
+
More information about PulseAudio can be found on http://www.pulseaudio.org .
+
+
+
27.15.1 Options# TOC
+
+server
+Connect to a specific PulseAudio server, specified by an IP address.
+Default server is used when not provided.
+
+
+name
+Specify the application name PulseAudio will use when showing active clients,
+by default it is the LIBAVFORMAT_IDENT
string.
+
+
+stream_name
+Specify the stream name PulseAudio will use when showing active streams,
+by default it is "record".
+
+
+sample_rate
+Specify the samplerate in Hz, by default 48kHz is used.
+
+
+channels
+Specify the channels in use, by default 2 (stereo) is set.
+
+
+frame_size
+Specify the number of bytes per frame, by default it is set to 1024.
+
+
+fragment_size
+Specify the minimal buffering fragment in PulseAudio, it will affect the
+audio latency. By default it is unset.
+
+
+
+
+
27.15.2 Examples# TOC
+
Record a stream from default device:
+
+
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+
+
+
27.16 qtkit# TOC
+
+
QTKit input device.
+
+
The filename passed as input is parsed to contain either a device name or index.
+The device index can also be given by using -video_device_index.
+A given device index will override any given device name.
+If the desired device consists of numbers only, use -video_device_index to identify it.
+The default device will be chosen if an empty string or the device name "default" is given.
+The available devices can be enumerated by using -list_devices.
+
+
+
ffmpeg -f qtkit -i "0" out.mpg
+
+
+
+
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
+
+
+
+
ffmpeg -f qtkit -i "default" out.mpg
+
+
+
+
ffmpeg -f qtkit -list_devices true -i ""
+
+
+
+
27.17 sndio# TOC
+
+
sndio input device.
+
+
To enable this input device during configuration you need libsndio
+installed on your system.
+
+
The filename to provide to the input device is the device node
+representing the sndio input device, and is usually set to
+/dev/audio0 .
+
+
For example to grab from /dev/audio0 using ffmpeg
use the
+command:
+
+
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+
+
+
27.18 video4linux2, v4l2# TOC
+
+
Video4Linux2 input video device.
+
+
"v4l2" can be used as alias for "video4linux2".
+
+
If FFmpeg is built with v4l-utils support (by using the
+--enable-libv4l2
configure option), it is possible to use it with the
+-use_libv4l2
input device option.
+
+
The name of the device to grab is a file device node, usually Linux
+systems tend to automatically create such nodes when the device
+(e.g. an USB webcam) is plugged into the system, and has a name of the
+kind /dev/videoN , where N is a number associated to
+the device.
+
+
Video4Linux2 devices usually support a limited set of
+width xheight sizes and frame rates. You can check which are
+supported using -list_formats all
for Video4Linux2 devices.
+Some devices, like TV cards, support one or more standards. It is possible
+to list all the supported standards using -list_standards all
.
+
+
The time base for the timestamps is 1 microsecond. Depending on the kernel
+version and configuration, the timestamps may be derived from the real time
+clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
+boot time, unaffected by NTP or manual changes to the clock). The
+-timestamps abs or -ts abs option can be used to force
+conversion into the real time clock.
+
+
Some usage examples of the video4linux2 device with ffmpeg
+and ffplay
:
+
+ Grab and show the input of a video4linux2 device:
+
+
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
+
+
+ Grab and record the input of a video4linux2 device, leave the
+frame rate and size as previously set:
+
+
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
+
+
+
+
For more information about Video4Linux, check http://linuxtv.org/ .
+
+
+
27.18.1 Options# TOC
+
+
+standard
+Set the standard. Must be the name of a supported standard. To get a
+list of the supported standards, use the list_standards
+option.
+
+
+channel
+Set the input channel number. Default to -1, which means using the
+previously selected channel.
+
+
+video_size
+Set the video frame size. The argument must be a string in the form
+WIDTH xHEIGHT or a valid size abbreviation.
+
+
+pixel_format
+Select the pixel format (only valid for raw video input).
+
+
+input_format
+Set the preferred pixel format (for raw video) or a codec name.
+This option allows one to select the input format, when several are
+available.
+
+
+framerate
+Set the preferred video frame rate.
+
+
+list_formats
+List available formats (supported pixel formats, codecs, and frame
+sizes) and exit.
+
+Available values are:
+
+‘all ’
+Show all available (compressed and non-compressed) formats.
+
+
+‘raw ’
+Show only raw video (non-compressed) formats.
+
+
+‘compressed ’
+Show only compressed formats.
+
+
+
+
+list_standards
+List supported standards and exit.
+
+Available values are:
+
+‘all ’
+Show all supported standards.
+
+
+
+
+timestamps, ts
+Set type of timestamps for grabbed frames.
+
+Available values are:
+
+‘default ’
+Use timestamps from the kernel.
+
+
+‘abs ’
+Use absolute timestamps (wall clock).
+
+
+‘mono2abs ’
+Force conversion from monotonic to absolute timestamps.
+
+
+
+Default value is default
.
+
+
+
+
+
27.19 vfwcap# TOC
+
+
VfW (Video for Windows) capture input device.
+
+
The filename passed as input is the capture driver number, ranging from
+0 to 9. You may use "list" as filename to print a list of drivers. Any
+other filename will be interpreted as device number 0.
+
+
+
27.20 x11grab# TOC
+
+
X11 video input device.
+
+
Depends on X11, Xext, and Xfixes. Requires the configure option
+--enable-x11grab
.
+
+
This device allows one to capture a region of an X11 display.
+
+
The filename passed as input has the syntax:
+
+
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
+
+
+
hostname :display_number .screen_number specifies the
+X11 display name of the screen to grab from. hostname can be
+omitted, and defaults to "localhost". The environment variable
+DISPLAY
contains the default display name.
+
+
x_offset and y_offset specify the offsets of the grabbed
+area with respect to the top-left border of the X11 screen. They
+default to 0.
+
+
Check the X11 documentation (e.g. man X) for more detailed information.
+
+
Use the xdpyinfo
program for getting basic information about the
+properties of your X11 display (e.g. grep for "name" or "dimensions").
+
+
For example to grab from :0.0 using ffmpeg
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
Grab at position 10,20
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+
+
27.20.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. A value of 0
 specifies
+not to draw the pointer. Default value is 1
.
+
+
+follow_mouse
+Make the grabbed area follow the mouse. The argument can be
+centered
or a number of pixels PIXELS .
+
+When it is specified with "centered", the grabbing region follows the mouse
+pointer and keeps the pointer at the center of region; otherwise, the region
+follows only when the mouse pointer reaches within PIXELS (greater than
+zero) to the edge of region.
+
+For example:
+
+
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+To follow only when the mouse pointer reaches within 100 pixels to edge:
+
+
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+For example:
+
+
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+With follow_mouse :
+
+
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+video_size
+Set the video frame size. Default value is vga
.
+
+
+use_shm
+Use the MIT-SHM extension for shared memory. Default value is 1
.
+It may be necessary to disable it for remote displays.
+
+
+
+
+
27.21 decklink# TOC
+
+
The decklink input device provides capture capabilities for Blackmagic
+DeckLink devices.
+
+
To enable this input device, you need the Blackmagic DeckLink SDK and you
+need to configure with the appropriate --extra-cflags
+and --extra-ldflags
.
+On Windows, you need to run the IDL files through widl
.
+
+
DeckLink is very picky about the formats it supports. Pixel format is always
+uyvy422, framerate and video size must be determined for your device with
+-list_formats 1
. Audio sample rate is always 48 kHz and the number
+of channels currently is limited to 2 (stereo).
+
+
+
27.21.1 Options# TOC
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+list_formats
+If set to true , print a list of supported formats and exit.
+Defaults to false .
+
+
+
+
+
+
27.21.2 Examples# TOC
+
+
+ List input devices:
+
+
ffmpeg -f decklink -list_devices 1 -i dummy
+
+
+ List supported formats:
+
+
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
+
+
+ Capture video clip at 1080i50 (format 11):
+
+
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
+
+
+
+
+
+
+
28 Output Devices# TOC
+
+
Output devices are configured elements in FFmpeg that can write
+multimedia data to an output device attached to your system.
+
+
When you configure your FFmpeg build, all the supported output devices
+are enabled by default. You can list all available ones using the
+configure option "--list-outdevs".
+
+
You can disable all the output devices using the configure option
+"--disable-outdevs", and selectively enable an output device using the
+option "--enable-outdev=OUTDEV ", or you can disable a particular
+output device using the option "--disable-outdev=OUTDEV ".
+
+
The option "-devices" of the ff* tools will display the list of
+enabled output devices.
+
+
A description of the currently available output devices follows.
+
+
+
28.1 alsa# TOC
+
+
ALSA (Advanced Linux Sound Architecture) output device.
+
+
+
28.1.1 Examples# TOC
+
+
+ Play a file on default ALSA device:
+
+
ffmpeg -i INPUT -f alsa default
+
+
+ Play a file on soundcard 1, audio device 7:
+
+
ffmpeg -i INPUT -f alsa hw:1,7
+
+
+
+
+
28.2 caca# TOC
+
+
CACA output device.
+
+
This output device allows one to show a video stream in CACA window.
+Only one CACA window is allowed per application, so you can
+have only one instance of this output device in an application.
+
+
To enable this output device you need to configure FFmpeg with
+--enable-libcaca
.
+libcaca is a graphics library that outputs text instead of pixels.
+
+
For more information about libcaca, check:
+http://caca.zoy.org/wiki/libcaca
+
+
+
28.2.1 Options# TOC
+
+
+window_title
+Set the CACA window title, if not specified default to the filename
+specified for the output device.
+
+
+window_size
+Set the CACA window size, can be a string of the form
+width xheight or a video size abbreviation.
+If not specified it defaults to the size of the input video.
+
+
+driver
+Set display driver.
+
+
+algorithm
+Set dithering algorithm. Dithering is necessary
+because the picture being rendered has usually far more colours than
+the available palette.
+The accepted values are listed with -list_dither algorithms
.
+
+
+antialias
+Set antialias method. Antialiasing smoothens the rendered
+image and avoids the commonly seen staircase effect.
+The accepted values are listed with -list_dither antialiases
.
+
+
+charset
+Set which characters are going to be used when rendering text.
+The accepted values are listed with -list_dither charsets
.
+
+
+color
+Set color to be used when rendering text.
+The accepted values are listed with -list_dither colors
.
+
+
+list_drivers
+If set to true , print a list of available drivers and exit.
+
+
+list_dither
+List available dither options related to the argument.
+The argument must be one of algorithms
, antialiases
,
+charsets
, colors
.
+
+
+
+
+
28.2.2 Examples# TOC
+
+
+ The following command shows the ffmpeg
 output in a
+CACA window, forcing its size to 80x25:
+
+
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
+
+
+ Show the list of available drivers and exit:
+
+
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
+
+
+ Show the list of available dither colors and exit:
+
+
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
+
+
+
+
+
28.3 decklink# TOC
+
+
The decklink output device provides playback capabilities for Blackmagic
+DeckLink devices.
+
+
To enable this output device, you need the Blackmagic DeckLink SDK and you
+need to configure with the appropriate --extra-cflags
+and --extra-ldflags
.
+On Windows, you need to run the IDL files through widl
.
+
+
DeckLink is very picky about the formats it supports. Pixel format is always
+uyvy422, framerate and video size must be determined for your device with
+-list_formats 1
. Audio sample rate is always 48 kHz.
+
+
+
28.3.1 Options# TOC
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+list_formats
+If set to true , print a list of supported formats and exit.
+Defaults to false .
+
+
+preroll
+Amount of time to preroll video in seconds.
+Defaults to 0.5 .
+
+
+
+
+
+
28.3.2 Examples# TOC
+
+
+ List output devices:
+
+
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
+
+
+ List supported formats:
+
+
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
+
+
+ Play video clip:
+
+
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
+
+
+ Play video clip with non-standard framerate or video size:
+
+
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
+
+
+
+
+
+
28.4 fbdev# TOC
+
+
Linux framebuffer output device.
+
+
The Linux framebuffer is a graphic hardware-independent abstraction
+layer to show graphics on a computer monitor, typically on the
+console. It is accessed through a file device node, usually
+/dev/fb0 .
+
+
For more detailed information read the file
+Documentation/fb/framebuffer.txt included in the Linux source tree.
+
+
+
28.4.1 Options# TOC
+
+xoffset
+yoffset
+Set x/y coordinate of top left corner. Default is 0.
+
+
+
+
+
28.4.2 Examples# TOC
+
Play a file on framebuffer device /dev/fb0 .
+Required pixel format depends on current framebuffer settings.
+
+
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
+
+
+
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
+
+
+
28.5 opengl# TOC
+
OpenGL output device.
+
+
To enable this output device you need to configure FFmpeg with --enable-opengl
.
+
+
This output device allows one to render to OpenGL context.
+Context may be provided by application or default SDL window is created.
+
+
When device renders to external context, application must implement handlers for following messages:
+AV_DEV_TO_APP_CREATE_WINDOW_BUFFER
- create OpenGL context on current thread.
+AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER
- make OpenGL context current.
+AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER
- swap buffers.
+AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER
- destroy OpenGL context.
+Application is also required to inform a device about current resolution by sending AV_APP_TO_DEV_WINDOW_SIZE
message.
+
+
+
28.5.1 Options# TOC
+
+background
+Set background color. Black is a default.
+
+no_window
+Disables default SDL window when set to non-zero value.
+Application must provide OpenGL context and both window_size_cb
and window_swap_buffers_cb
callbacks when set.
+
+window_title
+Set the SDL window title, if not specified default to the filename specified for the output device.
+Ignored when no_window is set.
+
+window_size
+Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
+If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
+Mostly usable when no_window is not set.
+
+
+
+
+
+
28.5.2 Examples# TOC
+
Play a file on SDL window using OpenGL rendering:
+
+
ffmpeg -i INPUT -f opengl "window title"
+
+
+
+
28.6 oss# TOC
+
+
OSS (Open Sound System) output device.
+
+
+
28.7 pulse# TOC
+
+
PulseAudio output device.
+
+
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
+
+
More information about PulseAudio can be found on http://www.pulseaudio.org
+
+
+
28.7.1 Options# TOC
+
+server
+Connect to a specific PulseAudio server, specified by an IP address.
+Default server is used when not provided.
+
+
+name
+Specify the application name PulseAudio will use when showing active clients,
+by default it is the LIBAVFORMAT_IDENT
string.
+
+
+stream_name
+Specify the stream name PulseAudio will use when showing active streams,
+by default it is set to the specified output name.
+
+
+device
+Specify the device to use. Default device is used when not provided.
+List of output devices can be obtained with command pactl list sinks
.
+
+
+buffer_size
+buffer_duration
+Control the size and duration of the PulseAudio buffer. A small buffer
+gives more control, but requires more frequent updates.
+
+buffer_size specifies size in bytes while
+buffer_duration specifies duration in milliseconds.
+
+When both options are provided then the highest value is used
+(duration is recalculated to bytes using stream parameters). If they
+are set to 0 (which is default), the device will use the default
+PulseAudio duration value. By default PulseAudio set buffer duration
+to around 2 seconds.
+
+
+prebuf
+Specify pre-buffering size in bytes. The server does not start with
+playback before at least prebuf bytes are available in the
+buffer. By default this option is initialized to the same value as
+buffer_size or buffer_duration (whichever is bigger).
+
+
+minreq
+Specify minimum request size in bytes. The server does not request less
+than minreq bytes from the client, instead waits until the buffer
+is free enough to request more bytes at once. It is recommended to not set
+this option, which will initialize this to a value that is deemed sensible
+by the server.
+
+
+
+
+
+
28.7.2 Examples# TOC
+
Play a file on default device on default server:
+
+
ffmpeg -i INPUT -f pulse "stream name"
+
+
+
+
28.8 sdl# TOC
+
+
SDL (Simple DirectMedia Layer) output device.
+
+
This output device allows one to show a video stream in an SDL
+window. Only one SDL window is allowed per application, so you can
+have only one instance of this output device in an application.
+
+
To enable this output device you need libsdl installed on your system
+when configuring your build.
+
+
For more information about SDL, check:
+http://www.libsdl.org/
+
+
+
28.8.1 Options# TOC
+
+
+window_title
+Set the SDL window title, if not specified default to the filename
+specified for the output device.
+
+
+icon_title
+Set the name of the iconified SDL window, if not specified it is set
+to the same value of window_title .
+
+
+window_size
+Set the SDL window size, can be a string of the form
+width xheight or a video size abbreviation.
+If not specified it defaults to the size of the input video,
+downscaled according to the aspect ratio.
+
+
+window_fullscreen
+Set fullscreen mode when non-zero value is provided.
+Default value is zero.
+
+
+
+
+
28.8.2 Interactive commands# TOC
+
+
The window created by the device can be controlled through the
+following interactive commands.
+
+
+q, ESC
+Quit the device immediately.
+
+
+
+
+
28.8.3 Examples# TOC
+
+
The following command shows the ffmpeg
 output in an
+SDL window, forcing its size to the qcif format:
+
+
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
+
+
+
+
28.9 sndio# TOC
+
+
sndio audio output device.
+
+
+
28.10 xv# TOC
+
+
XV (XVideo) output device.
+
+
This output device allows one to show a video stream in a X Window System
+window.
+
+
+
28.10.1 Options# TOC
+
+
+display_name
+Specify the hardware display name, which determines the display and
+communications domain to be used.
+
+The display name or DISPLAY environment variable can be a string in
+the format hostname [:number [.screen_number ]].
+
+hostname specifies the name of the host machine on which the
+display is physically attached. number specifies the number of
+the display server on that host machine. screen_number specifies
+the screen to be used on that server.
+
+If unspecified, it defaults to the value of the DISPLAY environment
+variable.
+
+For example, dual-headed:0.1
would specify screen 1 of display
+0 on the machine named “dual-headed”.
+
+Check the X11 specification for more detailed information about the
+display name format.
+
+
+window_id
+When set to a non-zero value the device doesn’t create a new window,
+but uses an existing one with the provided window_id . By default
+this option is set to zero and the device creates its own window.
+
+
+window_size
+Set the created window size, can be a string of the form
+width xheight or a video size abbreviation. If not
+specified it defaults to the size of the input video.
+Ignored when window_id is set.
+
+
+window_x
+window_y
+Set the X and Y window offsets for the created window. They are both
+set to 0 by default. The values may be ignored by the window manager.
+Ignored when window_id is set.
+
+
+window_title
+Set the window title, if not specified default to the filename
+specified for the output device. Ignored when window_id is set.
+
+
+
+
For more information about XVideo see http://www.x.org/ .
+
+
+
28.10.2 Examples# TOC
+
+
+ Decode, display and encode video input with ffmpeg
at the
+same time:
+
+
ffmpeg -i INPUT OUTPUT -f xv display
+
+
+ Decode and display the input video to multiple X11 windows:
+
+
ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
+
+
+
+
+
29 Resampler Options# TOC
+
+
The audio resampler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, option =value for the aresample filter,
+by setting the value explicitly in the
+SwrContext
options or using the libavutil/opt.h API for
+programmatic use.
+
+
+ich, in_channel_count
+Set the number of input channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+in_channel_layout is set.
+
+
+och, out_channel_count
+Set the number of output channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+out_channel_layout is set.
+
+
+uch, used_channel_count
+Set the number of used input channels. Default value is 0. This option is
+only used for special remapping.
+
+
+isr, in_sample_rate
+Set the input sample rate. Default value is 0.
+
+
+osr, out_sample_rate
+Set the output sample rate. Default value is 0.
+
+
+isf, in_sample_fmt
+Specify the input sample format. It is set by default to none
.
+
+
+osf, out_sample_fmt
+Specify the output sample format. It is set by default to none
.
+
+
+tsf, internal_sample_fmt
+Set the internal sample format. Default value is none
.
+This will automatically be chosen when it is not explicitly set.
+
+
+icl, in_channel_layout
+ocl, out_channel_layout
+Set the input/output channel layout.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+clev, center_mix_level
+Set the center mix level. It is a value expressed in deciBel, and must be
+in the interval [-32,32].
+
+
+slev, surround_mix_level
+Set the surround mix level. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+lfe_mix_level
+Set LFE mix into non LFE level. It is used when there is a LFE input but no
+LFE output. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+rmvol, rematrix_volume
+Set rematrix volume. Default value is 1.0.
+
+
+rematrix_maxval
+Set maximum output value for rematrixing.
+This can be used to prevent clipping vs. preventing volume reduction.
+A value of 1.0 prevents clipping.
+
+
+flags, swr_flags
+Set flags used by the converter. Default value is 0.
+
+It supports the following individual flags:
+
+res
+force resampling, this flag forces resampling to be used even when the
+input and output sample rates match.
+
+
+
+
+dither_scale
+Set the dither scale. Default value is 1.
+
+
+dither_method
+Set dither method. Default value is 0.
+
+Supported values:
+
+‘rectangular ’
+select rectangular dither
+
+‘triangular ’
+select triangular dither
+
+‘triangular_hp ’
+select triangular dither with high pass
+
+‘lipshitz ’
+select lipshitz noise shaping dither
+
+‘shibata ’
+select shibata noise shaping dither
+
+‘low_shibata ’
+select low shibata noise shaping dither
+
+‘high_shibata ’
+select high shibata noise shaping dither
+
+‘f_weighted ’
+select f-weighted noise shaping dither
+
+‘modified_e_weighted ’
+select modified-e-weighted noise shaping dither
+
+‘improved_e_weighted ’
+select improved-e-weighted noise shaping dither
+
+
+
+
+
+resampler
+Set resampling engine. Default value is swr.
+
+Supported values:
+
+‘swr ’
+select the native SW Resampler; filter options precision and cheby are not
+applicable in this case.
+
+‘soxr ’
+select the SoX Resampler (where available); compensation, and filter options
+filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
+case.
+
+
+
+
+filter_size
+For swr only, set resampling filter size, default value is 32.
+
+
+phase_shift
+For swr only, set resampling phase shift, default value is 10, and must be in
+the interval [0,30].
+
+
+linear_interp
+Use Linear Interpolation if set to 1, default value is 0.
+
+
+cutoff
+Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
+value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
+(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
+
+
+precision
+For soxr only, the precision in bits to which the resampled signal will be
+calculated. The default value of 20 (which, with suitable dithering, is
+appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
+value of 28 gives SoX’s ’Very High Quality’.
+
+
+cheby
+For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
+approximation for ’irrational’ ratios. Default value is 0.
+
+
+async
+For swr only, simple 1 parameter audio sync to timestamps using stretching,
+squeezing, filling and trimming. Setting this to 1 will enable filling and
+trimming, larger values represent the maximum amount in samples that the data
+may be stretched or squeezed for each second.
+Default value is 0, thus no compensation is applied to make the samples match
+the audio timestamps.
+
+
+first_pts
+For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
+This allows for padding/trimming at the start of stream. By default, no
+assumption is made about the first frame’s expected pts, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative pts due to encoder delay.
+
+
+min_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger stretching/squeezing/filling or trimming of the
+data to make it match the timestamps. The default is that
+stretching/squeezing/filling and trimming is disabled
+(min_comp = FLT_MAX
).
+
+
+min_hard_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger adding/dropping samples to make it match the
+timestamps. This option effectively is a threshold to select between
+hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
+all compensation is by default disabled through min_comp .
+The default is 0.1.
+
+
+comp_duration
+For swr only, set duration (in seconds) over which data is stretched/squeezed
+to make it match the timestamps. Must be a non-negative double float value,
+default value is 1.0.
+
+
+max_soft_comp
+For swr only, set maximum factor by which data is stretched/squeezed to make it
+match the timestamps. Must be a non-negative double float value, default value
+is 0.
+
+
+matrix_encoding
+Select matrixed stereo encoding.
+
+It accepts the following values:
+
+‘none ’
+select none
+
+‘dolby ’
+select Dolby
+
+‘dplii ’
+select Dolby Pro Logic II
+
+
+
+Default value is none
.
+
+
+filter_type
+For swr only, select resampling filter type. This only affects resampling
+operations.
+
+It accepts the following values:
+
+‘cubic ’
+select cubic
+
+‘blackman_nuttall ’
+select Blackman Nuttall Windowed Sinc
+
+‘kaiser ’
+select Kaiser Windowed Sinc
+
+
+
+
+kaiser_beta
+For swr only, set Kaiser Window Beta value. Must be an integer in the
+interval [2,16], default value is 9.
+
+
+output_sample_bits
+For swr only, set number of used output sample bits for dithering. Must be an integer in the
+interval [0,64], default value is 0, which means it’s not used.
+
+
+
+
+
+
30 Scaler Options# TOC
+
+
The video scaler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools. For programmatic use, they can be set explicitly in the
+SwsContext
options or through the libavutil/opt.h API.
+
+
+
+
+sws_flags
+Set the scaler flags. This is also used to set the scaling
+algorithm. Only a single algorithm should be selected.
+
+It accepts the following values:
+
+‘fast_bilinear ’
+Select fast bilinear scaling algorithm.
+
+
+‘bilinear ’
+Select bilinear scaling algorithm.
+
+
+‘bicubic ’
+Select bicubic scaling algorithm.
+
+
+‘experimental ’
+Select experimental scaling algorithm.
+
+
+‘neighbor ’
+Select nearest neighbor rescaling algorithm.
+
+
+‘area ’
+Select averaging area rescaling algorithm.
+
+
+‘bicublin ’
+Select bicubic scaling algorithm for the luma component, bilinear for
+chroma components.
+
+
+‘gauss ’
+Select Gaussian rescaling algorithm.
+
+
+‘sinc ’
+Select sinc rescaling algorithm.
+
+
+‘lanczos ’
+Select lanczos rescaling algorithm.
+
+
+‘spline ’
+Select natural bicubic spline rescaling algorithm.
+
+
+‘print_info ’
+Enable printing/debug logging.
+
+
+‘accurate_rnd ’
+Enable accurate rounding.
+
+
+‘full_chroma_int ’
+Enable full chroma interpolation.
+
+
+‘full_chroma_inp ’
+Select full chroma input.
+
+
+‘bitexact ’
+Enable bitexact output.
+
+
+
+
+srcw
+Set source width.
+
+
+srch
+Set source height.
+
+
+dstw
+Set destination width.
+
+
+dsth
+Set destination height.
+
+
+src_format
+Set source pixel format (must be expressed as an integer).
+
+
+dst_format
+Set destination pixel format (must be expressed as an integer).
+
+
+src_range
+Select source range.
+
+
+dst_range
+Select destination range.
+
+
+param0, param1
+Set scaling algorithm parameters. The specified values are specific of
+some scaling algorithms and ignored by others. The specified values
+are floating point number values.
+
+
+sws_dither
+Set the dithering algorithm. Accepts one of the following
+values. Default value is ‘auto ’.
+
+
+‘auto ’
+automatic choice
+
+
+‘none ’
+no dithering
+
+
+‘bayer ’
+bayer dither
+
+
+‘ed ’
+error diffusion dither
+
+
+‘a_dither ’
+arithmetic dither, based using addition
+
+
+‘x_dither ’
+arithmetic dither, based using xor (more random/less apparent patterning than
+a_dither).
+
+
+
+
+
+
+
+
+
31 Filtering Introduction# TOC
+
+
Filtering in FFmpeg is enabled through the libavfilter library.
+
+
In libavfilter, a filter can have multiple inputs and multiple
+outputs.
+To illustrate the sorts of things that are possible, we consider the
+following filtergraph.
+
+
+
[main]
+input --> split ---------------------> overlay --> output
+ | ^
+ |[tmp] [flip]|
+ +-----> crop --> vflip -------+
+
+
+
This filtergraph splits the input stream in two streams, then sends one
+stream through the crop filter and the vflip filter, before merging it
+back with the other stream by overlaying it on top. You can use the
+following command to achieve this:
+
+
+
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+
+
The result will be that the top half of the video is mirrored
+onto the bottom half of the output video.
+
+
Filters in the same linear chain are separated by commas, and distinct
+linear chains of filters are separated by semicolons. In our example,
+crop,vflip are in one linear chain, split and
+overlay are separately in another. The points where the linear
+chains join are labelled by names enclosed in square brackets. In the
+example, the split filter generates two outputs that are associated to
+the labels [main] and [tmp] .
+
+
The stream sent to the second output of split , labelled as
+[tmp] , is processed through the crop filter, which crops
+away the lower half part of the video, and then vertically flipped. The
+overlay filter takes in input the first unchanged output of the
+split filter (which was labelled as [main] ), and overlay on its
+lower half the output generated by the crop,vflip filterchain.
+
+
Some filters take in input a list of parameters: they are specified
+after the filter name and an equal sign, and are separated from each other
+by a colon.
+
+
There exist so-called source filters that do not have an
+audio/video input, and sink filters that will not have audio/video
+output.
+
+
+
+
32 graph2dot# TOC
+
+
The graph2dot program included in the FFmpeg tools
+directory can be used to parse a filtergraph description and issue a
+corresponding textual representation in the dot language.
+
+
Invoke the command:
+
+
+
to see how to use graph2dot .
+
+
You can then pass the dot description to the dot program (from
+the graphviz suite of programs) and obtain a graphical representation
+of the filtergraph.
+
+
For example the sequence of commands:
+
+
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+
+
can be used to create and display an image representing the graph
+described by the GRAPH_DESCRIPTION string. Note that this string must be
+a complete self-contained graph, with its inputs and outputs explicitly defined.
+For example if your command line is of the form:
+
+
ffmpeg -i infile -vf scale=640:360 outfile
+
+
your GRAPH_DESCRIPTION string will need to be of the form:
+
+
nullsrc,scale=640:360,nullsink
+
+
you may also need to set the nullsrc parameters and add a format
+filter in order to simulate a specific input file.
+
+
+
+
33 Filtergraph description# TOC
+
+
A filtergraph is a directed graph of connected filters. It can contain
+cycles, and there can be multiple links between a pair of
+filters. Each link has one input pad on one side connecting it to one
+filter from which it takes its input, and one output pad on the other
+side connecting it to one filter accepting its output.
+
+
Each filter in a filtergraph is an instance of a filter class
+registered in the application, which defines the features and the
+number of input and output pads of the filter.
+
+
A filter with no input pads is called a "source", and a filter with no
+output pads is called a "sink".
+
+
+
33.1 Filtergraph syntax# TOC
+
+
A filtergraph has a textual representation, which is
+recognized by the -filter /-vf and -filter_complex
+options in ffmpeg
and -vf in ffplay
, and by the
+avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
+libavfilter/avfilter.h .
+
+
A filterchain consists of a sequence of connected filters, each one
+connected to the previous one in the sequence. A filterchain is
+represented by a list of ","-separated filter descriptions.
+
+
A filtergraph consists of a sequence of filterchains. A sequence of
+filterchains is represented by a list of ";"-separated filterchain
+descriptions.
+
+
A filter is represented by a string of the form:
+[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
+
+
filter_name is the name of the filter class of which the
+described filter is an instance of, and has to be the name of one of
+the filter classes registered in the program.
+The name of the filter class is optionally followed by a string
+"=arguments ".
+
+
arguments is a string which contains the parameters used to
+initialize the filter instance. It may have one of two forms:
+
+ A ’:’-separated list of key=value pairs.
+
+ A ’:’-separated list of value . In this case, the keys are assumed to be
+the option names in the order they are declared. E.g. the fade
filter
+declares three options in this order – type , start_frame and
+nb_frames . Then the parameter list in:0:30 means that the value
+in is assigned to the option type , 0 to
+start_frame and 30 to nb_frames .
+
+ A ’:’-separated list of mixed direct value and long key=value
+pairs. The direct value must precede the key=value pairs, and
+follow the same constraints order of the previous point. The following
+key=value pairs can be set in any preferred order.
+
+
+
+
If the option value itself is a list of items (e.g. the format
filter
+takes a list of pixel formats), the items in the list are usually separated by
+’|’.
+
+
The list of arguments can be quoted using the character "’" as initial
+and ending mark, and the character ’\’ for escaping the characters
+within the quoted text; otherwise the argument string is considered
+terminated when the next special character (belonging to the set
+"[]=;,") is encountered.
+
+
The name and arguments of the filter are optionally preceded and
+followed by a list of link labels.
+A link label allows one to name a link and associate it to a filter output
+or input pad. The preceding labels in_link_1
+... in_link_N , are associated to the filter input pads,
+the following labels out_link_1 ... out_link_M , are
+associated to the output pads.
+
+
When two link labels with the same name are found in the
+filtergraph, a link between the corresponding input and output pad is
+created.
+
+
If an output pad is not labelled, it is linked by default to the first
+unlabelled input pad of the next filter in the filterchain.
+For example in the filterchain
+
+
nullsrc, split[L1], [L2]overlay, nullsink
+
+
the split filter instance has two output pads, and the overlay filter
+instance two input pads. The first output pad of split is labelled
+"L1", the first input pad of overlay is labelled "L2", and the second
+output pad of split is linked to the second input pad of overlay,
+which are both unlabelled.
+
+
In a complete filterchain all the unlabelled filter input and output
+pads must be connected. A filtergraph is considered valid if all the
+filter input and output pads of all the filterchains are connected.
+
+
Libavfilter will automatically insert scale filters where format
+conversion is required. It is possible to specify swscale flags
+for those automatically inserted scalers by prepending
+sws_flags=flags ;
+to the filtergraph description.
+
+
Here is a BNF description of the filtergraph syntax:
+
+
NAME ::= sequence of alphanumeric characters and '_'
+LINKLABEL ::= "[" NAME "]"
+LINKLABELS ::= LINKLABEL [LINKLABELS ]
+FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
+FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
+FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
+FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
+
+
+
+
33.2 Notes on filtergraph escaping# TOC
+
+
Filtergraph description composition entails several levels of
+escaping. See (ffmpeg-utils)the "Quoting and escaping"
+section in the ffmpeg-utils(1) manual for more
+information about the employed escaping procedure.
+
+
A first level escaping affects the content of each filter option
+value, which may contain the special character :
used to
+separate values, or one of the escaping characters \'
.
+
+
A second level escaping affects the whole filter description, which
+may contain the escaping characters \'
or the special
+characters [],;
used by the filtergraph description.
+
+
Finally, when you specify a filtergraph on a shell commandline, you
+need to perform a third level escaping for the shell special
+characters contained within it.
+
+
For example, consider the following string to be embedded in
+the drawtext filter description text value:
+
+
this is a 'string': may contain one, or more, special characters
+
+
+
This string contains the '
special escaping character, and the
+:
special character, so it needs to be escaped in this way:
+
+
text=this is a \'string\'\: may contain one, or more, special characters
+
+
+
A second level of escaping is required when embedding the filter
+description in a filtergraph description, in order to escape all the
+filtergraph special characters. Thus the example above becomes:
+
+
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+
(note that in addition to the \'
escaping special characters,
+also ,
needs to be escaped).
+
+
Finally an additional level of escaping is needed when writing the
+filtergraph description in a shell command, which depends on the
+escaping rules of the adopted shell. For example, assuming that
+\
is special and needs to be escaped with another \
, the
+previous string will finally result in:
+
+
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+
+
+
34 Timeline editing# TOC
+
+
Some filters support a generic enable option. For the filters
+supporting timeline editing, this option can be set to an expression which is
+evaluated before sending a frame to the filter. If the evaluation is non-zero,
+the filter will be enabled, otherwise the frame will be sent unchanged to the
+next filter in the filtergraph.
+
+
The expression accepts the following values:
+
+‘t ’
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+‘n ’
+sequential number of the input frame, starting from 0
+
+
+‘pos ’
+the position in the file of the input frame, NAN if unknown
+
+
+‘w ’
+‘h ’
+width and height of the input frame if video
+
+
+
+
Additionally, these filters support an enable command that can be used
+to re-define the expression.
+
+
Like any other filtering option, the enable option follows the same
+rules.
+
+
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
+minutes, and a curves filter starting at 3 seconds:
+
+
smartblur = enable='between(t,10,3*60)',
+curves = enable='gte(t,3)' : preset=cross_process
+
+
+
+
+
35 Audio Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the audio filters included in your
+build.
+
+
Below is a description of the currently available audio filters.
+
+
+
35.1 adelay# TOC
+
+
Delay one or more audio channels.
+
+
Samples in delayed channel are filled with silence.
+
+
The filter accepts the following option:
+
+
+delays
+Set list of delays in milliseconds for each channel separated by ’|’.
+At least one delay greater than 0 should be provided.
+Unused delays will be silently ignored. If number of given delays is
+smaller than number of channels all remaining channels will not be delayed.
+
+
+
+
+
35.1.1 Examples# TOC
+
+
+ Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
+the second channel (and any other channels that may be present) unchanged.
+
+
+
+
+
35.2 aecho# TOC
+
+
Apply echoing to the input audio.
+
+
Echoes are reflected sound and can occur naturally amongst mountains
+(and sometimes large buildings) when talking or shouting; digital echo
+effects emulate this behaviour and are often used to help fill out the
+sound of a single instrument or vocal. The time difference between the
+original signal and the reflection is the delay
, and the
+loudness of the reflected signal is the decay
.
+Multiple echoes can have different delays and decays.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain of reflected signal. Default is 0.6
.
+
+
+out_gain
+Set output gain of reflected signal. Default is 0.3
.
+
+
+delays
+Set list of time intervals in milliseconds between original signal and reflections
+separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
+Default is 1000
.
+
+
+decays
+Set list of loudnesses of reflected signals separated by ’|’.
+Allowed range for each decay
is (0 - 1.0]
.
+Default is 0.5
.
+
+
+
+
+
35.2.1 Examples# TOC
+
+
+ Make it sound as if there are twice as many instruments as are actually playing:
+
+
+ If delay is very short, then it will sound like a (metallic) robot playing music:
+
+
+ A longer delay will sound like an open air concert in the mountains:
+
+
aecho=0.8:0.9:1000:0.3
+
+
+ Same as above but with one more mountain:
+
+
aecho=0.8:0.9:1000|1800:0.3|0.25
+
+
+
+
+
35.3 aeval# TOC
+
+
Modify an audio signal according to the specified expressions.
+
+
This filter accepts one or more expressions (one for each channel),
+which are evaluated and used to modify a corresponding audio signal.
+
+
It accepts the following parameters:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. If
+the number of input channels is greater than the number of
+expressions, the last specified expression is used for the remaining
+output channels.
+
+
+channel_layout, c
+Set output channel layout. If not specified, the channel layout is
+specified by the number of expressions. If set to ‘same ’, it will
+use by default the same input channel layout.
+
+
+
+
Each expression in exprs can contain the following constants and functions:
+
+
+ch
+channel number of the current expression
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+s
+sample rate
+
+
+t
+time of the evaluated sample expressed in seconds
+
+
+nb_in_channels
+nb_out_channels
+input and output number of channels
+
+
+val(CH)
+the value of input channel with number CH
+
+
+
+
Note: this filter is slow. For faster processing you should use a
+dedicated filter.
+
+
+
35.3.1 Examples# TOC
+
+
+ Half volume:
+
+
aeval=val(ch)/2:c=same
+
+
+ Invert phase of the second channel:
+
+
+
+
+
35.4 afade# TOC
+
+
Apply fade-in/out effect to input audio.
+
+
A description of the accepted parameters follows.
+
+
+type, t
+Specify the effect type, can be either in
for fade-in, or
+out
for a fade-out effect. Default is in
.
+
+
+start_sample, ss
+Specify the number of the start sample for starting to apply the fade
+effect. Default is 0.
+
+
+nb_samples, ns
+Specify the number of samples for which the fade effect has to last. At
+the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence. Default is 44100.
+
+
+start_time, st
+Specify the start time of the fade effect. Default is 0.
+The value must be specified as a time duration; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+If set this option is used instead of start_sample .
+
+
+duration, d
+Specify the duration of the fade effect. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+At the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence.
+By default the duration is determined by nb_samples .
+If set this option is used instead of nb_samples .
+
+
+curve
+Set curve for fade transition.
+
+It accepts the following values:
+
+tri
+select triangular, linear slope (default)
+
+qsin
+select quarter of sine wave
+
+hsin
+select half of sine wave
+
+esin
+select exponential sine wave
+
+log
+select logarithmic
+
+par
+select inverted parabola
+
+qua
+select quadratic
+
+cub
+select cubic
+
+squ
+select square root
+
+cbr
+select cubic root
+
+
+
+
+
+
+
35.4.1 Examples# TOC
+
+
+ Fade in first 15 seconds of audio:
+
+
+ Fade out last 25 seconds of a 900 seconds audio:
+
+
afade=t=out:st=875:d=25
+
+
+
+
+
35.5 aformat# TOC
+
+
Set output format constraints for the input audio. The framework will
+negotiate the most appropriate format to minimize conversions.
+
+
It accepts the following parameters:
+
+sample_fmts
+A ’|’-separated list of requested sample formats.
+
+
+sample_rates
+A ’|’-separated list of requested sample rates.
+
+
+channel_layouts
+A ’|’-separated list of requested channel layouts.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+
+
If a parameter is omitted, all values are allowed.
+
+
Force the output to either unsigned 8-bit or signed 16-bit stereo
+
+
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+
+
+
35.6 allpass# TOC
+
+
Apply a two-pole all-pass filter with central frequency (in Hz)
+frequency , and filter-width width .
+An all-pass filter changes the audio’s frequency to phase relationship
+without changing its frequency to amplitude relationship.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
35.7 amerge# TOC
+
+
Merge two or more audio streams into a single multi-channel stream.
+
+
The filter accepts the following options:
+
+
+inputs
+Set the number of inputs. Default is 2.
+
+
+
+
+
If the channel layouts of the inputs are disjoint, and therefore compatible,
+the channel layout of the output will be set accordingly and the channels
+will be reordered as necessary. If the channel layouts of the inputs are not
+disjoint, the output will have all the channels of the first input then all
+the channels of the second input, in that order, and the channel layout of
+the output will be the default value corresponding to the total number of
+channels.
+
+
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
+is FC+BL+BR, then the output will be in 5.1, with the channels in the
+following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
+first input, b1 is the first channel of the second input).
+
+
On the other hand, if both input are in stereo, the output channels will be
+in the default order: a1, a2, b1, b2, and the channel layout will be
+arbitrarily set to 4.0, which may or may not be the expected value.
+
+
All inputs must have the same sample rate, and format.
+
+
If inputs do not have the same duration, the output will stop with the
+shortest.
+
+
+
35.7.1 Examples# TOC
+
+
+ Merge two mono files into a stereo stream:
+
+
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
+
+
+ Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
+
+
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
+
+
+
+
+
35.8 amix# TOC
+
+
Mixes multiple audio inputs into a single output.
+
+
Note that this filter only supports float samples (the amerge
+and pan audio filters support many formats). If the amix
+input has integer samples then aresample will be automatically
+inserted to perform the conversion to float samples.
+
+
For example
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+
will mix 3 input audio streams to a single output with the same duration as the
+first input and a dropout transition time of 3 seconds.
+
+
It accepts the following parameters:
+
+inputs
+The number of inputs. If unspecified, it defaults to 2.
+
+
+duration
+How to determine the end-of-stream.
+
+longest
+The duration of the longest input. (default)
+
+
+shortest
+The duration of the shortest input.
+
+
+first
+The duration of the first input.
+
+
+
+
+
+dropout_transition
+The transition time, in seconds, for volume renormalization when an input
+stream ends. The default value is 2 seconds.
+
+
+
+
+
+
35.9 anull# TOC
+
+
Pass the audio source unchanged to the output.
+
+
+
35.10 apad# TOC
+
+
Pad the end of an audio stream with silence.
+
+
This can be used together with ffmpeg
-shortest to
+extend audio streams to the same length as the video stream.
+
+
A description of the accepted options follows.
+
+
+packet_size
+Set silence packet size. Default value is 4096.
+
+
+pad_len
+Set the number of samples of silence to add to the end. After the
+value is reached, the stream is terminated. This option is mutually
+exclusive with whole_len .
+
+
+whole_len
+Set the minimum total number of samples in the output audio stream. If
+the value is longer than the input audio length, silence is added to
+the end, until the value is reached. This option is mutually exclusive
+with pad_len .
+
+
+
+
If neither the pad_len nor the whole_len option is
+set, the filter will add silence to the end of the input stream
+indefinitely.
+
+
+
35.10.1 Examples# TOC
+
+
+ Add 1024 samples of silence to the end of the input:
+
+
+ Make sure the audio output will contain at least 10000 samples, pad
+the input with silence if required:
+
+
+ Use ffmpeg
to pad the audio input with silence, so that the
+video stream will always be the shortest and will be converted
+until the end in the output file when using the shortest
+option:
+
+
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
+
+
+
+
+
35.11 aphaser# TOC
+
Add a phasing effect to the input audio.
+
+
A phaser filter creates series of peaks and troughs in the frequency spectrum.
+The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain. Default is 0.4.
+
+
+out_gain
+Set output gain. Default is 0.74
+
+
+delay
+Set delay in milliseconds. Default is 3.0.
+
+
+decay
+Set decay. Default is 0.4.
+
+
+speed
+Set modulation speed in Hz. Default is 0.5.
+
+
+type
+Set modulation type. Default is triangular.
+
+It accepts the following values:
+
+‘triangular, t ’
+‘sinusoidal, s ’
+
+
+
+
+
+
35.12 aresample# TOC
+
+
Resample the input audio to the specified parameters, using the
+libswresample library. If none are specified then the filter will
+automatically convert between its input and output.
+
+
This filter is also able to stretch/squeeze the audio data to make it match
+the timestamps or to inject silence / cut out audio to make it match the
+timestamps, do a combination of both or do neither.
+
+
The filter accepts the syntax
+[sample_rate :]resampler_options , where sample_rate
+expresses a sample rate and resampler_options is a list of
+key =value pairs, separated by ":". See the
+ffmpeg-resampler manual for the complete list of supported options.
+
+
+
35.12.1 Examples# TOC
+
+
+ Resample the input audio to 44100Hz:
+
+
+ Stretch/squeeze samples to the given timestamps, with a maximum of 1000
+samples per second compensation:
+
+
+
+
+
35.13 asetnsamples# TOC
+
+
Set the number of samples per each output audio frame.
+
+
The last output packet may contain a different number of samples, as
+the filter will flush all the remaining samples when the input audio
+signals its end.
+
+
The filter accepts the following options:
+
+
+nb_out_samples, n
+Set the number of samples per each output audio frame. The number is
+intended as the number of samples per each channel .
+Default value is 1024.
+
+
+pad, p
+If set to 1, the filter will pad the last audio frame with zeroes, so
+that the last frame will contain the same number of samples as the
+previous ones. Default value is 1.
+
+
+
+
For example, to set the number of per-frame samples to 1234 and
+disable padding for the last frame, use:
+
+
asetnsamples=n=1234:p=0
+
+
+
+
35.14 asetrate# TOC
+
+
Set the sample rate without altering the PCM data.
+This will result in a change of speed and pitch.
+
+
The filter accepts the following options:
+
+
+sample_rate, r
+Set the output sample rate. Default is 44100 Hz.
+
+
+
+
+
35.15 ashowinfo# TOC
+
+
Show a line containing various information for each input audio frame.
+The input audio is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The presentation timestamp of the input frame, in time base units; the time base
+depends on the filter input pad, and is usually 1/sample_rate .
+
+
+pts_time
+The presentation timestamp of the input frame in seconds.
+
+
+pos
+position of the frame in the input stream, -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic audio)
+
+
+fmt
+The sample format.
+
+
+chlayout
+The channel layout.
+
+
+rate
+The sample rate for the audio frame.
+
+
+nb_samples
+The number of samples (per channel) in the frame.
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
+audio, the data is treated as if all the planes were concatenated.
+
+
+plane_checksums
+A list of Adler-32 checksums for each data plane.
+
+
+
+
+
35.16 astats# TOC
+
+
Display time domain statistical information about the audio channels.
+Statistics are calculated and displayed for each audio channel and,
+where applicable, an overall figure is also given.
+
+
It accepts the following option:
+
+length
+Short window length in seconds, used for peak and trough RMS measurement.
+Default is 0.05
(50 milliseconds). Allowed range is [0.01 - 10]
.
+
+
+
+
A description of each shown parameter follows:
+
+
+DC offset
+Mean amplitude displacement from zero.
+
+
+Min level
+Minimal sample level.
+
+
+Max level
+Maximal sample level.
+
+
+Peak level dB
+RMS level dB
+Standard peak and RMS level measured in dBFS.
+
+
+RMS peak dB
+RMS trough dB
+Peak and trough values for RMS level measured over a short window.
+
+
+Crest factor
+Standard ratio of peak to RMS level (note: not in dB).
+
+
+Flat factor
+Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
+(i.e. either Min level or Max level ).
+
+
+Peak count
+Number of occasions (not the number of samples) that the signal attained either
+Min level or Max level .
+
+
+
+
+
35.17 astreamsync# TOC
+
+
Forward two audio streams and control the order the buffers are forwarded.
+
+
The filter accepts the following options:
+
+
+expr, e
+Set the expression deciding which stream should be
+forwarded next: if the result is negative, the first stream is forwarded; if
+the result is positive or zero, the second stream is forwarded. It can use
+the following variables:
+
+
+b1 b2
+number of buffers forwarded so far on each stream
+
+s1 s2
+number of samples forwarded so far on each stream
+
+t1 t2
+current timestamp of each stream
+
+
+
+The default value is t1-t2
, which means to always forward the stream
+that has a smaller timestamp.
+
+
+
+
+
35.17.1 Examples# TOC
+
+
Stress-test amerge
by randomly sending buffers on the wrong
+input, while avoiding too much of a desynchronization:
+
+
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+
+
+
35.18 asyncts# TOC
+
+
Synchronize audio data with timestamps by squeezing/stretching it and/or
+dropping samples/adding silence when needed.
+
+
This filter is not built by default, please use aresample to do squeezing/stretching.
+
+
It accepts the following parameters:
+
+compensate
+Enable stretching/squeezing the data to make it match the timestamps. Disabled
+by default. When disabled, time gaps are covered with silence.
+
+
+min_delta
+The minimum difference between timestamps and audio data (in seconds) to trigger
+adding/dropping samples. The default value is 0.1. If you get an imperfect
+sync with this filter, try setting this parameter to 0.
+
+
+max_comp
+The maximum compensation in samples per second. Only relevant with compensate=1.
+The default value is 500.
+
+
+first_pts
+Assume that the first PTS should be this value. The time base is 1 / sample
+rate. This allows for padding/trimming at the start of the stream. By default,
+no assumption is made about the first frame’s expected PTS, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative PTS due to encoder delay.
+
+
+
+
+
+
35.19 atempo# TOC
+
+
Adjust audio tempo.
+
+
The filter accepts exactly one parameter, the audio tempo. If not
+specified then the filter will assume nominal 1.0 tempo. Tempo must
+be in the [0.5, 2.0] range.
+
+
+
35.19.1 Examples# TOC
+
+
+ Slow down audio to 80% tempo:
+
+
+ To speed up audio to 125% tempo:
+
+
+
+
+
35.20 atrim# TOC
+
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Timestamp (in seconds) of the start of the section to keep. I.e. the audio
+sample with the timestamp start will be the first sample in the output.
+
+
+end
+Specify time of the first audio sample that will be dropped, i.e. the
+audio sample immediately preceding the one with the timestamp end will be
+the last sample in the output.
+
+
+start_pts
+Same as start , except this option sets the start timestamp in samples
+instead of seconds.
+
+
+end_pts
+Same as end , except this option sets the end timestamp in samples instead
+of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_sample
+The number of the first sample that should be output.
+
+
+end_sample
+The number of the first sample that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _sample options simply count the
+samples that pass through the filter. So start/end_pts and start/end_sample will
+give different results when the timestamps are wrong, inexact or do not start at
+zero. Also note that this filter does not modify the timestamps. If you wish
+to have the output timestamps start at zero, insert the asetpts filter after the
+atrim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all samples that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple atrim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -af atrim=60:120
+
+
+ Keep only the first 1000 samples:
+
+
ffmpeg -i INPUT -af atrim=end_sample=1000
+
+
+
+
+
+
35.21 bandpass# TOC
+
+
Apply a two-pole Butterworth band-pass filter with central
+frequency frequency , and (3dB-point) band-width width.
+The csg option selects a constant skirt gain (peak gain = Q)
+instead of the default: constant 0dB peak gain.
+The filter roll off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+csg
+Constant skirt gain if set to 1. Defaults to 0.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
35.22 bandreject# TOC
+
+
Apply a two-pole Butterworth band-reject filter with central
+frequency frequency , and (3dB-point) band-width width .
+The filter roll off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
35.23 bass# TOC
+
+
Boost or cut the bass (lower) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at 0 Hz. Its useful range is about -20
+(for a large cut) to +20 (for a large boost).
+Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 100
Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep the filter’s shelf transition is.
+
+
+
+
+
35.24 biquad# TOC
+
+
Apply a biquad IIR filter with the given coefficients.
+Where b0 , b1 , b2 and a0 , a1 , a2
+are the numerator and denominator coefficients respectively.
+
+
+
35.25 bs2b# TOC
+
Bauer stereo to binaural transformation, which improves headphone listening of
+stereo audio records.
+
+
It accepts the following parameters:
+
+profile
+Pre-defined crossfeed level.
+
+default
+Default level (fcut=700, feed=50).
+
+
+cmoy
+Chu Moy circuit (fcut=700, feed=60).
+
+
+jmeier
+Jan Meier circuit (fcut=650, feed=95).
+
+
+
+
+
+fcut
+Cut frequency (in Hz).
+
+
+feed
+Feed level (in Hz).
+
+
+
+
+
+
35.26 channelmap# TOC
+
+
Remap input channels to new locations.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the output stream.
+
+
+map
+Map channels from input to output. The argument is a ’|’-separated list of
+mappings, each in the in_channel -out_channel
or
+in_channel form. in_channel can be either the name of the input
+channel (e.g. FL for front left) or its index in the input channel layout.
+out_channel is the name of the output channel or its index in the output
+channel layout. If out_channel is not given then it is implicitly an
+index, starting with zero and increasing by one for each mapping.
+
+
+
+
If no mapping is present, the filter will implicitly map input channels to
+output channels, preserving indices.
+
+
For example, assuming a 5.1+downmix input MOV file,
+
+
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+
will create an output WAV file tagged as stereo from the downmix channels of
+the input.
+
+
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
+
+
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+
+
+
35.27 channelsplit# TOC
+
+
Split each channel from an input audio stream into a separate output stream.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the input stream. The default is "stereo".
+
+
+
+
For example, assuming a stereo input MP3 file,
+
+
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+
will create an output Matroska file with two audio streams, one containing only
+the left channel and the other the right channel.
+
+
Split a 5.1 WAV file into per-channel files:
+
+
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+
+
+
35.28 compand# TOC
+
Compress or expand the audio’s dynamic range.
+
+
It accepts the following parameters:
+
+
+attacks
+decays
+A list of times in seconds for each channel over which the instantaneous level
+of the input signal is averaged to determine its volume. attacks refers to
+increase of volume and decays refers to decrease of volume. For most
+situations, the attack time (response to the audio getting louder) should be
+shorter than the decay time, because the human ear is more sensitive to sudden
+loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
+a typical value for decay is 0.8 seconds.
+
+
+points
+A list of points for the transfer function, specified in dB relative to the
+maximum possible signal amplitude. Each key points list must be defined using
+the following syntax: x0/y0|x1/y1|x2/y2|....
or
+x0/y0 x1/y1 x2/y2 ....
+
+The input values must be in strictly increasing order but the transfer function
+does not have to be monotonically rising. The point 0/0
is assumed but
+may be overridden (by 0/out-dBn
). Typical values for the transfer
+function are -70/-70|-60/-20
.
+
+
+soft-knee
+Set the curve radius in dB for all joints. It defaults to 0.01.
+
+
+gain
+Set the additional gain in dB to be applied at all points on the transfer
+function. This allows for easy adjustment of the overall gain.
+It defaults to 0.
+
+
+volume
+Set an initial volume, in dB, to be assumed for each channel when filtering
+starts. This permits the user to supply a nominal level initially, so that, for
+example, a very large gain is not applied to initial signal levels before the
+companding has begun to operate. A typical value for audio which is initially
+quiet is -90 dB. It defaults to 0.
+
+
+delay
+Set a delay, in seconds. The input audio is analyzed immediately, but audio is
+delayed before being fed to the volume adjuster. Specifying a delay
+approximately equal to the attack/decay times allows the filter to effectively
+operate in predictive rather than reactive mode. It defaults to 0.
+
+
+
+
+
+
35.28.1 Examples# TOC
+
+
+ Make music with both quiet and loud passages suitable for listening to in a
+noisy environment:
+
+
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
+
+
+ A noise gate for when the noise is at a lower level than the signal:
+
+
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
+
+
+ Here is another noise gate, this time for when the noise is at a higher level
+than the signal (making it, in some ways, similar to squelch):
+
+
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
+
+
+
+
+
35.29 earwax# TOC
+
+
Make audio easier to listen to on headphones.
+
+
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
+so that when listened to on headphones the stereo image is moved from
+inside your head (standard for headphones) to outside and in front of
+the listener (standard for speakers).
+
+
Ported from SoX.
+
+
+
35.30 equalizer# TOC
+
+
Apply a two-pole peaking equalisation (EQ) filter. With this
+filter, the signal-level at and around a selected frequency can
+be increased or decreased, whilst (unlike bandpass and bandreject
+filters) that at all other frequencies is unchanged.
+
+
In order to produce complex equalisation curves, this filter can
+be given several times, each with a different central frequency.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+gain, g
+Set the required gain or attenuation in dB.
+Beware of clipping when using a positive gain.
+
+
+
+
+
35.30.1 Examples# TOC
+
+ Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
+
+
equalizer=f=1000:width_type=h:width=200:g=-10
+
+
+ Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
+
+
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
+
+
+
+
+
35.31 flanger# TOC
+
Apply a flanging effect to the audio.
+
+
The filter accepts the following options:
+
+
+delay
+Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
+
+
+depth
+Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
+
+
+regen
+Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
+Default value is 0.
+
+
+width
+Set percentage of delayed signal mixed with original. Range from 0 to 100.
+Default value is 71.
+
+
+speed
+Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
+
+
+shape
+Set swept wave shape, can be triangular or sinusoidal .
+Default value is sinusoidal .
+
+
+phase
+Set swept wave percentage-shift for multi channel. Range from 0 to 100.
+Default value is 25.
+
+
+interp
+Set delay-line interpolation, linear or quadratic .
+Default is linear .
+
+
+
+
+
35.32 highpass# TOC
+
+
Apply a high-pass filter with 3dB point frequency.
+The filter can be either single-pole, or double-pole (the default).
+The filter roll off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 3000.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
35.33 join# TOC
+
+
Join multiple input streams into one multi-channel stream.
+
+
It accepts the following parameters:
+
+inputs
+The number of input streams. It defaults to 2.
+
+
+channel_layout
+The desired output channel layout. It defaults to stereo.
+
+
+map
+Map channels from inputs to output. The argument is a ’|’-separated list of
+mappings, each in the input_idx .in_channel -out_channel
+form. input_idx is the 0-based index of the input stream. in_channel
+can be either the name of the input channel (e.g. FL for front left) or its
+index in the specified input stream. out_channel is the name of the output
+channel.
+
+
+
+
The filter will attempt to guess the mappings when they are not specified
+explicitly. It does so by first trying to find an unused matching input channel
+and if that fails it picks the first unused input channel.
+
+
Join 3 inputs (with properly set channel layouts):
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+
+
Build a 5.1 output from 6 single-channel streams:
+
+
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+
+
+
35.34 ladspa# TOC
+
+
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-ladspa
.
+
+
+file, f
+Specifies the name of LADSPA plugin library to load. If the environment
+variable LADSPA_PATH
is defined, the LADSPA plugin is searched in
+each one of the directories specified by the colon separated list in
+LADSPA_PATH
, otherwise in the standard LADSPA paths, which are in
+this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
+/usr/lib/ladspa/ .
+
+
+plugin, p
+Specifies the plugin within the library. Some libraries contain only
+one plugin, but others contain many of them. If this is not set filter
+will list all available plugins within the specified library.
+
+
+controls, c
+Set the ’|’ separated list of controls which are zero or more floating point
+values that determine the behavior of the loaded plugin (for example delay,
+threshold or gain).
+Controls need to be defined using the following syntax:
+c0=value0 |c1=value1 |c2=value2 |..., where
+valuei is the value set on the i -th control.
+If controls is set to help
, all available controls and
+their valid ranges are printed.
+
+
+sample_rate, s
+Specify the sample rate, default to 44100. Only used if plugin have
+zero inputs.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame, default
+is 1024. Only used if plugin have zero inputs.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified duration,
+as the generated audio is always cut at the end of a complete frame.
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+Only used if plugin have zero inputs.
+
+
+
+
+
+
35.34.1 Examples# TOC
+
+
+ List all available plugins within amp (LADSPA example plugin) library:
+
+
+ List all available controls and their valid ranges for vcf_notch
+plugin from VCF
library:
+
+
ladspa=f=vcf:p=vcf_notch:c=help
+
+
+ Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
+plugin library:
+
+
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
+
+
+ Add reverberation to the audio using TAP-plugins
+(Tom’s Audio Processing plugins):
+
+
ladspa=file=tap_reverb:tap_reverb
+
+
+ Generate white noise, with 0.2 amplitude:
+
+
ladspa=file=cmt:noise_source_white:c=c0=.2
+
+
+ Generate 20 bpm clicks using plugin C* Click - Metronome
from the
+C* Audio Plugin Suite
(CAPS) library:
+
+
ladspa=file=caps:Click:c=c1=20
+
+
+ Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
+
+
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
+
+
+
+
+
35.34.2 Commands# TOC
+
+
This filter supports the following commands:
+
+cN
+Modify the N -th control value.
+
+If the specified value is not valid, it is ignored and prior one is kept.
+
+
+
+
+
35.35 lowpass# TOC
+
+
Apply a low-pass filter with 3dB point frequency.
+The filter can be either single-pole or double-pole (the default).
+The filter roll off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 500.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
35.36 pan# TOC
+
+
Mix channels with specific gain levels. The filter accepts the output
+channel layout followed by a set of channels definitions.
+
+
This filter is also designed to efficiently remap the channels of an audio
+stream.
+
+
The filter accepts parameters of the form:
+"l |outdef |outdef |..."
+
+
+l
+output channel layout or number of channels
+
+
+outdef
+output channel specification, of the form:
+"out_name =[gain *]in_name [+[gain *]in_name ...]"
+
+
+out_name
+output channel to define, either a channel name (FL, FR, etc.) or a channel
+number (c0, c1, etc.)
+
+
+gain
+multiplicative coefficient for the channel, 1 leaving the volume unchanged
+
+
+in_name
+input channel to use, see out_name for details; it is not possible to mix
+named and numbered input channels
+
+
+
+
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
+that specification will be renormalized so that the total is 1, thus
+avoiding clipping noise.
+
+
+
35.36.1 Mixing examples# TOC
+
+
For example, if you want to down-mix from stereo to mono, but with a bigger
+factor for the left channel:
+
+
pan=1c|c0=0.9*c0+0.1*c1
+
+
+
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
+7-channels surround:
+
+
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+
+
Note that ffmpeg
integrates a default down-mix (and up-mix) system
+that should be preferred (see "-ac" option) unless you have very specific
+needs.
+
+
+
35.36.2 Remapping examples# TOC
+
+
The channel remapping will be effective if, and only if:
+
+
+ gain coefficients are zeroes or ones,
+ only one input per channel output,
+
+
+
If all these conditions are satisfied, the filter will notify the user ("Pure
+channel mapping detected"), and use an optimized and lossless method to do the
+remapping.
+
+
For example, if you have a 5.1 source and want a stereo audio stream by
+dropping the extra channels:
+
+
pan="stereo| c0=FL | c1=FR"
+
+
+
Given the same source, you can also switch front left and front right channels
+and keep the input channel layout:
+
+
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
+
+
+
If the input is a stereo audio stream, you can mute the front left channel (and
+still keep the stereo channel layout) with:
+
+
+
Still with a stereo audio stream input, you can copy the right channel in both
+front left and right:
+
+
pan="stereo| c0=FR | c1=FR"
+
+
+
+
35.37 replaygain# TOC
+
+
ReplayGain scanner filter. This filter takes an audio stream as an input and
+outputs it unchanged.
+At end of filtering it displays track_gain
and track_peak
.
+
+
+
35.38 resample# TOC
+
+
Convert the audio sample format, sample rate and channel layout. It is
+not meant to be used directly.
+
+
+
35.39 silencedetect# TOC
+
+
Detect silence in an audio stream.
+
+
This filter logs a message when it detects that the input audio volume is less
+or equal to a noise tolerance value for a duration greater or equal to the
+minimum detected noise duration.
+
+
The printed times and duration are expressed in seconds.
+
+
The filter accepts the following options:
+
+
+duration, d
+Set silence duration until notification (default is 2 seconds).
+
+
+noise, n
+Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
+specified value) or amplitude ratio. Default is -60dB, or 0.001.
+
+
+
+
+
35.39.1 Examples# TOC
+
+
+ Detect 5 seconds of silence with -50dB noise tolerance:
+
+
silencedetect=n=-50dB:d=5
+
+
+ Complete example with ffmpeg
to detect silence with 0.0001 noise
+tolerance in silence.mp3 :
+
+
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
+
+
+
+
+
35.40 silenceremove# TOC
+
+
Remove silence from the beginning, middle or end of the audio.
+
+
The filter accepts the following options:
+
+
+start_periods
+This value is used to indicate if audio should be trimmed at beginning of
+the audio. A value of zero indicates no silence should be trimmed from the
+beginning. When specifying a non-zero value, it trims audio up until it
+finds non-silence. Normally, when trimming silence from beginning of audio
+the start_periods will be 1
but it can be increased to higher
+values to trim all audio up to specific count of non-silence periods.
+Default value is 0
.
+
+
+start_duration
+Specify the amount of time that non-silence must be detected before it stops
+trimming audio. By increasing the duration, bursts of noises can be treated
+as silence and trimmed off. Default value is 0
.
+
+
+start_threshold
+This indicates what sample value should be treated as silence. For digital
+audio, a value of 0
may be fine but for audio recorded from analog,
+you may wish to increase the value to account for background noise.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+stop_periods
+Set the count for trimming silence from the end of audio.
+To remove silence from the middle of a file, specify a stop_periods
+that is negative. This value is then treated as a positive value and is
+used to indicate the effect should restart processing as specified by
+start_periods , making it suitable for removing periods of silence
+in the middle of the audio.
+Default value is 0
.
+
+
+stop_duration
+Specify a duration of silence that must exist before audio is not copied any
+more. By specifying a higher duration, silence that is wanted can be left in
+the audio.
+Default value is 0
.
+
+
+stop_threshold
+This is the same as start_threshold but for trimming silence from
+the end of audio.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+leave_silence
+This indicates that stop_duration length of audio should be left intact
+at the beginning of each period of silence.
+For example, if you want to remove long pauses between words but do not want
+to remove the pauses completely. Default value is 0
.
+
+
+
+
+
+
35.40.1 Examples# TOC
+
+
+ The following example shows how this filter can be used to start a recording
+that does not contain the delay at the start which usually occurs between
+pressing the record button and the start of the performance:
+
+
silenceremove=1:5:0.02
+
+
+
+
+
35.41 treble# TOC
+
+
Boost or cut treble (upper) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at whichever is the lower of ~22 kHz and the
+Nyquist frequency. Its useful range is about -20 (for a large cut)
+to +20 (for a large boost). Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 3000
Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep the filter’s shelf transition is.
+
+
+
+
+
35.42 volume# TOC
+
+
Adjust the input audio volume.
+
+
It accepts the following parameters:
+
+volume
+Set audio volume expression.
+
+Output values are clipped to the maximum value.
+
+The output audio volume is given by the relation:
+
+
output_volume = volume * input_volume
+
+
+The default value for volume is "1.0".
+
+
+precision
+This parameter represents the mathematical precision.
+
+It determines which input sample formats will be allowed, which affects the
+precision of the volume scaling.
+
+
+fixed
+8-bit fixed-point; this limits input sample format to U8, S16, and S32.
+
+float
+32-bit floating-point; this limits input sample format to FLT. (default)
+
+double
+64-bit floating-point; this limits input sample format to DBL.
+
+
+
+
+replaygain
+Choose the behaviour on encountering ReplayGain side data in input frames.
+
+
+drop
+Remove ReplayGain side data, ignoring its contents (the default).
+
+
+ignore
+Ignore ReplayGain side data, but leave it in the frame.
+
+
+track
+Prefer the track gain, if present.
+
+
+album
+Prefer the album gain, if present.
+
+
+
+
+replaygain_preamp
+Pre-amplification gain in dB to apply to the selected replaygain gain.
+
+Default value for replaygain_preamp is 0.0.
+
+
+eval
+Set when the volume expression is evaluated.
+
+It accepts the following values:
+
+‘once ’
+only evaluate expression once during the filter initialization, or
+when the ‘volume ’ command is sent
+
+
+‘frame ’
+evaluate expression for each incoming frame
+
+
+
+Default value is ‘once ’.
+
+
+
+
The volume expression can contain the following parameters.
+
+
+n
+frame number (starting at zero)
+
+nb_channels
+number of channels
+
+nb_consumed_samples
+number of samples consumed by the filter
+
+nb_samples
+number of samples in the current frame
+
+pos
+original frame position in the file
+
+pts
+frame PTS
+
+sample_rate
+sample rate
+
+startpts
+PTS at start of stream
+
+startt
+time at start of stream
+
+t
+frame time
+
+tb
+timestamp timebase
+
+volume
+last set volume value
+
+
+
+
Note that when eval is set to ‘once ’ only the
+sample_rate and tb variables are available, all other
+variables will evaluate to NAN.
+
+
+
35.42.1 Commands# TOC
+
+
This filter supports the following commands:
+
+volume
+Modify the volume expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+replaygain_noclip
+Prevent clipping by limiting the gain applied.
+
+Default value for replaygain_noclip is 1.
+
+
+
+
+
+
35.42.2 Examples# TOC
+
+
+
+
+
35.43 volumedetect# TOC
+
+
Detect the volume of the input audio.
+
+
The filter has no parameters. The input is not modified. Statistics about
+the volume will be printed in the log when the input stream end is reached.
+
+
In particular it will show the mean volume (root mean square), maximum
+volume (on a per-sample basis), and the beginning of a histogram of the
+registered volume values (from the maximum value to a cumulated 1/1000 of
+the samples).
+
+
All volumes are in decibels relative to the maximum PCM value.
+
+
+
35.43.1 Examples# TOC
+
+
Here is an excerpt of the output:
+
+
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
+
+
+
It means that:
+
+ The mean square energy is approximately -27 dB, or 10^-2.7.
+ The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
+ There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
+
+
+
In other words, raising the volume by +4 dB does not cause any clipping,
+raising it by +5 dB causes clipping for 6 samples, etc.
+
+
+
+
36 Audio Sources# TOC
+
+
Below is a description of the currently available audio sources.
+
+
+
36.1 abuffer# TOC
+
+
Buffer audio frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/asrc_abuffer.h .
+
+
It accepts the following parameters:
+
+time_base
+The timebase which will be used for timestamps of submitted frames. It must be
+either a floating-point number or in numerator /denominator form.
+
+
+sample_rate
+The sample rate of the incoming audio buffers.
+
+
+sample_fmt
+The sample format of the incoming audio buffers.
+Either a sample format name or its corresponding integer representation from
+the enum AVSampleFormat in libavutil/samplefmt.h
+
+
+channel_layout
+The channel layout of the incoming audio buffers.
+Either a channel layout name from channel_layout_map in
+libavutil/channel_layout.c or its corresponding integer representation
+from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
+
+
+channels
+The number of channels of the incoming audio buffers.
+If both channels and channel_layout are specified, then they
+must be consistent.
+
+
+
+
+
+
36.1.1 Examples# TOC
+
+
+
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+
+
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
+Since the sample format with name "s16p" corresponds to the number
+6 and the "stereo" channel layout corresponds to the value 0x3, this is
+equivalent to:
+
+
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+
+
+
36.2 aevalsrc# TOC
+
+
Generate an audio signal specified by an expression.
+
+
This source accepts in input one or more expressions (one for each
+channel), which are evaluated and used to generate a corresponding
+audio signal.
+
+
This source accepts the following options:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. In case the
+channel_layout option is not specified, the selected channel layout
+depends on the number of provided expressions. Otherwise the last
+specified expression is applied to the remaining output channels.
+
+
+channel_layout, c
+Set the channel layout. The number of channels in the specified layout
+must be equal to the number of specified expressions.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified
+duration, as the generated audio is always cut at the end of a
+complete frame.
+
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame,
+default to 1024.
+
+
+sample_rate, s
+Specify the sample rate, default to 44100.
+
+
+
+
Each expression in exprs can contain the following constants:
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+t
+time of the evaluated sample expressed in seconds, starting from 0
+
+
+s
+sample rate
+
+
+
+
+
+
36.2.1 Examples# TOC
+
+
+ Generate silence:
+
+
+ Generate a sin signal with frequency of 440 Hz, set sample rate to
+8000 Hz:
+
+
aevalsrc="sin(440*2*PI*t):s=8000"
+
+
+ Generate a two channels signal, specify the channel layout (Front
+Center + Back Center) explicitly:
+
+
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
+
+
+ Generate white noise:
+
+
aevalsrc="-2+random(0)"
+
+
+ Generate an amplitude modulated signal:
+
+
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
+
+
+ Generate 2.5 Hz binaural beats on a 360 Hz carrier:
+
+
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
+
+
+
+
+
+
36.3 anullsrc# TOC
+
+
The null audio source, returns unprocessed audio frames. It is mainly useful
+as a template and to be employed in analysis / debugging tools, or as
+the source for filters which ignore the input data (for example the sox
+synth filter).
+
+
This source accepts the following options:
+
+
+channel_layout, cl
+
+Specifies the channel layout, and can be either an integer or a string
+representing a channel layout. The default value of channel_layout
+is "stereo".
+
+Check the channel_layout_map definition in
+libavutil/channel_layout.c for the mapping between strings and
+channel layout values.
+
+
+sample_rate, r
+Specifies the sample rate, and defaults to 44100.
+
+
+nb_samples, n
+Set the number of samples per requested frames.
+
+
+
+
+
+
36.3.1 Examples# TOC
+
+
+ Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
+
+
+ Do the same operation with a more obvious syntax:
+
+
anullsrc=r=48000:cl=mono
+
+
+
+
All the parameters need to be explicitly defined.
+
+
+
36.4 flite# TOC
+
+
Synthesize a voice utterance using the libflite library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libflite
.
+
+
Note that the flite library is not thread-safe.
+
+
The filter accepts the following options:
+
+
+list_voices
+If set to 1, list the names of the available voices and exit
+immediately. Default value is 0.
+
+
+nb_samples, n
+Set the maximum number of samples per frame. Default value is 512.
+
+
+textfile
+Set the filename containing the text to speak.
+
+
+text
+Set the text to speak.
+
+
+voice, v
+Set the voice to use for the speech synthesis. Default value is
+kal
. See also the list_voices option.
+
+
+
+
+
36.4.1 Examples# TOC
+
+
+ Read from file speech.txt , and synthesize the text using the
+standard flite voice:
+
+
flite=textfile=speech.txt
+
+
+ Read the specified text selecting the slt
voice:
+
+
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Input text to ffmpeg:
+
+
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Make ffplay speak the specified text, using flite
and
+the lavfi
device:
+
+
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
+
+
+
+
For more information about libflite, check:
+http://www.speech.cs.cmu.edu/flite/
+
+
+
36.5 sine# TOC
+
+
Generate an audio signal made of a sine wave with amplitude 1/8.
+
+
The audio signal is bit-exact.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the carrier frequency. Default is 440 Hz.
+
+
+beep_factor, b
+Enable a periodic beep every second with frequency beep_factor times
+the carrier frequency. Default is 0, meaning the beep is disabled.
+
+
+sample_rate, r
+Specify the sample rate, default is 44100.
+
+
+duration, d
+Specify the duration of the generated audio stream.
+
+
+samples_per_frame
+Set the number of samples per output frame, default is 1024.
+
+
+
+
+
36.5.1 Examples# TOC
+
+
+ Generate a simple 440 Hz sine wave:
+
+
+ Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
+
+
sine=220:4:d=5
+sine=f=220:b=4:d=5
+sine=frequency=220:beep_factor=4:duration=5
+
+
+
+
+
+
+
37 Audio Sinks# TOC
+
+
Below is a description of the currently available audio sinks.
+
+
+
37.1 abuffersink# TOC
+
+
Buffer audio frames, and make them available to the end of the filter chain.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVABufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
37.2 anullsink# TOC
+
+
Null audio sink; do absolutely nothing with the input audio. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
38 Video Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the video filters included in your
+build.
+
+
Below is a description of the currently available video filters.
+
+
+
38.1 alphaextract# TOC
+
+
Extract the alpha component from the input as a grayscale video. This
+is especially useful with the alphamerge filter.
+
+
+
38.2 alphamerge# TOC
+
+
Add or replace the alpha component of the primary input with the
+grayscale value of a second input. This is intended for use with
+alphaextract to allow the transmission or storage of frame
+sequences that have alpha in a format that doesn’t support an alpha
+channel.
+
+
For example, to reconstruct full frames from a normal YUV-encoded video
+and a separate video created with alphaextract , you might use:
+
+
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+
+
Since this filter is designed for reconstruction, it operates on frame
+sequences without considering timestamps, and terminates when either
+input reaches end of stream. This will cause problems if your encoding
+pipeline drops frames. If you’re trying to apply an image as an
+overlay to a video stream, consider the overlay filter instead.
+
+
+
38.3 ass# TOC
+
+
Same as the subtitles filter, except that it doesn’t require libavcodec
+and libavformat to work. On the other hand, it is limited to ASS (Advanced
+Substation Alpha) subtitles files.
+
+
This filter accepts the following option in addition to the common options from
+the subtitles filter:
+
+
+shaping
+Set the shaping engine
+
+Available values are:
+
+‘auto ’
+The default libass shaping engine, which is the best available.
+
+‘simple ’
+Fast, font-agnostic shaper that can do only substitutions
+
+‘complex ’
+Slower shaper using OpenType for substitutions and positioning
+
+
+
+The default is auto
.
+
+
+
+
+
38.4 bbox# TOC
+
+
Compute the bounding box for the non-black pixels in the input frame
+luminance plane.
+
+
This filter computes the bounding box containing all the pixels with a
+luminance value greater than the minimum allowed value.
+The parameters describing the bounding box are printed on the filter
+log.
+
+
The filter accepts the following option:
+
+
+min_val
+Set the minimal luminance value. Default is 16
.
+
+
+
+
+
38.5 blackdetect# TOC
+
+
Detect video intervals that are (almost) completely black. Can be
+useful to detect chapter transitions, commercials, or invalid
+recordings. Output lines contains the time for the start, end and
+duration of the detected black interval expressed in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
The filter accepts the following options:
+
+
+black_min_duration, d
+Set the minimum detected black duration expressed in seconds. It must
+be a non-negative floating point number.
+
+Default value is 2.0.
+
+
+picture_black_ratio_th, pic_th
+Set the threshold for considering a picture "black".
+Express the minimum value for the ratio:
+
+
nb_black_pixels / nb_pixels
+
+
+for which a picture is considered black.
+Default value is 0.98.
+
+
+pixel_black_th, pix_th
+Set the threshold for considering a pixel "black".
+
+The threshold expresses the maximum pixel luminance value for which a
+pixel is considered "black". The provided value is scaled according to
+the following equation:
+
+
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+
+luminance_range_size and luminance_minimum_value depend on
+the input video format, the range is [0-255] for YUV full-range
+formats and [16-235] for YUV non full-range formats.
+
+Default value is 0.10.
+
+
+
+
The following example sets the maximum pixel threshold to the minimum
+value, and detects only black intervals of 2 or more seconds:
+
+
blackdetect=d=2:pix_th=0.00
+
+
+
+
38.6 blackframe# TOC
+
+
Detect frames that are (almost) completely black. Can be useful to
+detect chapter transitions or commercials. Output lines consist of
+the frame number of the detected frame, the percentage of blackness,
+the position in the file if known or -1 and the timestamp in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
It accepts the following parameters:
+
+
+amount
+The percentage of the pixels that have to be below the threshold; it defaults to
+98
.
+
+
+threshold, thresh
+The threshold below which a pixel value is considered black; it defaults to
+32
.
+
+
+
+
+
+
38.7 blend, tblend# TOC
+
+
Blend two video frames into each other.
+
+
The blend
filter takes two input streams and outputs one
+stream, the first input is the "top" layer and second input is
+"bottom" layer. Output terminates when shortest input terminates.
+
+
The tblend
(time blend) filter takes two consecutive frames
+from one single stream, and outputs the result obtained by blending
+the new frame on top of the old frame.
+
+
A description of the accepted options follows.
+
+
+c0_mode
+c1_mode
+c2_mode
+c3_mode
+all_mode
+Set blend mode for specific pixel component or all pixel components in case
+of all_mode . Default value is normal
.
+
+Available values for component modes are:
+
+‘addition ’
+‘and ’
+‘average ’
+‘burn ’
+‘darken ’
+‘difference ’
+‘difference128 ’
+‘divide ’
+‘dodge ’
+‘exclusion ’
+‘hardlight ’
+‘lighten ’
+‘multiply ’
+‘negation ’
+‘normal ’
+‘or ’
+‘overlay ’
+‘phoenix ’
+‘pinlight ’
+‘reflect ’
+‘screen ’
+‘softlight ’
+‘subtract ’
+‘vividlight ’
+‘xor ’
+
+
+
+c0_opacity
+c1_opacity
+c2_opacity
+c3_opacity
+all_opacity
+Set blend opacity for specific pixel component or all pixel components in case
+of all_opacity . Only used in combination with pixel component blend modes.
+
+
+c0_expr
+c1_expr
+c2_expr
+c3_expr
+all_expr
+Set blend expression for specific pixel component or all pixel components in case
+of all_expr . Note that related mode options will be ignored if those are set.
+
+The expressions can use the following variables:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+the coordinates of the current sample
+
+
+W
+H
+the width and height of currently filtered plane
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+TOP, A
+Value of pixel component at current location for first video frame (top layer).
+
+
+BOTTOM, B
+Value of pixel component at current location for second video frame (bottom layer).
+
+
+
+
+shortest
+Force termination when the shortest input terminates. Default is
+0
. This option is only defined for the blend
filter.
+
+
+repeatlast
+Continue applying the last bottom frame after the end of the stream. A value of
+0
disable the filter after the last frame of the bottom layer is reached.
+Default is 1
. This option is only defined for the blend
filter.
+
+
+
+
+
38.7.1 Examples# TOC
+
+
+ Apply transition from bottom layer to top layer in first 10 seconds:
+
+
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
+
+
+ Apply 1x1 checkerboard effect:
+
+
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
+
+
+ Apply uncover left effect:
+
+
blend=all_expr='if(gte(N*SW+X,W),A,B)'
+
+
+ Apply uncover down effect:
+
+
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
+
+
+ Apply uncover up-left effect:
+
+
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
+
+
+ Display differences between the current and the previous frame:
+
+
tblend=all_mode=difference128
+
+
+
+
+
38.8 boxblur# TOC
+
+
Apply a boxblur algorithm to the input video.
+
+
It accepts the following parameters:
+
+
+luma_radius, lr
+luma_power, lp
+chroma_radius, cr
+chroma_power, cp
+alpha_radius, ar
+alpha_power, ap
+
+
+
A description of the accepted options follows.
+
+
+luma_radius, lr
+chroma_radius, cr
+alpha_radius, ar
+Set an expression for the box radius in pixels used for blurring the
+corresponding input plane.
+
+The radius value must be a non-negative number, and must not be
+greater than the value of the expression min(w,h)/2
for the
+luma and alpha planes, and of min(cw,ch)/2
for the chroma
+planes.
+
+Default value for luma_radius is "2". If not specified,
+chroma_radius and alpha_radius default to the
+corresponding value set for luma_radius .
+
+The expressions can contain the following constants:
+
+w
+h
+The input width and height in pixels.
+
+
+cw
+ch
+The input chroma image width and height in pixels.
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p", hsub is 2 and vsub is 1.
+
+
+
+
+luma_power, lp
+chroma_power, cp
+alpha_power, ap
+Specify how many times the boxblur filter is applied to the
+corresponding plane.
+
+Default value for luma_power is 2. If not specified,
+chroma_power and alpha_power default to the
+corresponding value set for luma_power .
+
+A value of 0 will disable the effect.
+
+
+
+
+
38.8.1 Examples# TOC
+
+
+ Apply a boxblur filter with the luma, chroma, and alpha radii
+set to 2:
+
+
boxblur=luma_radius=2:luma_power=1
+boxblur=2:1
+
+
+ Set the luma radius to 2, and alpha and chroma radius to 0:
+
+
+ Set the luma and chroma radii to a fraction of the video dimension:
+
+
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
+
+
+
+
+
38.9 codecview# TOC
+
+
Visualize information exported by some codecs.
+
+
Some codecs can export information through frames using side-data or other
+means. For example, some MPEG based codecs export motion vectors through the
+export_mvs flag in the codec flags2 option.
+
+
The filter accepts the following option:
+
+
+mv
+Set motion vectors to visualize.
+
+Available flags for mv are:
+
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+
+
+
38.9.1 Examples# TOC
+
+
+ Visualizes multi-directionals MVs from P and B-Frames using ffplay
:
+
+
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
+
+
+
+
+
38.10 colorbalance# TOC
+
Modify intensity of primary colors (red, green and blue) of input frames.
+
+
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
+regions for the red-cyan, green-magenta or blue-yellow balance.
+
+
A positive adjustment value shifts the balance towards the primary color, a negative
+value towards the complementary color.
+
+
The filter accepts the following options:
+
+
+rs
+gs
+bs
+Adjust red, green and blue shadows (darkest pixels).
+
+
+rm
+gm
+bm
+Adjust red, green and blue midtones (medium pixels).
+
+
+rh
+gh
+bh
+Adjust red, green and blue highlights (brightest pixels).
+
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+
+
+
38.10.1 Examples# TOC
+
+
+ Add red color cast to shadows:
+
+
+
+
+
38.11 colorlevels# TOC
+
+
Adjust video input frames using levels.
+
+
The filter accepts the following options:
+
+
+rimin
+gimin
+bimin
+aimin
+Adjust red, green, blue and alpha input black point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+rimax
+gimax
+bimax
+aimax
+Adjust red, green, blue and alpha input white point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
+
+Input levels are used to lighten highlights (bright tones), darken shadows
+(dark tones), change the balance of bright and dark tones.
+
+
+romin
+gomin
+bomin
+aomin
+Adjust red, green, blue and alpha output black point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
+
+
+romax
+gomax
+bomax
+aomax
+Adjust red, green, blue and alpha output white point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
+
+Output levels allow manual selection of a constrained output level range.
+
+
+
+
+
38.11.1 Examples# TOC
+
+
+ Make video output darker:
+
+
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
+
+
+ Increase contrast:
+
+
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
+
+
+ Make video output lighter:
+
+
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
+
+
+ Increase brightness:
+
+
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
+
+
+
+
+
38.12 colorchannelmixer# TOC
+
+
Adjust video input frames by re-mixing color channels.
+
+
This filter modifies a color channel by adding the values associated to
+the other channels of the same pixels. For example if the value to
+modify is red, the output value will be:
+
+
red = red*rr + blue*rb + green*rg + alpha*ra
+
+
+
The filter accepts the following options:
+
+
+rr
+rg
+rb
+ra
+Adjust contribution of input red, green, blue and alpha channels for output red channel.
+Default is 1
for rr , and 0
for rg , rb and ra .
+
+
+gr
+gg
+gb
+ga
+Adjust contribution of input red, green, blue and alpha channels for output green channel.
+Default is 1
for gg , and 0
for gr , gb and ga .
+
+
+br
+bg
+bb
+ba
+Adjust contribution of input red, green, blue and alpha channels for output blue channel.
+Default is 1
for bb , and 0
for br , bg and ba .
+
+
+ar
+ag
+ab
+aa
+Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
+Default is 1
for aa , and 0
for ar , ag and ab .
+
+Allowed ranges for options are [-2.0, 2.0]
.
+
+
+
+
+
38.12.1 Examples# TOC
+
+
+ Convert source to grayscale:
+
+
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
+
+ Simulate sepia tones:
+
+
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
+
+
+
+
+
38.13 colormatrix# TOC
+
+
Convert color matrix.
+
+
The filter accepts the following options:
+
+
+src
+dst
+Specify the source and destination color matrix. Both values must be
+specified.
+
+The accepted values are:
+
+‘bt709 ’
+BT.709
+
+
+‘bt601 ’
+BT.601
+
+
+‘smpte240m ’
+SMPTE-240M
+
+
+‘fcc ’
+FCC
+
+
+
+
+
+
For example to convert from BT.601 to SMPTE-240M, use the command:
+
+
colormatrix=bt601:smpte240m
+
+
+
+
38.14 copy# TOC
+
+
Copy the input source unchanged to the output. This is mainly useful for
+testing purposes.
+
+
+
38.15 crop# TOC
+
+
Crop the input video to given dimensions.
+
+
It accepts the following parameters:
+
+
+w, out_w
+The width of the output video. It defaults to iw
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+h, out_h
+The height of the output video. It defaults to ih
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+x
+The horizontal position, in the input video, of the left edge of the output
+video. It defaults to (in_w-out_w)/2
.
+This expression is evaluated per-frame.
+
+
+y
+The vertical position, in the input video, of the top edge of the output video.
+It defaults to (in_h-out_h)/2
.
+This expression is evaluated per-frame.
+
+
+keep_aspect
+If set to 1 will force the output display aspect ratio
+to be the same of the input, by changing the output sample aspect
+ratio. It defaults to 0.
+
+
+
+
The out_w , out_h , x , y parameters are
+expressions containing the following constants:
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+in_w
+in_h
+The input width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (cropped) width and height.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+n
+The number of the input frame, starting from 0.
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
The expression for out_w may depend on the value of out_h ,
+and the expression for out_h may depend on out_w , but they
+cannot depend on x and y , as x and y are
+evaluated after out_w and out_h .
+
+
The x and y parameters specify the expressions for the
+position of the top-left corner of the output (non-cropped) area. They
+are evaluated for each frame. If the evaluated value is not valid, it
+is approximated to the nearest valid value.
+
+
The expression for x may depend on y , and the expression
+for y may depend on x .
+
+
+
38.15.1 Examples# TOC
+
+
+
+
+
38.16 cropdetect# TOC
+
+
Auto-detect the crop size.
+
+
It calculates the necessary cropping parameters and prints the
+recommended parameters via the logging system. The detected dimensions
+correspond to the non-black area of the input video.
+
+
It accepts the following parameters:
+
+
+limit
+Set higher black value threshold, which can be optionally specified
+from nothing (0) to everything (255 for 8bit based formats). An intensity
+value greater than the set value is considered non-black. It defaults to 24.
+You can also specify a value between 0.0 and 1.0 which will be scaled depending
+on the bitdepth of the pixel format.
+
+
+round
+The value which the width/height should be divisible by. It defaults to
+16. The offset is automatically adjusted to center the video. Use 2 to
+get only even dimensions (needed for 4:2:2 video). 16 is best when
+encoding to most video codecs.
+
+
+reset_count, reset
+Set the counter that determines after how many frames cropdetect will
+reset the previously detected largest video area and start over to
+detect the current optimal crop area. Default value is 0.
+
+This can be useful when channel logos distort the video area. 0
+indicates ’never reset’, and returns the largest area encountered during
+playback.
+
+
+
+
+
38.17 curves# TOC
+
+
Apply color adjustments using curves.
+
+
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
+component (red, green and blue) has its values defined by N key points
+tied from each other using a smooth curve. The x-axis represents the pixel
+values from the input frame, and the y-axis the new pixel values to be set for
+the output frame.
+
+
By default, a component curve is defined by the two points (0;0) and
+(1;1) . This creates a straight line where each original pixel value is
+"adjusted" to its own value, which means no change to the image.
+
+
The filter allows you to redefine these two points and add some more. A new
+curve (using a natural cubic spline interpolation) will be defined to pass
+smoothly through all these new coordinates. The newly defined points need to be
+strictly increasing over the x-axis, and their x and y values must
+be in the [0;1] interval. If the computed curves happened to go outside
+the vector spaces, the values will be clipped accordingly.
+
+
If there is no key point defined in x=0
, the filter will automatically
+insert a (0;0) point. In the same way, if there is no key point defined
+in x=1
, the filter will automatically insert a (1;1) point.
+
+
The filter accepts the following options:
+
+
+preset
+Select one of the available color presets. This option can be used in addition
+to the r , g , b parameters; in this case, the latter
+options take priority over the preset values.
+Available presets are:
+
+‘none ’
+‘color_negative ’
+‘cross_process ’
+‘darker ’
+‘increase_contrast ’
+‘lighter ’
+‘linear_contrast ’
+‘medium_contrast ’
+‘negative ’
+‘strong_contrast ’
+‘vintage ’
+
+Default is none
.
+
+master, m
+Set the master key points. These points will define a second pass mapping. It
+is sometimes called a "luminance" or "value" mapping. It can be used with
+r , g , b or all since it acts like a
+post-processing LUT.
+
+red, r
+Set the key points for the red component.
+
+green, g
+Set the key points for the green component.
+
+blue, b
+Set the key points for the blue component.
+
+all
+Set the key points for all components (not including master).
+Can be used in addition to the other key points component
+options. In this case, the unset component(s) will fallback on this
+all setting.
+
+psfile
+Specify a Photoshop curves file (.asv
) to import the settings from.
+
+
+
+
To avoid some filtergraph syntax conflicts, each key points list need to be
+defined using the following syntax: x0/y0 x1/y1 x2/y2 ...
.
+
+
+
38.17.1 Examples# TOC
+
+
+
+
+
38.18 dctdnoiz# TOC
+
+
Denoise frames using 2D DCT (frequency domain filtering).
+
+
This filter is not designed for real time.
+
+
The filter accepts the following options:
+
+
+sigma, s
+Set the noise sigma constant.
+
+This sigma defines a hard threshold of 3 * sigma
; every DCT
+coefficient (absolute value) below this threshold will be dropped.
+
+If you need a more advanced filtering, see expr .
+
+Default is 0
.
+
+
+overlap
+Set number overlapping pixels for each block. Since the filter can be slow, you
+may want to reduce this value, at the cost of a less effective filter and the
+risk of various artefacts.
+
+If the overlapping value doesn’t allow processing the whole input width or
+height, a warning will be displayed and according borders won’t be denoised.
+
+Default value is blocksize -1, which is the best possible setting.
+
+
+expr, e
+Set the coefficient factor expression.
+
+For each coefficient of a DCT block, this expression will be evaluated as a
+multiplier value for the coefficient.
+
+If this option is set, the sigma option will be ignored.
+
+The absolute value of the coefficient can be accessed through the c
+variable.
+
+
+n
+Set the blocksize using the number of bits. 1<<n
defines the
+blocksize , which is the width and height of the processed blocks.
+
+The default value is 3 (8x8) and can be raised to 4 for a
+blocksize of 16x16. Note that changing this setting has huge consequences
+on the processing speed. Also, a larger block size does not necessarily mean a
+better de-noising.
+
+
+
+
+
38.18.1 Examples# TOC
+
+
Apply a denoise with a sigma of 4.5
:
+
+
+
The same operation can be achieved using the expression system:
+
+
dctdnoiz=e='gte(c, 4.5*3)'
+
+
+
Violent denoise using a block size of 16x16
:
+
+
+
+
38.19 decimate# TOC
+
+
Drop duplicated frames at regular intervals.
+
+
The filter accepts the following options:
+
+
+cycle
+Set the number of frames from which one will be dropped. Setting this to
+N means one frame in every batch of N frames will be dropped.
+Default is 5
.
+
+
+dupthresh
+Set the threshold for duplicate detection. If the difference metric for a frame
+is less than or equal to this value, then it is declared as duplicate. Default
+is 1.1.
+
+
+scthresh
+Set scene change threshold. Default is 15
.
+
+
+blockx
+blocky
+Set the size of the x and y-axis blocks used during metric calculations.
+Larger blocks give better noise suppression, but also give worse detection of
+small movements. Must be a power of two. Default is 32
.
+
+
+ppsrc
+Mark main input as a pre-processed input and activate clean source input
+stream. This allows the input to be pre-processed with various filters to help
+the metrics calculation while keeping the frame selection lossless. When set to
+1
, the first stream is for the pre-processed input, and the second
+stream is the clean source from where the kept frames are chosen. Default is
+0
.
+
+
+chroma
+Set whether or not chroma is considered in the metric calculations. Default is
+1
.
+
+
+
+
+
38.20 dejudder# TOC
+
+
Remove judder produced by partially interlaced telecined content.
+
+
Judder can be introduced, for instance, by pullup filter. If the original
+source was partially telecined content then the output of pullup,dejudder
+will have a variable frame rate. May change the recorded frame rate of the
+container. Aside from that change, this filter will not affect constant frame
+rate video.
+
+
The option available in this filter is:
+
+cycle
+Specify the length of the window over which the judder repeats.
+
+Accepts any integer greater than 1. Useful values are:
+
+‘4 ’
+If the original was telecined from 24 to 30 fps (Film to NTSC).
+
+
+‘5 ’
+If the original was telecined from 25 to 30 fps (PAL to NTSC).
+
+
+‘20 ’
+If a mixture of the two.
+
+
+
+The default is ‘4 ’.
+
+
+
+
+
38.21 delogo# TOC
+
+
Suppress a TV station logo by a simple interpolation of the surrounding
+pixels. Just set a rectangle covering the logo and watch it disappear
+(and sometimes something even uglier appear - your mileage may vary).
+
+
It accepts the following parameters:
+
+x
+y
+Specify the top left corner coordinates of the logo. They must be
+specified.
+
+
+w
+h
+Specify the width and height of the logo to clear. They must be
+specified.
+
+
+band, t
+Specify the thickness of the fuzzy edge of the rectangle (added to
+w and h ). The default value is 4.
+
+
+show
+When set to 1, a green rectangle is drawn on the screen to simplify
+finding the right x , y , w , and h parameters.
+The default value is 0.
+
+The rectangle is drawn on the outermost pixels which will be (partly)
+replaced with interpolated values. The values of the next pixels
+immediately outside this rectangle in each direction will be used to
+compute the interpolated pixel values inside the rectangle.
+
+
+
+
+
+
38.21.1 Examples# TOC
+
+
+ Set a rectangle covering the area with top left corner coordinates 0,0
+and size 100x77, and a band of size 10:
+
+
delogo=x=0:y=0:w=100:h=77:band=10
+
+
+
+
+
+
38.22 deshake# TOC
+
+
Attempt to fix small changes in horizontal and/or vertical shift. This
+filter helps remove camera shake from hand-holding a camera, bumping a
+tripod, moving on a vehicle, etc.
+
+
The filter accepts the following options:
+
+
+x
+y
+w
+h
+Specify a rectangular area where to limit the search for motion
+vectors.
+If desired the search for motion vectors can be limited to a
+rectangular area of the frame defined by its top left corner, width
+and height. These parameters have the same meaning as the drawbox
+filter which can be used to visualise the position of the bounding
+box.
+
+This is useful when simultaneous movement of subjects within the frame
+might be confused for camera motion by the motion vector search.
+
+If any or all of x , y , w and h are set to -1
+then the full frame is used. This allows later options to be set
+without specifying the bounding box for the motion vector search.
+
+Default - search the whole frame.
+
+
+rx
+ry
+Specify the maximum extent of movement in x and y directions in the
+range 0-64 pixels. Default 16.
+
+
+edge
+Specify how to generate pixels to fill blanks at the edge of the
+frame. Available values are:
+
+‘blank, 0 ’
+Fill zeroes at blank locations
+
+‘original, 1 ’
+Original image at blank locations
+
+‘clamp, 2 ’
+Extruded edge value at blank locations
+
+‘mirror, 3 ’
+Mirrored edge at blank locations
+
+
+Default value is ‘mirror ’.
+
+
+blocksize
+Specify the blocksize to use for motion search. Range 4-128 pixels,
+default 8.
+
+
+contrast
+Specify the contrast threshold for blocks. Only blocks with more than
+the specified contrast (difference between darkest and lightest
+pixels) will be considered. Range 1-255, default 125.
+
+
+search
+Specify the search strategy. Available values are:
+
+‘exhaustive, 0 ’
+Set exhaustive search
+
+‘less, 1 ’
+Set less exhaustive search.
+
+
+Default value is ‘exhaustive ’.
+
+
+filename
+If set then a detailed log of the motion search is written to the
+specified file.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
+
38.23 drawbox# TOC
+
+
Draw a colored box on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the top left corner coordinates of the box. It defaults to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the box; if 0 they are interpreted as
+the input width and height. It defaults to 0.
+
+
+color, c
+Specify the color of the box to write. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the box edge color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the box edge. Default value is 3
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w , h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y offset coordinates where the box is drawn.
+
+
+w
+h
+The width and height of the drawn box.
+
+
+t
+The thickness of the drawn box.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
38.23.1 Examples# TOC
+
+
+
+
+
38.24 drawgrid# TOC
+
+
Draw a grid on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
+input width and height, respectively, minus thickness
, so image gets
+framed. Default to 0.
+
+
+color, c
+Specify the color of the grid. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the grid color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the grid line. Default value is 1
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w , h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input grid cell width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y coordinates of some point of grid intersection (meant to configure offset).
+
+
+w
+h
+The width and height of the drawn cell.
+
+
+t
+The thickness of the drawn cell.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
38.24.1 Examples# TOC
+
+
+ Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
+
+
drawgrid=width=100:height=100:thickness=2:color=red@0.5
+
+
+ Draw a white 3x3 grid with an opacity of 50%:
+
+
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
+
+
+
+
+
38.25 drawtext# TOC
+
+
Draw a text string or text from a specified file on top of a video, using the
+libfreetype library.
+
+
To enable compilation of this filter, you need to configure FFmpeg with
+--enable-libfreetype
.
+To enable default font fallback and the font option you need to
+configure FFmpeg with --enable-libfontconfig
.
+To enable the text_shaping option, you need to configure FFmpeg with
+--enable-libfribidi
.
+
+
+
38.25.1 Syntax# TOC
+
+
It accepts the following parameters:
+
+
+box
+Used to draw a box around text using the background color.
+The value must be either 1 (enable) or 0 (disable).
+The default value of box is 0.
+
+
+boxcolor
+The color to be used for drawing box around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of boxcolor is "white".
+
+
+borderw
+Set the width of the border to be drawn around the text using bordercolor .
+The default value of borderw is 0.
+
+
+bordercolor
+Set the color to be used for drawing border around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of bordercolor is "black".
+
+
+expansion
+Select how the text is expanded. Can be either none
,
+strftime
(deprecated) or
+normal
(default). See the Text expansion section
+below for details.
+
+
+fix_bounds
+If true, check and fix text coords to avoid clipping.
+
+
+fontcolor
+The color to be used for drawing fonts. For the syntax of this option, check
+the "Color" section in the ffmpeg-utils manual.
+
+The default value of fontcolor is "black".
+
+
+fontcolor_expr
+String which is expanded the same way as text to obtain dynamic
+fontcolor value. By default this option has empty value and is not
+processed. When this option is set, it overrides fontcolor option.
+
+
+font
+The font family to be used for drawing text. By default Sans.
+
+
+fontfile
+The font file to be used for drawing text. The path must be included.
+This parameter is mandatory if the fontconfig support is disabled.
+
+
+fontsize
+The font size to be used for drawing text.
+The default value of fontsize is 16.
+
+
+text_shaping
+If set to 1, attempt to shape the text (for example, reverse the order of
+right-to-left text and join Arabic characters) before drawing it.
+Otherwise, just draw the text exactly as given.
+By default 1 (if supported).
+
+
+ft_load_flags
+The flags to be used for loading the fonts.
+
+The flags map the corresponding flags supported by libfreetype, and are
+a combination of the following values:
+
+default
+no_scale
+no_hinting
+render
+no_bitmap
+vertical_layout
+force_autohint
+crop_bitmap
+pedantic
+ignore_global_advance_width
+no_recurse
+ignore_transform
+monochrome
+linear_design
+no_autohint
+
+
+Default value is "default".
+
+For more information consult the documentation for the FT_LOAD_*
+libfreetype flags.
+
+
+shadowcolor
+The color to be used for drawing a shadow behind the drawn text. For the
+syntax of this option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of shadowcolor is "black".
+
+
+shadowx
+shadowy
+The x and y offsets for the text shadow position with respect to the
+position of the text. They can be either positive or negative
+values. The default value for both is "0".
+
+
+start_number
+The starting frame number for the n/frame_num variable. The default value
+is "0".
+
+
+tabsize
+The size in number of spaces to use for rendering the tab.
+Default value is 4.
+
+
+timecode
+Set the initial timecode representation in "hh:mm:ss[:;.]ff"
+format. It can be used with or without text parameter. timecode_rate
+option must be specified.
+
+
+timecode_rate, rate, r
+Set the timecode frame rate (timecode only).
+
+
+text
+The text string to be drawn. The text must be a sequence of UTF-8
+encoded characters.
+This parameter is mandatory if no file is specified with the parameter
+textfile .
+
+
+textfile
+A text file containing text to be drawn. The text must be a sequence
+of UTF-8 encoded characters.
+
+This parameter is mandatory if no text string is specified with the
+parameter text .
+
+If both text and textfile are specified, an error is thrown.
+
+
+reload
+If set to 1, the textfile will be reloaded before each frame.
+Be sure to update it atomically, or it may be read partially, or even fail.
+
+
+x
+y
+The expressions which specify the offsets where text will be drawn
+within the video frame. They are relative to the top/left border of the
+output image.
+
+The default value of x and y is "0".
+
+See below for the list of accepted constants and functions.
+
+
+
+
The parameters for x and y are expressions containing the
+following constants and functions:
+
+
+dar
+input display aspect ratio, it is the same as (w / h ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+line_h, lh
+the height of each text line
+
+
+main_h, h, H
+the input height
+
+
+main_w, w, W
+the input width
+
+
+max_glyph_a, ascent
+the maximum distance from the baseline to the highest/upper grid
+coordinate used to place a glyph outline point, for all the rendered
+glyphs.
+It is a positive value, due to the grid’s orientation with the Y axis
+upwards.
+
+
+max_glyph_d, descent
+the maximum distance from the baseline to the lowest grid coordinate
+used to place a glyph outline point, for all the rendered glyphs.
+This is a negative value, due to the grid’s orientation, with the Y axis
+upwards.
+
+
+max_glyph_h
+maximum glyph height, that is the maximum height for all the glyphs
+contained in the rendered text, it is equivalent to ascent -
+descent .
+
+
+max_glyph_w
+maximum glyph width, that is the maximum width for all the glyphs
+contained in the rendered text
+
+
+n
+the number of the input frame, starting from 0
+
+
+rand(min, max)
+return a random number included between min and max
+
+
+sar
+The input sample aspect ratio.
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+text_h, th
+the height of the rendered text
+
+
+text_w, tw
+the width of the rendered text
+
+
+x
+y
+the x and y offset coordinates where the text is drawn.
+
+These parameters allow the x and y expressions to refer
+each other, so you can for example specify y=x/dar
.
+
+
+
+
+
38.25.2 Text expansion# TOC
+
+
If expansion is set to strftime
,
+the filter recognizes strftime() sequences in the provided text and
+expands them accordingly. Check the documentation of strftime(). This
+feature is deprecated.
+
+
If expansion is set to none
, the text is printed verbatim.
+
+
If expansion is set to normal
(which is the default),
+the following expansion mechanism is used.
+
+
The backslash character ’\’, followed by any character, always expands to
+the second character.
+
+
Sequence of the form %{...}
are expanded. The text between the
+braces is a function name, possibly followed by arguments separated by ’:’.
+If the arguments contain special characters or delimiters (’:’ or ’}’),
+they should be escaped.
+
+
Note that they probably must also be escaped as the value for the
+text option in the filter argument string and as the filter
+argument in the filtergraph description, and possibly also for the shell,
+that makes up to four levels of escaping; using a text file avoids these
+problems.
+
+
The following functions are available:
+
+
+expr, e
+The expression evaluation result.
+
+It must take one argument specifying the expression to be evaluated,
+which accepts the same constants and functions as the x and
+y values. Note that not all constants should be used, for
+example the text size is not known when evaluating the expression, so
+the constants text_w and text_h will have an undefined
+value.
+
+
+expr_int_format, eif
+Evaluate the expression’s value and output as formatted integer.
+
+The first argument is the expression to be evaluated, just as for the expr function.
+The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
+’u’. They are treated exactly as in the printf function.
+The third parameter is optional and sets the number of positions taken by the output.
+It can be used to add padding with zeros from the left.
+
+
+gmtime
+The time at which the filter is running, expressed in UTC.
+It can accept an argument: a strftime() format string.
+
+
+localtime
+The time at which the filter is running, expressed in the local time zone.
+It can accept an argument: a strftime() format string.
+
+
+metadata
+Frame metadata. It must take one argument specifying metadata key.
+
+
+n, frame_num
+The frame number, starting from 0.
+
+
+pict_type
+A 1 character description of the current picture type.
+
+
+pts
+The timestamp of the current frame.
+It can take up to two arguments.
+
+The first argument is the format of the timestamp; it defaults to flt
+for seconds as a decimal number with microsecond accuracy; hms
stands
+for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
+
+The second argument is an offset added to the timestamp.
+
+
+
+
+
+
38.25.3 Examples# TOC
+
+
+
+
For more information about libfreetype, check:
+http://www.freetype.org/ .
+
+
For more information about fontconfig, check:
+http://freedesktop.org/software/fontconfig/fontconfig-user.html .
+
+
For more information about libfribidi, check:
+http://fribidi.org/ .
+
+
+
38.26 edgedetect# TOC
+
+
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
+
+
The filter accepts the following options:
+
+
+low
+high
+Set low and high threshold values used by the Canny thresholding
+algorithm.
+
+The high threshold selects the "strong" edge pixels, which are then
+connected through 8-connectivity with the "weak" edge pixels selected
+by the low threshold.
+
+low and high threshold values must be chosen in the range
+[0,1], and low should be less than or equal to high .
+
+Default value for low is 20/255
, and default value for high
+is 50/255
.
+
+
+mode
+Define the drawing mode.
+
+
+‘wires ’
+Draw white/gray wires on black background.
+
+
+‘colormix ’
+Mix the colors to create a paint/cartoon effect.
+
+
+
+Default value is wires .
+
+
+
+
+
38.26.1 Examples# TOC
+
+
+ Standard edge detection with custom values for the hysteresis thresholding:
+
+
edgedetect=low=0.1:high=0.4
+
+
+ Painting effect without thresholding:
+
+
edgedetect=mode=colormix:high=0
+
+
+
+
+
38.27 extractplanes# TOC
+
+
Extract color channel components from input video stream into
+separate grayscale video streams.
+
+
The filter accepts the following option:
+
+
+planes
+Set plane(s) to extract.
+
+Available values for planes are:
+
+‘y ’
+‘u ’
+‘v ’
+‘a ’
+‘r ’
+‘g ’
+‘b ’
+
+
+Choosing planes not available in the input will result in an error.
+That means you cannot select r
, g
, b
planes
+with y
, u
, v
planes at the same time.
+
+
+
+
+
38.27.1 Examples# TOC
+
+
+ Extract luma, u and v color channel component from input video frame
+into 3 grayscale outputs:
+
+
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
+
+
+
+
+
38.28 elbg# TOC
+
+
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
+
+
For each input image, the filter will compute the optimal mapping from
+the input to the output given the codebook length, that is the number
+of distinct output colors.
+
+
This filter accepts the following options.
+
+
+codebook_length, l
+Set codebook length. The value must be a positive integer, and
+represents the number of distinct output colors. Default value is 256.
+
+
+nb_steps, n
+Set the maximum number of iterations to apply for computing the optimal
+mapping. The higher the value the better the result and the higher the
+computation time. Default value is 1.
+
+
+seed, s
+Set a random seed, must be an integer included between 0 and
+UINT32_MAX. If not specified, or if explicitly set to -1, the filter
+will try to use a good random seed on a best effort basis.
+
+
+
+
+
38.29 fade# TOC
+
+
Apply a fade-in/out effect to the input video.
+
+
It accepts the following parameters:
+
+
+type, t
+The effect type can be either "in" for a fade-in, or "out" for a fade-out
+effect.
+Default is in
.
+
+
+start_frame, s
+Specify the number of the frame to start applying the fade
+effect at. Default is 0.
+
+
+nb_frames, n
+The number of frames that the fade effect lasts. At the end of the
+fade-in effect, the output video will have the same intensity as the input video.
+At the end of the fade-out transition, the output video will be filled with the
+selected color .
+Default is 25.
+
+
+alpha
+If set to 1, fade only alpha channel, if one exists on the input.
+Default value is 0.
+
+
+start_time, st
+Specify the timestamp (in seconds) of the frame to start to apply the fade
+effect. If both start_frame and start_time are specified, the fade will start at
+whichever comes last. Default is 0.
+
+
+duration, d
+The number of seconds for which the fade effect has to last. At the end of the
+fade-in effect the output video will have the same intensity as the input video,
+at the end of the fade-out transition the output video will be filled with the
+selected color .
+If both duration and nb_frames are specified, duration is used. Default is 0.
+
+
+color, c
+Specify the color of the fade. Default is "black".
+
+
+
+
+
38.29.1 Examples# TOC
+
+
+
+
+
38.30 field# TOC
+
+
Extract a single field from an interlaced image using stride
+arithmetic to avoid wasting CPU time. The output frames are marked as
+non-interlaced.
+
+
The filter accepts the following options:
+
+
+type
+Specify whether to extract the top (if the value is 0
or
+top
) or the bottom field (if the value is 1
or
+bottom
).
+
+
+
+
+
38.31 fieldmatch# TOC
+
+
Field matching filter for inverse telecine. It is meant to reconstruct the
+progressive frames from a telecined stream. The filter does not drop duplicated
+frames, so to achieve a complete inverse telecine fieldmatch
needs to be
+followed by a decimation filter such as decimate in the filtergraph.
+
+
The separation of the field matching and the decimation is notably motivated by
+the possibility of inserting a de-interlacing filter fallback between the two.
+If the source has mixed telecined and real interlaced content,
+fieldmatch
will not be able to match fields for the interlaced parts.
+But these remaining combed frames will be marked as interlaced, and thus can be
+de-interlaced by a later filter such as yadif before decimation.
+
+
In addition to the various configuration options, fieldmatch
can take an
+optional second stream, activated through the ppsrc option. If
+enabled, the frames reconstruction will be based on the fields and frames from
+this second stream. This allows the first input to be pre-processed in order to
+help the various algorithms of the filter, while keeping the output lossless
+(assuming the fields are matched properly). Typically, a field-aware denoiser,
+or brightness/contrast adjustments can help.
+
+
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
+and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM from
+which fieldmatch
is based on. While the semantic and usage are very
+close, some behaviour and options names can differ.
+
+
The decimate filter currently only works for constant frame rate input.
+Do not use fieldmatch
and decimate if your input has mixed
+telecined and progressive content with changing framerate.
+
+
The filter accepts the following options:
+
+
+order
+Specify the assumed field order of the input stream. Available values are:
+
+
+‘auto ’
+Auto detect parity (use FFmpeg’s internal parity value).
+
+‘bff ’
+Assume bottom field first.
+
+‘tff ’
+Assume top field first.
+
+
+
+Note that it is sometimes recommended not to trust the parity announced by the
+stream.
+
+Default value is auto .
+
+
+mode
+Set the matching mode or strategy to use. pc mode is the safest in the
+sense that it won’t risk creating jerkiness due to duplicate frames when
+possible, but if there are bad edits or blended fields it will end up
+outputting combed frames when a good match might actually exist. On the other
+hand, pcn_ub mode is the most risky in terms of creating jerkiness,
+but will almost always find a good frame if there is one. The other values are
+all somewhere in between pc and pcn_ub in terms of risking
+jerkiness and creating duplicate frames versus finding good matches in sections
+with bad edits, orphaned fields, blended fields, etc.
+
+More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
+
+Available values are:
+
+
+‘pc ’
+2-way matching (p/c)
+
+‘pc_n ’
+2-way matching, and trying 3rd match if still combed (p/c + n)
+
+‘pc_u ’
+2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
+
+‘pc_n_ub ’
+2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
+still combed (p/c + n + u/b)
+
+‘pcn ’
+3-way matching (p/c/n)
+
+‘pcn_ub ’
+3-way matching, and trying 4th/5th matches if all 3 of the original matches are
+detected as combed (p/c/n + u/b)
+
+
+
+The parenthesis at the end indicate the matches that would be used for that
+mode assuming order =tff (and field on auto or
+top ).
+
+In terms of speed pc mode is by far the fastest and pcn_ub is
+the slowest.
+
+Default value is pc_n .
+
+
+ppsrc
+Mark the main input stream as a pre-processed input, and enable the secondary
+input stream as the clean source to pick the fields from. See the filter
+introduction for more details. It is similar to the clip2 feature from
+VFM/TFM.
+
+Default value is 0
(disabled).
+
+
+field
+Set the field to match from. It is recommended to set this to the same value as
+order unless you experience matching failures with that setting. In
+certain circumstances changing the field that is used to match from can have a
+large impact on matching performance. Available values are:
+
+
+‘auto ’
+Automatic (same value as order ).
+
+‘bottom ’
+Match from the bottom field.
+
+‘top ’
+Match from the top field.
+
+
+
+Default value is auto .
+
+
+mchroma
+Set whether or not chroma is included during the match comparisons. In most
+cases it is recommended to leave this enabled. You should set this to 0
+only if your clip has bad chroma problems such as heavy rainbowing or other
+artifacts. Setting this to 0
could also be used to speed things up at
+the cost of some accuracy.
+
+Default value is 1
.
+
+
+y0
+y1
+These define an exclusion band which excludes the lines between y0 and
+y1 from being included in the field matching decision. An exclusion
+band can be used to ignore subtitles, a logo, or other things that may
+interfere with the matching. y0 sets the starting scan line and
+y1 sets the ending line; all lines in between y0 and
+y1 (including y0 and y1 ) will be ignored. Setting
+y0 and y1 to the same value will disable the feature.
+y0 and y1 defaults to 0
.
+
+
+scthresh
+Set the scene change detection threshold as a percentage of maximum change on
+the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
+detection is only relevant in case combmatch =sc . The range for
+scthresh is [0.0, 100.0]
.
+
+Default value is 12.0
.
+
+
+combmatch
+When combmatch is not none , fieldmatch
will take into
+account the combed scores of matches when deciding what match to use as the
+final match. Available values are:
+
+
+‘none ’
+No final matching based on combed scores.
+
+‘sc ’
+Combed scores are only used when a scene change is detected.
+
+‘full ’
+Use combed scores all the time.
+
+
+
+Default is sc .
+
+
+combdbg
+Force fieldmatch
to calculate the combed metrics for certain matches and
+print them. This setting is known as micout in TFM/VFM vocabulary.
+Available values are:
+
+
+‘none ’
+No forced calculation.
+
+‘pcn ’
+Force p/c/n calculations.
+
+‘pcnub ’
+Force p/c/n/u/b calculations.
+
+
+
+Default value is none .
+
+
+cthresh
+This is the area combing threshold used for combed frame detection. This
+essentially controls how "strong" or "visible" combing must be to be detected.
+Larger values mean combing must be more visible and smaller values mean combing
+can be less visible or strong and still be detected. Valid settings are from
+-1
(every pixel will be detected as combed) to 255
(no pixel will
+be detected as combed). This is basically a pixel difference value. A good
+range is [8, 12]
.
+
+Default value is 9
.
+
+
+chroma
+Sets whether or not chroma is considered in the combed frame decision. Only
+disable this if your source has chroma problems (rainbowing, etc.) that are
+causing problems for the combed frame detection with chroma enabled. Actually,
+using chroma =0 is usually more reliable, except for the case
+where there is chroma only combing in the source.
+
+Default value is 0
.
+
+
+blockx
+blocky
+Respectively set the x-axis and y-axis size of the window used during combed
+frame detection. This has to do with the size of the area in which
+combpel pixels are required to be detected as combed for a frame to be
+declared combed. See the combpel parameter description for more info.
+Possible values are any number that is a power of 2 starting at 4 and going up
+to 512.
+
+Default value is 16
.
+
+
+combpel
+The number of combed pixels inside any of the blocky by
+blockx size blocks on the frame for the frame to be detected as
+combed. While cthresh controls how "visible" the combing must be, this
+setting controls "how much" combing there must be in any localized area (a
+window defined by the blockx and blocky settings) on the
+frame. Minimum value is 0
and maximum is blocky x blockx
(at
+which point no frames will ever be detected as combed). This setting is known
+as MI in TFM/VFM vocabulary.
+
+Default value is 80
.
+
+
+
+
+
38.31.1 p/c/n/u/b meaning# TOC
+
+
+
38.31.1.1 p/c/n# TOC
+
+
We assume the following telecined stream:
+
+
+
Top fields: 1 2 2 3 4
+Bottom fields: 1 2 3 4 4
+
+
+
The numbers correspond to the progressive frame the fields relate to. Here, the
+first two frames are progressive, the 3rd and 4th are combed, and so on.
+
+
When fieldmatch
is configured to run a matching from bottom
+(field =bottom ) this is how this input stream gets transformed:
+
+
+
Input stream:
+ T 1 2 2 3 4
+ B 1 2 3 4 4 <-- matching reference
+
+Matches: c c n n c
+
+Output stream:
+ T 1 2 3 4 4
+ B 1 2 3 4 4
+
+
+
As a result of the field matching, we can see that some frames get duplicated.
+To perform a complete inverse telecine, you need to rely on a decimation filter
+after this operation. See for instance the decimate filter.
+
+
The same operation now matching from top fields (field =top )
+looks like this:
+
+
+
Input stream:
+ T 1 2 2 3 4 <-- matching reference
+ B 1 2 3 4 4
+
+Matches: c c p p c
+
+Output stream:
+ T 1 2 2 3 4
+ B 1 2 2 3 4
+
+
+
In these examples, we can see what p , c and n mean;
+basically, they refer to the frame and field of the opposite parity:
+
+
+ p matches the field of the opposite parity in the previous frame
+ c matches the field of the opposite parity in the current frame
+ n matches the field of the opposite parity in the next frame
+
+
+
+
38.31.1.2 u/b# TOC
+
+
The u and b matching are a bit special in the sense that they match
+from the opposite parity flag. In the following examples, we assume that we are
+currently matching the 2nd frame (Top:2, bottom:2). According to the match, an
+’x’ is placed above and below each matched field.
+
+
With bottom matching (field =bottom ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 1 2 2 2
+ 2 2 2 1 3
+
+
+
With top matching (field =top ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 2 2 1 2
+ 2 1 3 2 2
+
+
+
+
38.31.2 Examples# TOC
+
+
Simple IVTC of a top field first telecined stream:
+
+
fieldmatch=order=tff:combmatch=none, decimate
+
+
+
Advanced IVTC, with fallback on yadif for still combed frames:
+
+
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+
+
+
38.32 fieldorder# TOC
+
+
Transform the field order of the input video.
+
+
It accepts the following parameters:
+
+
+order
+The output field order. Valid values are tff for top field first or bff
+for bottom field first.
+
+
+
+
The default value is ‘tff ’.
+
+
The transformation is done by shifting the picture content up or down
+by one line, and filling the remaining line with appropriate picture content.
+This method is consistent with most broadcast field order converters.
+
+
If the input video is not flagged as being interlaced, or it is already
+flagged as being of the required output field order, then this filter does
+not alter the incoming video.
+
+
It is very useful when converting to or from PAL DV material,
+which is bottom field first.
+
+
For example:
+
+
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+
+
+
38.33 fifo# TOC
+
+
Buffer input images and send them when they are requested.
+
+
It is mainly useful when auto-inserted by the libavfilter
+framework.
+
+
It does not take parameters.
+
+
+
38.34 format# TOC
+
+
Convert the input video to one of the specified pixel formats.
+Libavfilter will try to pick one that is suitable as input to
+the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
38.34.1 Examples# TOC
+
+
+
+
+
38.35 fps# TOC
+
+
Convert the video to specified constant frame rate by duplicating or dropping
+frames as necessary.
+
+
It accepts the following parameters:
+
+fps
+The desired output frame rate. The default is 25
.
+
+
+round
+Rounding method.
+
+Possible values are:
+
+zero
+zero round towards 0
+
+inf
+round away from 0
+
+down
+round towards -infinity
+
+up
+round towards +infinity
+
+near
+round to nearest
+
+
+The default is near
.
+
+
+start_time
+Assume the first PTS should be the given value, in seconds. This allows for
+padding/trimming at the start of stream. By default, no assumption is made
+about the first frame’s expected PTS, so no padding or trimming is done.
+For example, this could be set to 0 to pad the beginning with duplicates of
+the first frame if a video stream starts after the audio stream or to trim any
+frames with a negative PTS.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+fps [:round ].
+
+
See also the setpts filter.
+
+
+
38.35.1 Examples# TOC
+
+
+ A typical usage in order to set the fps to 25:
+
+
+ Sets the fps to 24, using abbreviation and rounding method to round to nearest:
+
+
fps=fps=film:round=near
+
+
+
+
+
38.36 framepack# TOC
+
+
Pack two different video streams into a stereoscopic video, setting proper
+metadata on supported codecs. The two views should have the same size and
+framerate and processing will stop when the shorter video ends. Please note
+that you may conveniently adjust view properties with the scale and
+fps filters.
+
+
It accepts the following parameters:
+
+format
+The desired packing format. Supported values are:
+
+
+sbs
+The views are next to each other (default).
+
+
+tab
+The views are on top of each other.
+
+
+lines
+The views are packed by line.
+
+
+columns
+The views are packed by column.
+
+
+frameseq
+The views are temporally interleaved.
+
+
+
+
+
+
+
+
Some examples:
+
+
+
# Convert left and right views into a frame-sequential video
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+
+
+
38.37 framestep# TOC
+
+
Select one frame every N-th frame.
+
+
This filter accepts the following option:
+
+step
+Select frame after every step
frames.
+Allowed values are positive integers higher than 0. Default value is 1
.
+
+
+
+
+
38.38 frei0r# TOC
+
+
Apply a frei0r effect to the input video.
+
+
To enable the compilation of this filter, you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the frei0r effect to load. If the environment variable
+FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
+directories specified by the colon-separated list in FREI0R_PATH
.
+Otherwise, the standard frei0r paths are searched, in this order:
+HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
+/usr/lib/frei0r-1/ .
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r effect.
+
+
+
+
+
A frei0r effect parameter can be a boolean (its value is either
+"y" or "n"), a double, a color (specified as
+R /G /B , where R , G , and B are floating point
+numbers between 0.0 and 1.0, inclusive) or by a color description specified in the "Color"
+section in the ffmpeg-utils manual), a position (specified as X /Y , where
+X and Y are floating point numbers) and/or a string.
+
+
The number and types of parameters depend on the loaded effect. If an
+effect parameter is not specified, the default value is set.
+
+
+
38.38.1 Examples# TOC
+
+
+ Apply the distort0r effect, setting the first two double parameters:
+
+
frei0r=filter_name=distort0r:filter_params=0.5|0.01
+
+
+ Apply the colordistance effect, taking a color as the first parameter:
+
+
frei0r=colordistance:0.2/0.3/0.4
+frei0r=colordistance:violet
+frei0r=colordistance:0x112233
+
+
+ Apply the perspective effect, specifying the top left and top right image
+positions:
+
+
frei0r=perspective:0.2/0.2|0.8/0.2
+
+
+
+
For more information, see
+http://frei0r.dyne.org
+
+
+
38.39 fspp# TOC
+
+
Apply fast and simple postprocessing. It is a faster version of spp .
+
+
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
+processing filter, one of them is performed once per block, not per pixel.
+This allows for much higher speed.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 4-5. Default value is 4
.
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range 0-63.
+If not set, the filter will use the QP from the video stream (if available).
+
+
+strength
+Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
+more details but also more artifacts, while higher values make the image smoother
+but also blurrier. Default value is 0
− PSNR optimal.
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
+
38.40 geq# TOC
+
+
The filter accepts the following options:
+
+
+lum_expr, lum
+Set the luminance expression.
+
+cb_expr, cb
+Set the chrominance blue expression.
+
+cr_expr, cr
+Set the chrominance red expression.
+
+alpha_expr, a
+Set the alpha expression.
+
+red_expr, r
+Set the red expression.
+
+green_expr, g
+Set the green expression.
+
+blue_expr, b
+Set the blue expression.
+
+
+
+
The colorspace is selected according to the specified options. If one
+of the lum_expr , cb_expr , or cr_expr
+options is specified, the filter will automatically select a YCbCr
+colorspace. If one of the red_expr , green_expr , or
+blue_expr options is specified, it will select an RGB
+colorspace.
+
+
+If one of the chrominance expressions is not defined, it falls back on the other
+one. If no alpha expression is specified it will evaluate to opaque value.
+If none of chrominance expressions are specified, they will evaluate
+to the luminance expression.
+
+
The expressions can use the following variables and functions:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+The coordinates of the current sample.
+
+
+W
+H
+The width and height of the image.
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+p(x, y)
+Return the value of the pixel at location (x ,y ) of the current
+plane.
+
+
+lum(x, y)
+Return the value of the pixel at location (x ,y ) of the luminance
+plane.
+
+
+cb(x, y)
+Return the value of the pixel at location (x ,y ) of the
+blue-difference chroma plane. Return 0 if there is no such plane.
+
+
+cr(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red-difference chroma plane. Return 0 if there is no such plane.
+
+
+r(x, y)
+g(x, y)
+b(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red/green/blue component. Return 0 if there is no such component.
+
+
+alpha(x, y)
+Return the value of the pixel at location (x ,y ) of the alpha
+plane. Return 0 if there is no such plane.
+
+
+
+
For functions, if x and y are outside the area, the value will be
+automatically clipped to the closer edge.
+
+
+
38.40.1 Examples# TOC
+
+
+ Flip the image horizontally:
+
+
+ Generate a bidimensional sine wave, with angle PI/3
and a
+wavelength of 100 pixels:
+
+
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
+
+
+ Generate a fancy enigmatic moving light:
+
+
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
+
+
+ Generate a quick emboss effect:
+
+
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
+
+
+ Modify RGB components depending on pixel position:
+
+
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
+
+
+ Create a radial gradient that is the same size as the input (also see
+the vignette filter):
+
+
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
+
+
+ Create a linear gradient to use as a mask for another filter, then
+compose with overlay . In this example the video will gradually
+become more blurry from the top to the bottom of the y-axis as defined
+by the linear gradient:
+
+
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
+
+
+
+
+
38.41 gradfun# TOC
+
+
Fix the banding artifacts that are sometimes introduced into nearly flat
+regions by truncation to 8bit color depth.
+Interpolate the gradients that should go where the bands are, and
+dither them.
+
+
It is designed for playback only. Do not use it prior to
+lossy compression, because compression tends to lose the dither and
+bring back the bands.
+
+
It accepts the following parameters:
+
+
+strength
+The maximum amount by which the filter will change any one pixel. This is also
+the threshold for detecting nearly flat regions. Acceptable values range from
+.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
+valid range.
+
+
+radius
+The neighborhood to fit the gradient to. A larger radius makes for smoother
+gradients, but also prevents the filter from modifying the pixels near detailed
+regions. Acceptable values are 8-32; the default value is 16. Out-of-range
+values will be clipped to the valid range.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+strength [:radius ]
+
+
+
38.41.1 Examples# TOC
+
+
+ Apply the filter with a 3.5
strength and radius of 8
:
+
+
+ Specify radius, omitting the strength (which will fall-back to the default
+value):
+
+
+
+
+
+
38.42 haldclut# TOC
+
+
Apply a Hald CLUT to a video stream.
+
+
First input is the video stream to process, and second one is the Hald CLUT.
+The Hald CLUT input can be a simple picture or a complete video stream.
+
+
The filter accepts the following options:
+
+
+shortest
+Force termination when the shortest input terminates. Default is 0
.
+
+repeatlast
+Continue applying the last CLUT after the end of the stream. A value of
+0
disables the filter after the last frame of the CLUT is reached.
+Default is 1
.
+
+
+
+
haldclut
also has the same interpolation options as lut3d (both
+filters share the same internals).
+
+
More information about the Hald CLUT can be found on Eskil Steenberg’s website
+(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
+
+
+
38.42.1 Workflow examples# TOC
+
+
+
38.42.1.1 Hald CLUT video stream# TOC
+
+
Generate an identity Hald CLUT stream altered with various effects:
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+
+
Note: make sure you use a lossless codec.
+
+
Then use it with haldclut
to apply it on some random stream:
+
+
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+
+
The Hald CLUT will be applied to the 10 first seconds (duration of
+clut.nut ), then the latest picture of that CLUT stream will be applied
+to the remaining frames of the mandelbrot
stream.
+
+
+
38.42.1.2 Hald CLUT with preview# TOC
+
+
A Hald CLUT is supposed to be a squared image of Level*Level*Level
by
+Level*Level*Level
pixels. For a given Hald CLUT, FFmpeg will select the
+biggest possible square starting at the top left of the picture. The remaining
+padding pixels (bottom or right) will be ignored. This area can be used to add
+a preview of the Hald CLUT.
+
+
Typically, the following generated Hald CLUT will be supported by the
+haldclut
filter:
+
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
+ pad=iw+320 [padded_clut];
+ smptebars=s=320x256, split [a][b];
+ [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+ [main][b] overlay=W-320" -frames:v 1 clut.png
+
+
+
It contains the original and a preview of the effect of the CLUT: SMPTE color
+bars are displayed on the right-top, and below the same color bars processed by
+the color changes.
+
+
Then, the effect of this Hald CLUT can be visualized with:
+
+
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+
+
+
38.43 hflip# TOC
+
+
Flip the input video horizontally.
+
+
For example, to horizontally flip the input video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "hflip" out.avi
+
+
+
+
38.44 histeq# TOC
+
This filter applies a global color histogram equalization on a
+per-frame basis.
+
+
It can be used to correct video that has a compressed range of pixel
+intensities. The filter redistributes the pixel intensities to
+equalize their distribution across the intensity range. It may be
+viewed as an "automatically adjusting contrast filter". This filter is
+useful only for correcting degraded or poorly captured source
+video.
+
+
The filter accepts the following options:
+
+
+strength
+Determine the amount of equalization to be applied. As the strength
+is reduced, the distribution of pixel intensities more-and-more
+approaches that of the input frame. The value must be a float number
+in the range [0,1] and defaults to 0.200.
+
+
+intensity
+Set the maximum intensity that can be generated and scale the output
+values appropriately. The strength should be set as desired and then
+the intensity can be limited if needed to avoid washing-out. The value
+must be a float number in the range [0,1] and defaults to 0.210.
+
+
+antibanding
+Set the antibanding level. If enabled the filter will randomly vary
+the luminance of output pixels by a small amount to avoid banding of
+the histogram. Possible values are none
, weak
or
+strong
. It defaults to none
.
+
+
+
+
+
38.45 histogram# TOC
+
+
Compute and draw a color distribution histogram for the input video.
+
+
The computed histogram is a representation of the color component
+distribution in an image.
+
+
The filter accepts the following options:
+
+
+mode
+Set histogram mode.
+
+It accepts the following values:
+
+‘levels ’
+Standard histogram that displays the color components distribution in an
+image. Displays color graph for each color component. Shows distribution of
+the Y, U, V, A or R, G, B components, depending on input format, in the
+current frame. Below each graph a color component scale meter is shown.
+
+
+‘color ’
+Displays chroma values (U/V color placement) in a two dimensional
+graph (which is called a vectorscope). The brighter a pixel in the
+vectorscope, the more pixels of the input frame correspond to that pixel
+(i.e., more pixels have this chroma value). The V component is displayed on
+the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
+side being V = 255. The U component is displayed on the vertical (Y) axis,
+with the top representing U = 0 and the bottom representing U = 255.
+
+The position of a white pixel in the graph corresponds to the chroma value of
+a pixel of the input clip. The graph can therefore be used to read the hue
+(color flavor) and the saturation (the dominance of the hue in the color). As
+the hue of a color changes, it moves around the square. At the center of the
+square the saturation is zero, which means that the corresponding pixel has no
+color. If the amount of a specific color is increased (while leaving the other
+colors unchanged) the saturation increases, and the indicator moves towards
+the edge of the square.
+
+
+‘color2 ’
+Chroma values in vectorscope, similar as color
but actual chroma values
+are displayed.
+
+
+‘waveform ’
+Per row/column color component graph. In row mode, the graph on the left side
+represents color component value 0 and the right side represents value = 255.
+In column mode, the top side represents color component value = 0 and bottom
+side represents value = 255.
+
+
+Default value is levels
.
+
+
+level_height
+Set height of level in levels
. Default value is 200
.
+Allowed range is [50, 2048].
+
+
+scale_height
+Set height of color scale in levels
. Default value is 12
.
+Allowed range is [0, 40].
+
+
+step
+Set step for waveform
mode. Smaller values are useful to find out how
+many values of the same luminance are distributed across input rows/columns.
+Default value is 10
. Allowed range is [1, 255].
+
+
+waveform_mode
+Set mode for waveform
. Can be either row
, or column
.
+Default is row
.
+
+
+waveform_mirror
+Set mirroring mode for waveform
. 0
means unmirrored, 1
+means mirrored. In mirrored mode, higher values will be represented on the left
+side for row
mode and at the top for column
mode. Default is
+0
(unmirrored).
+
+
+display_mode
+Set display mode for waveform
and levels
.
+It accepts the following values:
+
+‘parade ’
+Display separate graph for the color components side by side in
+row
waveform mode or one below the other in column
waveform mode
+for waveform
histogram mode. For levels
histogram mode,
+per color component graphs are placed below each other.
+
+Using this display mode in waveform
histogram mode makes it easy to
+spot color casts in the highlights and shadows of an image, by comparing the
+contours of the top and the bottom graphs of each waveform. Since whites,
+grays, and blacks are characterized by exactly equal amounts of red, green,
+and blue, neutral areas of the picture should display three waveforms of
+roughly equal width/height. If not, the correction is easy to perform by
+making level adjustments in the three waveforms.
+
+
+‘overlay ’
+Presents information identical to that in the parade
, except
+that the graphs representing color components are superimposed directly
+over one another.
+
+This display mode in waveform
histogram mode makes it easier to spot
+relative differences or similarities in overlapping areas of the color
+components that are supposed to be identical, such as neutral whites, grays,
+or blacks.
+
+
+Default is parade
.
+
+
+levels_mode
+Set mode for levels
. Can be either linear
, or logarithmic
.
+Default is linear
.
+
+
+
+
+
38.45.1 Examples# TOC
+
+
+ Calculate and draw histogram:
+
+
ffplay -i input -vf histogram
+
+
+
+
+
+
38.46 hqdn3d# TOC
+
+
This is a high precision/quality 3d denoise filter. It aims to reduce
+image noise, producing smooth images and making still images really
+still. It should enhance compressibility.
+
+
It accepts the following optional parameters:
+
+
+luma_spatial
+A non-negative floating point number which specifies spatial luma strength.
+It defaults to 4.0.
+
+
+chroma_spatial
+A non-negative floating point number which specifies spatial chroma strength.
+It defaults to 3.0*luma_spatial /4.0.
+
+
+luma_tmp
+A floating point number which specifies luma temporal strength. It defaults to
+6.0*luma_spatial /4.0.
+
+
+chroma_tmp
+A floating point number which specifies chroma temporal strength. It defaults to
+luma_tmp *chroma_spatial /luma_spatial .
+
+
+
+
+
38.47 hqx# TOC
+
+
Apply a high-quality magnification filter designed for pixel art. This filter
+was originally created by Maxim Stepin.
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for hq2x
, 3
for
+hq3x
and 4
for hq4x
.
+Default is 3
.
+
+
+
+
+
38.48 hue# TOC
+
+
Modify the hue and/or the saturation of the input.
+
+
It accepts the following parameters:
+
+
+h
+Specify the hue angle as a number of degrees. It accepts an expression,
+and defaults to "0".
+
+
+s
+Specify the saturation in the [-10,10] range. It accepts an expression and
+defaults to "1".
+
+
+H
+Specify the hue angle as a number of radians. It accepts an
+expression, and defaults to "0".
+
+
+b
+Specify the brightness in the [-10,10] range. It accepts an expression and
+defaults to "0".
+
+
+
+
h and H are mutually exclusive, and can’t be
+specified at the same time.
+
+
The b , h , H and s option values are
+expressions containing the following constants:
+
+
+n
+frame count of the input frame starting from 0
+
+
+pts
+presentation timestamp of the input frame expressed in time base units
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+tb
+time base of the input video
+
+
+
+
+
38.48.1 Examples# TOC
+
+
+
+
+
38.48.2 Commands# TOC
+
+
This filter supports the following commands:
+
+b
+s
+h
+H
+Modify the hue and/or the saturation and/or brightness of the input video.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
38.49 idet# TOC
+
+
Detect video interlacing type.
+
+
+This filter tries to detect if the input frames are interlaced, progressive,
+top or bottom field first. It will also try and detect fields that are
+repeated between adjacent frames (a sign of telecine).
+
+
Single frame detection considers only immediately adjacent frames when classifying each frame.
+Multiple frame detection incorporates the classification history of previous frames.
+
+
The filter will log these metadata values:
+
+
+single.current_frame
+Detected type of current frame using single-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+single.tff
+Cumulative number of frames detected as top field first using single-frame detection.
+
+
+multiple.tff
+Cumulative number of frames detected as top field first using multiple-frame detection.
+
+
+single.bff
+Cumulative number of frames detected as bottom field first using single-frame detection.
+
+
+multiple.current_frame
+Detected type of current frame using multiple-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+multiple.bff
+Cumulative number of frames detected as bottom field first using multiple-frame detection.
+
+
+single.progressive
+Cumulative number of frames detected as progressive using single-frame detection.
+
+
+multiple.progressive
+Cumulative number of frames detected as progressive using multiple-frame detection.
+
+
+single.undetermined
+Cumulative number of frames that could not be classified using single-frame detection.
+
+
+multiple.undetermined
+Cumulative number of frames that could not be classified using multiple-frame detection.
+
+
+repeated.current_frame
+Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
+
+
+repeated.neither
+Cumulative number of frames with no repeated field.
+
+
+repeated.top
+Cumulative number of frames with the top field repeated from the previous frame’s top field.
+
+
+repeated.bottom
+Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
+
+
+
+
The filter accepts the following options:
+
+
+intl_thres
+Set interlacing threshold.
+
+prog_thres
+Set progressive threshold.
+
+repeat_thres
+Threshold for repeated field detection.
+
+half_life
+Number of frames after which a given frame’s contribution to the
+statistics is halved (i.e., it contributes only 0.5 to its
+classification). The default of 0 means that all frames seen are given
+full weight of 1.0 forever.
+
+analyze_interlaced_flag
+When this is not 0 then idet will use the specified number of frames to determine
+if the interlaced flag is accurate, it will not count undetermined frames.
+If the flag is found to be accurate it will be used without any further
+computations, if it is found to be inaccurate it will be cleared without any
+further computations. This allows inserting the idet filter as a low computational
+method to clean up the interlaced flag
+
+
+
+
+
38.50 il# TOC
+
+
Deinterleave or interleave fields.
+
+
This filter allows one to process interlaced image fields without
+deinterlacing them. Deinterleaving splits the input frame into 2
+fields (so called half pictures). Odd lines are moved to the top
+half of the output image, even lines to the bottom half.
+You can process (filter) them independently and then re-interleave them.
+
+
The filter accepts the following options:
+
+
+luma_mode, l
+chroma_mode, c
+alpha_mode, a
+Available values for luma_mode , chroma_mode and
+alpha_mode are:
+
+
+‘none ’
+Do nothing.
+
+
+‘deinterleave, d ’
+Deinterleave fields, placing one above the other.
+
+
+‘interleave, i ’
+Interleave fields. Reverse the effect of deinterleaving.
+
+
+Default value is none
.
+
+
+luma_swap, ls
+chroma_swap, cs
+alpha_swap, as
+Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
+
+
+
+
+
38.51 interlace# TOC
+
+
Simple interlacing filter from progressive contents. This interleaves upper (or
+lower) lines from odd frames with lower (or upper) lines from even frames,
+halving the frame rate and preserving image height.
+
+
+
Original Original New Frame
+ Frame 'j' Frame 'j+1' (tff)
+ ========== =========== ==================
+ Line 0 --------------------> Frame 'j' Line 0
+ Line 1 Line 1 ----> Frame 'j+1' Line 1
+ Line 2 ---------------------> Frame 'j' Line 2
+ Line 3 Line 3 ----> Frame 'j+1' Line 3
+ ... ... ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+
+
It accepts the following optional parameters:
+
+
+scan
+This determines whether the interlaced frame is taken from the even
+(tff - default) or odd (bff) lines of the progressive frame.
+
+
+lowpass
+Enable (default) or disable the vertical lowpass filter to avoid twitter
+interlacing and reduce moire patterns.
+
+
+
+
+
38.52 kerndeint# TOC
+
+
Deinterlace input video by applying Donald Graft’s adaptive kernel
+deinterlacing. It works on interlaced parts of a video to produce
+progressive frames.
+
+
The description of the accepted parameters follows.
+
+
+thresh
+Set the threshold which affects the filter’s tolerance when
+determining if a pixel line must be processed. It must be an integer
+in the range [0,255] and defaults to 10. A value of 0 will result in
+applying the process on every pixel.
+
+
+map
+Paint pixels exceeding the threshold value to white if set to 1.
+Default is 0.
+
+
+order
+Set the fields order. Swap fields if set to 1, leave fields alone if
+0. Default is 0.
+
+
+sharp
+Enable additional sharpening if set to 1. Default is 0.
+
+
+twoway
+Enable twoway sharpening if set to 1. Default is 0.
+
+
+
+
+
38.52.1 Examples# TOC
+
+
+ Apply default values:
+
+
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
+
+
+ Enable additional sharpening:
+
+
+ Paint processed pixels in white:
+
+
+
+
+
38.53 lenscorrection# TOC
+
+
Correct radial lens distortion
+
+
This filter can be used to correct for radial distortion as can result from the use
+of wide angle lenses, and thereby re-rectify the image. To find the right parameters
+one can use tools available for example as part of opencv or simply trial-and-error.
+To use opencv use the calibration sample (under samples/cpp) from the opencv sources
+and extract the k1 and k2 coefficients from the resulting matrix.
+
+
Note that effectively the same filter is available in the open-source tools Krita and
+Digikam from the KDE project.
+
+
In contrast to the vignette filter, which can also be used to compensate lens errors,
+this filter corrects the distortion of the image, whereas vignette corrects the
+brightness distribution, so you may want to use both filters together in certain
+cases, though you will have to take care of ordering, i.e. whether vignetting should
+be applied before or after lens correction.
+
+
+
38.53.1 Options# TOC
+
+
The filter accepts the following options:
+
+
+cx
+Relative x-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+width.
+
+cy
+Relative y-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+height.
+
+k1
+Coefficient of the quadratic correction term. 0.5 means no correction.
+
+k2
+Coefficient of the double quadratic correction term. 0.5 means no correction.
+
+
+
+
The formula that generates the correction is:
+
+
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
+
+
+where r_0 is half of the image diagonal and r_src and r_tgt are the
+distances from the focal point in the source and target images, respectively.
+
+
+
38.54 lut3d# TOC
+
+
Apply a 3D LUT to an input video.
+
+
The filter accepts the following options:
+
+
+file
+Set the 3D LUT file name.
+
+Currently supported formats:
+
+‘3dl ’
+AfterEffects
+
+‘cube ’
+Iridas
+
+‘dat ’
+DaVinci
+
+‘m3d ’
+Pandora
+
+
+
+interp
+Select interpolation mode.
+
+Available values are:
+
+
+‘nearest ’
+Use values from the nearest defined point.
+
+‘trilinear ’
+Interpolate values using the 8 points defining a cube.
+
+‘tetrahedral ’
+Interpolate values using a tetrahedron.
+
+
+
+
+
+
+
38.55 lut, lutrgb, lutyuv# TOC
+
+
Compute a look-up table for binding each pixel component input value
+to an output value, and apply it to the input video.
+
+
lutyuv applies a lookup table to a YUV input video, lutrgb
+to an RGB input video.
+
+
These filters accept the following parameters:
+
+c0
+set first pixel component expression
+
+c1
+set second pixel component expression
+
+c2
+set third pixel component expression
+
+c3
+set fourth pixel component expression, corresponds to the alpha component
+
+
+r
+set red component expression
+
+g
+set green component expression
+
+b
+set blue component expression
+
+a
+set alpha component expression
+
+
+y
+set Y/luminance component expression
+
+u
+set U/Cb component expression
+
+v
+set V/Cr component expression
+
+
+
+
Each of them specifies the expression to use for computing the lookup table for
+the corresponding pixel component values.
+
+
The exact component associated to each of the c* options depends on the
+format in input.
+
+
The lut filter requires either YUV or RGB pixel formats in input,
+lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
+
+
The expressions can contain the following constants and functions:
+
+
+w
+h
+The input width and height.
+
+
+val
+The input value for the pixel component.
+
+
+clipval
+The input value, clipped to the minval -maxval range.
+
+
+maxval
+The maximum value for the pixel component.
+
+
+minval
+The minimum value for the pixel component.
+
+
+negval
+The negated value for the pixel component value, clipped to the
+minval -maxval range; it corresponds to the expression
+"maxval-clipval+minval".
+
+
+clip(val)
+The computed value in val , clipped to the
+minval -maxval range.
+
+
+gammaval(gamma)
+The computed gamma correction value of the pixel component value,
+clipped to the minval -maxval range. It corresponds to the
+expression
+"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
+
+
+
+
+
All expressions default to "val".
+
+
+
38.55.1 Examples# TOC
+
+
+
+
+
38.56 mergeplanes# TOC
+
+
Merge color channel components from several video streams.
+
+
+The filter accepts up to 4 input streams, and merges selected input
+planes to the output video.
+
+
This filter accepts the following options:
+
+mapping
+Set input to output plane mapping. Default is 0
.
+
+The mapping is specified as a bitmap. It should be specified as a
+hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
+mapping for the first plane of the output stream. ’A’ sets the number of
+the input stream to use (from 0 to 3), and ’a’ the plane number of the
+corresponding input to use (from 0 to 3). The rest of the mappings is
+similar, ’Bb’ describes the mapping for the output stream second
+plane, ’Cc’ describes the mapping for the output stream third plane and
+’Dd’ describes the mapping for the output stream fourth plane.
+
+
+format
+Set output pixel format. Default is yuva444p
.
+
+
+
+
+
38.56.1 Examples# TOC
+
+
+ Merge three gray video streams of same width and height into single video stream:
+
+
[a0][a1][a2]mergeplanes=0x001020:yuv444p
+
+
+ Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
+
+
[a0][a1]mergeplanes=0x00010210:yuva444p
+
+
+ Swap Y and A plane in yuva444p stream:
+
+
format=yuva444p,mergeplanes=0x03010200:yuva444p
+
+
+ Swap U and V plane in yuv420p stream:
+
+
format=yuv420p,mergeplanes=0x000201:yuv420p
+
+
+ Cast a rgb24 clip to yuv444p:
+
+
format=rgb24,mergeplanes=0x000102:yuv444p
+
+
+
+
+
38.57 mcdeint# TOC
+
+
Apply motion-compensation deinterlacing.
+
+
It needs one field per frame as input and must thus be used together
+with yadif=1/3 or equivalent.
+
+
This filter accepts the following options:
+
+mode
+Set the deinterlacing mode.
+
+It accepts one of the following values:
+
+‘fast ’
+‘medium ’
+‘slow ’
+use iterative motion estimation
+
+‘extra_slow ’
+like ‘slow ’, but use multiple reference frames.
+
+
+Default value is ‘fast ’.
+
+
+parity
+Set the picture field parity assumed for the input video. It must be
+one of the following values:
+
+
+‘0, tff ’
+assume top field first
+
+‘1, bff ’
+assume bottom field first
+
+
+
+Default value is ‘bff ’.
+
+
+qp
+Set per-block quantization parameter (QP) used by the internal
+encoder.
+
+Higher values should result in a smoother motion vector field but less
+optimal individual vectors. Default value is 1.
+
+
+
+
+
38.58 mp# TOC
+
+
Apply an MPlayer filter to the input video.
+
+
This filter provides a wrapper around some of the filters of
+MPlayer/MEncoder.
+
+
This wrapper is considered experimental. Some of the wrapped filters
+may not work properly and we may drop support for them, as they will
+be implemented natively into FFmpeg. Thus you should avoid
+depending on them when writing portable scripts.
+
+
The filter accepts the parameters:
+filter_name [:=]filter_params
+
+
filter_name is the name of a supported MPlayer filter,
+filter_params is a string containing the parameters accepted by
+the named filter.
+
+
The list of the currently supported filters follows:
+
+eq2
+eq
+ilpack
+softpulldown
+
+
+
The parameter syntax and behavior for the listed filters are the same
+of the corresponding MPlayer filters. For detailed instructions check
+the "VIDEO FILTERS" section in the MPlayer manual.
+
+
+
38.58.1 Examples# TOC
+
+
+ Adjust gamma, brightness, contrast:
+
+
+
+
See also mplayer(1), http://www.mplayerhq.hu/ .
+
+
+
38.59 mpdecimate# TOC
+
+
Drop frames that do not differ greatly from the previous frame in
+order to reduce frame rate.
+
+
The main use of this filter is for very-low-bitrate encoding
+(e.g. streaming over dialup modem), but it could in theory be used for
+fixing movies that were inverse-telecined incorrectly.
+
+
A description of the accepted options follows.
+
+
+max
+Set the maximum number of consecutive frames which can be dropped (if
+positive), or the minimum interval between dropped frames (if
+negative). If the value is 0, the frame is dropped regardless of the
+number of previous sequentially dropped frames.
+
+Default value is 0.
+
+
+hi
+lo
+frac
+Set the dropping threshold values.
+
+Values for hi and lo are for 8x8 pixel blocks and
+represent actual pixel value differences, so a threshold of 64
+corresponds to 1 unit of difference for each pixel, or the same spread
+out differently over the block.
+
+A frame is a candidate for dropping if no 8x8 blocks differ by more
+than a threshold of hi , and if no more than frac blocks (1
+meaning the whole image) differ by more than a threshold of lo .
+
+Default value for hi is 64*12, default value for lo is
+64*5, and default value for frac is 0.33.
+
+
+
+
+
+
38.60 negate# TOC
+
+
Negate input video.
+
+
It accepts an integer in input; if non-zero it negates the
+alpha component (if available). The default value in input is 0.
+
+
+
38.61 noformat# TOC
+
+
Force libavfilter not to use any of the specified pixel formats for the
+input to the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
38.61.1 Examples# TOC
+
+
+ Force libavfilter to use a format different from yuv420p for the
+input to the vflip filter:
+
+
noformat=pix_fmts=yuv420p,vflip
+
+
+ Convert the input video to any of the formats not contained in the list:
+
+
noformat=yuv420p|yuv444p|yuv410p
+
+
+
+
+
38.62 noise# TOC
+
+
Add noise on video input frame.
+
+
The filter accepts the following options:
+
+
+all_seed
+c0_seed
+c1_seed
+c2_seed
+c3_seed
+Set noise seed for specific pixel component or all pixel components in case
+of all_seed . Default value is 123457
.
+
+
+all_strength, alls
+c0_strength, c0s
+c1_strength, c1s
+c2_strength, c2s
+c3_strength, c3s
+Set noise strength for specific pixel component or all pixel components in case
+all_strength . Default value is 0
. Allowed range is [0, 100].
+
+
+all_flags, allf
+c0_flags, c0f
+c1_flags, c1f
+c2_flags, c2f
+c3_flags, c3f
+Set pixel component flags or set flags for all components if all_flags .
+Available values for component flags are:
+
+‘a ’
+averaged temporal noise (smoother)
+
+‘p ’
+mix random noise with a (semi)regular pattern
+
+‘t ’
+temporal noise (noise pattern changes between frames)
+
+‘u ’
+uniform noise (gaussian otherwise)
+
+
+
+
+
+
+
38.62.1 Examples# TOC
+
+
Add temporal and uniform noise to input video:
+
+
noise=alls=20:allf=t+u
+
+
+
+
38.63 null# TOC
+
+
Pass the video source unchanged to the output.
+
+
+
38.64 ocv# TOC
+
+
Apply a video transform using libopencv.
+
+
To enable this filter, install the libopencv library and headers and
+configure FFmpeg with --enable-libopencv
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the libopencv filter to apply.
+
+
+filter_params
+The parameters to pass to the libopencv filter. If not specified, the default
+values are assumed.
+
+
+
+
+
Refer to the official libopencv documentation for more precise
+information:
+http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
+
+
Several libopencv filters are supported; see the following subsections.
+
+
+
38.64.1 dilate# TOC
+
+
Dilate an image by using a specific structuring element.
+It corresponds to the libopencv function cvDilate
.
+
+
It accepts the parameters: struct_el |nb_iterations .
+
+
struct_el represents a structuring element, and has the syntax:
+cols xrows +anchor_x xanchor_y /shape
+
+
cols and rows represent the number of columns and rows of
+the structuring element, anchor_x and anchor_y the anchor
+point, and shape the shape for the structuring element. shape
+must be "rect", "cross", "ellipse", or "custom".
+
+
If the value for shape is "custom", it must be followed by a
+string of the form "=filename ". The file with name
+filename is assumed to represent a binary image, with each
+printable character corresponding to a bright pixel. When a custom
+shape is used, cols and rows are ignored, the number
+of columns and rows of the read file are assumed instead.
+
+
The default value for struct_el is "3x3+0x0/rect".
+
+
nb_iterations specifies the number of times the transform is
+applied to the image, and defaults to 1.
+
+
Some examples:
+
+
# Use the default values
+ocv=dilate
+
+# Dilate using a structuring element with a 5x5 cross, iterating two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# Read the shape from the file diamond.shape, iterating two times.
+# The file diamond.shape may contain a pattern of characters like this
+# *
+# ***
+# *****
+# ***
+# *
+# The specified columns and rows are ignored
+# but the anchor point coordinates are not
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+
+
+
38.64.2 erode# TOC
+
+
Erode an image by using a specific structuring element.
+It corresponds to the libopencv function cvErode
.
+
+
It accepts the parameters: struct_el :nb_iterations ,
+with the same syntax and semantics as the dilate filter.
+
+
+
38.64.3 smooth# TOC
+
+
Smooth the input video.
+
+
The filter takes the following parameters:
+type |param1 |param2 |param3 |param4 .
+
+
type is the type of smooth filter to apply, and must be one of
+the following values: "blur", "blur_no_scale", "median", "gaussian",
+or "bilateral". The default value is "gaussian".
+
+
The meaning of param1 , param2 , param3 , and param4
+depend on the smooth type. param1 and
+param2 accept integer positive values or 0. param3 and
+param4 accept floating point values.
+
+
The default value for param1 is 3. The default value for the
+other parameters is 0.
+
+
These parameters correspond to the parameters assigned to the
+libopencv function cvSmooth
.
+
+
+
38.65 overlay# TOC
+
+
Overlay one video on top of another.
+
+
It takes two inputs and has one output. The first input is the "main"
+video on which the second input is overlaid.
+
+
It accepts the following parameters:
+
+
A description of the accepted options follows.
+
+
+x
+y
+Set the expression for the x and y coordinates of the overlaid video
+on the main video. Default value is "0" for both expressions. In case
+the expression is invalid, it is set to a huge value (meaning that the
+overlay will not be displayed within the output visible area).
+
+
+eof_action
+The action to take when EOF is encountered on the secondary input; it accepts
+one of the following values:
+
+
+repeat
+Repeat the last frame (the default).
+
+endall
+End both streams.
+
+pass
+Pass the main input through.
+
+
+
+
+eval
+Set when the expressions for x , and y are evaluated.
+
+It accepts the following values:
+
+‘init ’
+only evaluate expressions once during the filter initialization or
+when a command is processed
+
+
+‘frame ’
+evaluate expressions for each incoming frame
+
+
+
+Default value is ‘frame ’.
+
+
+shortest
+If set to 1, force the output to terminate when the shortest input
+terminates. Default value is 0.
+
+
+format
+Set the format for the output video.
+
+It accepts the following values:
+
+‘yuv420 ’
+force YUV420 output
+
+
+‘yuv422 ’
+force YUV422 output
+
+
+‘yuv444 ’
+force YUV444 output
+
+
+‘rgb ’
+force RGB output
+
+
+
+Default value is ‘yuv420 ’.
+
+
+rgb (deprecated)
+If set to 1, force the filter to accept inputs in the RGB
+color space. Default value is 0. This option is deprecated, use
+format instead.
+
+
+repeatlast
+If set to 1, force the filter to draw the last overlay frame over the
+main input until the end of the stream. A value of 0 disables this
+behavior. Default value is 1.
+
+
+
+
The x , and y expressions can contain the following
+parameters.
+
+
+main_w, W
+main_h, H
+The main input width and height.
+
+
+overlay_w, w
+overlay_h, h
+The overlay input width and height.
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values of the output
+format. For example for the pixel format "yuv422p" hsub is 2 and
+vsub is 1.
+
+
+n
+the number of input frame, starting from 0
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
Note that the n , pos , t variables are available only
+when evaluation is done per frame , and will evaluate to NAN
+when eval is set to ‘init ’.
+
+
Be aware that frames are taken from each input video in timestamp
+order, hence, if their initial timestamps differ, it is a good idea
+to pass the two inputs through a setpts=PTS-STARTPTS filter to
+have them begin in the same zero timestamp, as the example for
+the movie filter does.
+
+
You can chain together more overlays but you should test the
+efficiency of such approach.
+
+
+
38.65.1 Commands# TOC
+
+
This filter supports the following commands:
+
+x
+y
+Modify the x and y of the overlay input.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
38.65.2 Examples# TOC
+
+
+
+
+
38.66 owdenoise# TOC
+
+
Apply Overcomplete Wavelet denoiser.
+
+
The filter accepts the following options:
+
+
+depth
+Set depth.
+
+Larger depth values will denoise lower frequency components more, but
+slow down filtering.
+
+Must be an int in the range 8-16, default is 8
.
+
+
+luma_strength, ls
+Set luma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+chroma_strength, cs
+Set chroma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+
+
+
38.67 pad# TOC
+
+
Add paddings to the input image, and place the original input at the
+provided x , y coordinates.
+
+
It accepts the following parameters:
+
+
+width, w
+height, h
+Specify an expression for the size of the output image with the
+paddings added. If the value for width or height is 0, the
+corresponding input size is used for the output.
+
+The width expression can reference the value set by the
+height expression, and vice versa.
+
+The default value of width and height is 0.
+
+
+x
+y
+Specify the offsets to place the input image at within the padded area,
+with respect to the top/left border of the output image.
+
+The x expression can reference the value set by the y
+expression, and vice versa.
+
+The default value of x and y is 0.
+
+
+color
+Specify the color of the padded area. For the syntax of this option,
+check the "Color" section in the ffmpeg-utils manual.
+
+The default value of color is "black".
+
+
+
+
The value for the width , height , x , and y
+options are expressions containing the following constants:
+
+
+in_w
+in_h
+The input video width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output width and height (the size of the padded area), as
+specified by the width and height expressions.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+x
+y
+The x and y offsets as specified by the x and y
+expressions, or NAN if not yet specified.
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
38.67.1 Examples# TOC
+
+
+
+
+
38.68 perspective# TOC
+
+
Correct perspective of video not recorded perpendicular to the screen.
+
+
A description of the accepted parameters follows.
+
+
+x0
+y0
+x1
+y1
+x2
+y2
+x3
+y3
+Set coordinates expression for top left, top right, bottom left and bottom right corners.
+Default values are 0:0:W:0:0:H:W:H
with which perspective will remain unchanged.
+If the sense
option is set to source
, then the specified points will be sent
+to the corners of the destination. If the sense
option is set to destination
,
+then the corners of the source will be sent to the specified coordinates.
+
+The expressions can use the following variables:
+
+
+W
+H
+the width and height of video frame.
+
+
+
+
+interpolation
+Set interpolation for perspective correction.
+
+It accepts the following values:
+
+‘linear ’
+‘cubic ’
+
+
+Default value is ‘linear ’.
+
+
+sense
+Set interpretation of coordinate options.
+
+It accepts the following values:
+
+‘0, source ’
+
+Send point in the source specified by the given coordinates to
+the corners of the destination.
+
+
+‘1, destination ’
+
+Send the corners of the source to the point in the destination specified
+by the given coordinates.
+
+Default value is ‘source ’.
+
+
+
+
+
+
+
38.69 phase# TOC
+
+
Delay interlaced video by one field time so that the field order changes.
+
+
The intended use is to fix PAL movies that have been captured with the
+opposite field order to the film-to-video transfer.
+
+
A description of the accepted parameters follows.
+
+
+mode
+Set phase mode.
+
+It accepts the following values:
+
+‘t ’
+Capture field order top-first, transfer bottom-first.
+Filter will delay the bottom field.
+
+
+‘b ’
+Capture field order bottom-first, transfer top-first.
+Filter will delay the top field.
+
+
+‘p ’
+Capture and transfer with the same field order. This mode only exists
+for the documentation of the other options to refer to, but if you
+actually select it, the filter will faithfully do nothing.
+
+
+‘a ’
+Capture field order determined automatically by field flags, transfer
+opposite.
+Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
+basis using field flags. If no field information is available,
+then this works just like ‘u ’.
+
+
+‘u ’
+Capture unknown or varying, transfer opposite.
+Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
+analyzing the images and selecting the alternative that produces best
+match between the fields.
+
+
+‘T ’
+Capture top-first, transfer unknown or varying.
+Filter selects among ‘t ’ and ‘p ’ using image analysis.
+
+
+‘B ’
+Capture bottom-first, transfer unknown or varying.
+Filter selects among ‘b ’ and ‘p ’ using image analysis.
+
+
+‘A ’
+Capture determined by field flags, transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
+image analysis. If no field information is available, then this works just
+like ‘U ’. This is the default mode.
+
+
+‘U ’
+Both capture and transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
+
+
+
+
+
+
+
38.70 pixdesctest# TOC
+
+
Pixel format descriptor test filter, mainly useful for internal
+testing. The output video should be equal to the input video.
+
+
For example:
+
+
format=monow, pixdesctest
+
+
+
can be used to test the monowhite pixel format descriptor definition.
+
+
+
38.71 pp# TOC
+
+
Enable the specified chain of postprocessing subfilters using libpostproc. This
+library should be automatically selected with a GPL build (--enable-gpl
).
+Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
+Each subfilter and some options have a short and a long name that can be used
+interchangeably, i.e. dr/dering are the same.
+
+
The filters accept the following options:
+
+
+subfilters
+Set postprocessing subfilters string.
+
+
+
+
All subfilters share common options to determine their scope:
+
+
+a/autoq
+Honor the quality commands for this subfilter.
+
+
+c/chrom
+Do chrominance filtering, too (default).
+
+
+y/nochrom
+Do luminance filtering only (no chrominance).
+
+
+n/noluma
+Do chrominance filtering only (no luminance).
+
+
+
+
These options can be appended after the subfilter name, separated by a ’|’.
+
+
Available subfilters are:
+
+
+hb/hdeblock[|difference[|flatness]]
+Horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+vb/vdeblock[|difference[|flatness]]
+Vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+ha/hadeblock[|difference[|flatness]]
+Accurate horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+va/vadeblock[|difference[|flatness]]
+Accurate vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+
+
The horizontal and vertical deblocking filters share the difference and
+flatness values so you cannot set different horizontal and vertical
+thresholds.
+
+
+h1/x1hdeblock
+Experimental horizontal deblocking filter
+
+
+v1/x1vdeblock
+Experimental vertical deblocking filter
+
+
+dr/dering
+Deringing filter
+
+
+tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+threshold1
+larger -> stronger filtering
+
+threshold2
+larger -> stronger filtering
+
+threshold3
+larger -> stronger filtering
+
+
+
+
+al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+f/fullyrange
+Stretch luminance to 0-255
.
+
+
+
+
+lb/linblenddeint
+Linear blend deinterlacing filter that deinterlaces the given block by
+filtering all lines with a (1 2 1)
filter.
+
+
+li/linipoldeint
+Linear interpolating deinterlacing filter that deinterlaces the given block by
+linearly interpolating every second line.
+
+
+ci/cubicipoldeint
+Cubic interpolating deinterlacing filter deinterlaces the given block by
+cubically interpolating every second line.
+
+
+md/mediandeint
+Median deinterlacing filter that deinterlaces the given block by applying a
+median filter to every second line.
+
+
+fd/ffmpegdeint
+FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
+second line with a (-1 4 2 4 -1)
filter.
+
+
+l5/lowpass5
+Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
+block by filtering all lines with a (-1 2 6 2 -1)
filter.
+
+
+fq/forceQuant[|quantizer]
+Overrides the quantizer table from the input with the constant quantizer you
+specify.
+
+quantizer
+Quantizer to use
+
+
+
+
+de/default
+Default pp filter combination (hb|a,vb|a,dr|a
)
+
+
+fa/fast
+Fast pp filter combination (h1|a,v1|a,dr|a
)
+
+
+ac
+High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
+
+
+
+
+
38.71.1 Examples# TOC
+
+
+ Apply horizontal and vertical deblocking, deringing and automatic
+brightness/contrast:
+
+
+ Apply default filters without brightness/contrast correction:
+
+
+ Apply default filters and temporal denoiser:
+
+
pp=default/tmpnoise|1|2|3
+
+
+ Apply deblocking on luminance only, and switch vertical deblocking on or off
+automatically depending on available CPU time:
+
+
+
+
+
38.72 pp7# TOC
+
Apply Postprocessing filter 7. It is variant of the spp filter,
+similar to spp = 6 with 7 point DCT, where only the center sample is
+used after IDCT.
+
+
The filter accepts the following options:
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range
+0 to 63. If not set, the filter will use the QP from the video stream
+(if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding.
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+‘medium ’
+Set medium thresholding (good results, default).
+
+
+
+
+
+
+
38.73 psnr# TOC
+
+
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
+Ratio) between two input videos.
+
+
This filter takes in input two input videos, the first input is
+considered the "main" source and is passed unchanged to the
+output. The second input is used as a "reference" video for computing
+the PSNR.
+
+
Both video inputs must have the same resolution and pixel format for
+this filter to work correctly. Also it assumes that both inputs
+have the same number of frames, which are compared one by one.
+
+
The obtained average PSNR is printed through the logging system.
+
+
The filter stores the accumulated MSE (mean squared error) of each
+frame, and at the end of the processing it is averaged across all frames
+equally, and the following formula is applied to obtain the PSNR:
+
+
+
PSNR = 10*log10(MAX^2/MSE)
+
+
+
Where MAX is the average of the maximum values of each component of the
+image.
+
+
The description of the accepted parameters follows.
+
+
+stats_file, f
+If specified the filter will use the named file to save the PSNR of
+each individual frame.
+
+
+
+
The file printed if stats_file is selected, contains a sequence of
+key/value pairs of the form key :value for each compared
+couple of frames.
+
+
A description of each shown parameter follows:
+
+
+n
+sequential number of the input frame, starting from 1
+
+
+mse_avg
+Mean Square Error pixel-by-pixel average difference of the compared
+frames, averaged over all the image components.
+
+
+mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+Mean Square Error pixel-by-pixel average difference of the compared
+frames for the component specified by the suffix.
+
+
+psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+Peak Signal to Noise ratio of the compared frames for the component
+specified by the suffix.
+
+
+
+
For example:
+
+
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+
+
On this example the input file being processed is compared with the
+reference file ref_movie.mpg . The PSNR of each individual frame
+is stored in stats.log .
+
+
+
38.74 pullup# TOC
+
+
Pulldown reversal (inverse telecine) filter, capable of handling mixed
+hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
+content.
+
+
The pullup filter is designed to take advantage of future context in making
+its decisions. This filter is stateless in the sense that it does not lock
+onto a pattern to follow, but it instead looks forward to the following
+fields in order to identify matches and rebuild progressive frames.
+
+
To produce content with an even framerate, insert the fps filter after
+pullup, use fps=24000/1001
if the input frame rate is 29.97fps,
+fps=24
for 30fps and the (rare) telecined 25fps input.
+
+
The filter accepts the following options:
+
+
+jl
+jr
+jt
+jb
+These options set the amount of "junk" to ignore at the left, right, top, and
+bottom of the image, respectively. Left and right are in units of 8 pixels,
+while top and bottom are in units of 2 lines.
+The default is 8 pixels on each side.
+
+
+sb
+Set the strict breaks. Setting this option to 1 will reduce the chances of
+filter generating an occasional mismatched frame, but it may also cause an
+excessive number of frames to be dropped during high motion sequences.
+Conversely, setting it to -1 will make filter match fields more easily.
+This may help processing of video where there is slight blurring between
+the fields, but may also cause there to be interlaced frames in the output.
+Default value is 0
.
+
+
+mp
+Set the metric plane to use. It accepts the following values:
+
+‘l ’
+Use luma plane.
+
+
+‘u ’
+Use chroma blue plane.
+
+
+‘v ’
+Use chroma red plane.
+
+
+
+This option may be set to use chroma plane instead of the default luma plane
+for doing filter’s computations. This may improve accuracy on very clean
+source material, but more likely will decrease accuracy, especially if there
+is chroma noise (rainbow effect) or any grayscale video.
+The main purpose of setting mp to a chroma plane is to reduce CPU
+load and make pullup usable in realtime on slow machines.
+
+
+
+
For best results (without duplicated frames in the output file) it is
+necessary to change the output frame rate. For example, to inverse
+telecine NTSC input:
+
+
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+
+
+
38.75 qp# TOC
+
+
Change video quantization parameters (QP).
+
+
The filter accepts the following option:
+
+
+qp
+Set expression for quantization parameter.
+
+
+
+
The expression is evaluated through the eval API and can contain, among others,
+the following constants:
+
+
+known
+1 if index is not 129, 0 otherwise.
+
+
+qp
+Sequential index starting from -129 to 128.
+
+
+
+
+
38.75.1 Examples# TOC
+
+
+ Some equation like:
+
+
+
+
+
38.76 removelogo# TOC
+
+
Suppress a TV station logo, using an image file to determine which
+pixels comprise the logo. It works by filling in the pixels that
+comprise the logo with neighboring pixels.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filter bitmap file, which can be any image format supported by
+libavformat. The width and height of the image file must match those of the
+video stream being processed.
+
+
+
+
Pixels in the provided bitmap image with a value of zero are not
+considered part of the logo, non-zero pixels are considered part of
+the logo. If you use white (255) for the logo and black (0) for the
+rest, you will be safe. For making the filter bitmap, it is
+recommended to take a screen capture of a black frame with the logo
+visible, and then using a threshold filter followed by the erode
+filter once or twice.
+
+
If needed, little splotches can be fixed manually. Remember that if
+logo pixels are not covered, the filter quality will be much
+reduced. Marking too many pixels as part of the logo does not hurt as
+much, but it will increase the amount of blurring needed to cover over
+the image and will destroy more information than necessary, and extra
+pixels will slow things down on a large logo.
+
+
+
38.77 rotate# TOC
+
+
Rotate video by an arbitrary angle expressed in radians.
+
+
The filter accepts the following options:
+
+
A description of the optional parameters follows.
+
+angle, a
+Set an expression for the angle by which to rotate the input video
+clockwise, expressed as a number of radians. A negative value will
+result in a counter-clockwise rotation. By default it is set to "0".
+
+This expression is evaluated for each frame.
+
+
+out_w, ow
+Set the output width expression, default value is "iw".
+This expression is evaluated just once during configuration.
+
+
+out_h, oh
+Set the output height expression, default value is "ih".
+This expression is evaluated just once during configuration.
+
+
+bilinear
+Enable bilinear interpolation if set to 1, a value of 0 disables
+it. Default value is 1.
+
+
+fillcolor, c
+Set the color used to fill the output area not covered by the rotated
+image. For the general syntax of this option, check the "Color" section in the
+ffmpeg-utils manual. If the special value "none" is selected then no
+background is printed (useful for example if the background is never shown).
+
+Default value is "black".
+
+
+
+
The expressions for the angle and the output size can contain the
+following constants and functions:
+
+
+n
+sequential number of the input frame, starting from 0. It is always NAN
+before the first frame is filtered.
+
+
+t
+time in seconds of the input frame, it is set to 0 when the filter is
+configured. It is always NAN before the first frame is filtered.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_w, iw
+in_h, ih
+the input video width and height
+
+
+out_w, ow
+out_h, oh
+the output width and height, that is the size of the padded area as
+specified by the width and height expressions
+
+
+rotw(a)
+roth(a)
+the minimal width/height required for completely containing the input
+video rotated by a radians.
+
+These are only available when computing the out_w and
+out_h expressions.
+
+
+
+
+
38.77.1 Examples# TOC
+
+
+ Rotate the input by PI/6 radians clockwise:
+
+
+ Rotate the input by PI/6 radians counter-clockwise:
+
+
+ Rotate the input by 45 degrees clockwise:
+
+
+ Apply a constant rotation with period T, starting from an angle of PI/3:
+
+
+ Make the input video rotation oscillating with a period of T
+seconds and an amplitude of A radians:
+
+
rotate=A*sin(2*PI/T*t)
+
+
+ Rotate the video, output size is chosen so that the whole rotating
+input video is always completely contained in the output:
+
+
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
+
+
+ Rotate the video, reduce the output size so that no background is ever
+shown:
+
+
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
+
+
+
+
+
38.77.2 Commands# TOC
+
+
The filter supports the following commands:
+
+
+a, angle
+Set the angle expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
38.78 sab# TOC
+
+
Apply Shape Adaptive Blur.
+
+
The filter accepts the following options:
+
+
+luma_radius, lr
+Set luma blur filter strength, must be a value in range 0.1-4.0, default
+value is 1.0. A greater value will result in a more blurred image, and
+in slower processing.
+
+
+luma_pre_filter_radius, lpfr
+Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
+value is 1.0.
+
+
+luma_strength, ls
+Set luma maximum difference between pixels to still be considered, must
+be a value in the 0.1-100.0 range, default value is 1.0.
+
+
+chroma_radius, cr
+Set chroma blur filter strength, must be a value in range 0.1-4.0. A
+greater value will result in a more blurred image, and in slower
+processing.
+
+
+chroma_pre_filter_radius, cpfr
+Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
+
+
+chroma_strength, cs
+Set chroma maximum difference between pixels to still be considered,
+must be a value in the 0.1-100.0 range.
+
+
+
+
Each chroma option value, if not explicitly specified, is set to the
+corresponding luma option value.
+
+
+
38.79 scale# TOC
+
+
Scale (resize) the input video, using the libswscale library.
+
+
The scale filter forces the output display aspect ratio to be the same
+of the input, by changing the output sample aspect ratio.
+
+
If the input image format is different from the format requested by
+the next filter, the scale filter will convert the input to the
+requested format.
+
+
+
38.79.1 Options# TOC
+
The filter accepts the following options, or any of the options
+supported by the libswscale scaler.
+
+
See (ffmpeg-scaler)the ffmpeg-scaler manual for
+the complete list of scaler options.
+
+
+width, w
+height, h
+Set the output video dimension expression. Default value is the input
+dimension.
+
+If the value is 0, the input width is used for the output.
+
+If one of the values is -1, the scale filter will use a value that
+maintains the aspect ratio of the input image, calculated from the
+other specified dimension. If both of them are -1, the input size is
+used
+
+If one of the values is -n with n > 1, the scale filter will also use a value
+that maintains the aspect ratio of the input image, calculated from the other
+specified dimension. After that it will, however, make sure that the calculated
+dimension is divisible by n and adjust the value if necessary.
+
+See below for the list of accepted constants for use in the dimension
+expression.
+
+
+interl
+Set the interlacing mode. It accepts the following values:
+
+
+‘1 ’
+Force interlaced aware scaling.
+
+
+‘0 ’
+Do not apply interlaced scaling.
+
+
+‘-1 ’
+Select interlaced aware scaling depending on whether the source frames
+are flagged as interlaced or not.
+
+
+
+Default value is ‘0 ’.
+
+
+flags
+Set libswscale scaling flags. See
+(ffmpeg-scaler)the ffmpeg-scaler manual for the
+complete list of values. If not explicitly specified the filter applies
+the default flags.
+
+
+size, s
+Set the video size. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+in_color_matrix
+out_color_matrix
+Set in/output YCbCr color space type.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder.
+
+If not specified, the color space type depends on the pixel format.
+
+Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘bt709 ’
+Format conforming to International Telecommunication Union (ITU)
+Recommendation BT.709.
+
+
+‘fcc ’
+Set color space conforming to the United States Federal Communications
+Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
+
+
+‘bt601 ’
+Set color space conforming to:
+
+
+ ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
+
+ ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
+
+ Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
+
+
+
+
+‘smpte240m ’
+Set color space conforming to SMPTE ST 240:1999.
+
+
+
+
+in_range
+out_range
+Set in/output YCbCr sample range.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder. If not specified, the
+range depends on the pixel format. Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘jpeg/full/pc ’
+Set full range (0-255 in case of 8-bit luma).
+
+
+‘mpeg/tv ’
+Set "MPEG" range (16-235 in case of 8-bit luma).
+
+
+
+
+force_original_aspect_ratio
+Enable decreasing or increasing output video width or height if necessary to
+keep the original aspect ratio. Possible values:
+
+
+‘disable ’
+Scale the video as specified and disable this feature.
+
+
+‘decrease ’
+The output video dimensions will automatically be decreased if needed.
+
+
+‘increase ’
+The output video dimensions will automatically be increased if needed.
+
+
+
+
+One useful instance of this option is that when you know a specific device’s
+maximum allowed resolution, you can use this to limit the output video to
+that, while retaining the aspect ratio. For example, device A allows
+1280x720 playback, and your video is 1920x800. Using this option (set it to
+decrease) and specifying 1280x720 to the command line makes the output
+1280x533.
+
+Please note that this is a different thing than specifying -1 for w
+or h , you still need to specify the output resolution for this option
+to work.
+
+
+
+
+
The values of the w and h options are expressions
+containing the following constants:
+
+
+in_w
+in_h
+The input width and height
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (scaled) width and height
+
+
+ow
+oh
+These are the same as out_w and out_h
+
+
+a
+The same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+The input display aspect ratio. Calculated from (iw / ih) * sar
.
+
+
+hsub
+vsub
+horizontal and vertical input chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+ohsub
+ovsub
+horizontal and vertical output chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
38.79.2 Examples# TOC
+
+
+
+
+
38.80 separatefields# TOC
+
+
The separatefields
takes a frame-based video input and splits
+each frame into its component fields, producing a new half height clip
+with twice the frame rate and twice the frame count.
+
+
This filter uses field-dominance information in the frame to decide which
+of each pair of fields to place first in the output.
+If it gets it wrong use setfield filter before separatefields
filter.
+
+
+
38.81 setdar, setsar# TOC
+
+
The setdar
filter sets the Display Aspect Ratio for the filter
+output video.
+
+
This is done by changing the specified Sample (aka Pixel) Aspect
+Ratio, according to the following equation:
+
+
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+
+
Keep in mind that the setdar
filter does not modify the pixel
+dimensions of the video frame. Also, the display aspect ratio set by
+this filter may be changed by later filters in the filterchain,
+e.g. in case of scaling or if another "setdar" or a "setsar" filter is
+applied.
+
+
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
+the filter output video.
+
+
Note that as a consequence of the application of this filter, the
+output display aspect ratio will change according to the equation
+above.
+
+
Keep in mind that the sample aspect ratio set by the setsar
+filter may be changed by later filters in the filterchain, e.g. if
+another "setsar" or a "setdar" filter is applied.
+
+
It accepts the following parameters:
+
+
+r, ratio, dar (setdar
only), sar (setsar
only)
+Set the aspect ratio used by the filter.
+
+The parameter can be a floating point number string, an expression, or
+a string of the form num :den , where num and
+den are the numerator and denominator of the aspect ratio. If
+the parameter is not specified, it is assumed the value "0".
+In case the form "num :den " is used, the :
character
+should be escaped.
+
+
+max
+Set the maximum integer value to use for expressing numerator and
+denominator when reducing the expressed aspect ratio to a rational.
+Default value is 100
.
+
+
+
+
+
The parameter sar is an expression containing
+the following constants:
+
+
+E, PI, PHI
+These are approximated values for the mathematical constants e
+(Euler’s number), pi (Greek pi), and phi (the golden ratio).
+
+
+w, h
+The input width and height.
+
+
+a
+These are the same as w / h .
+
+
+sar
+The input sample aspect ratio.
+
+
+dar
+The input display aspect ratio. It is the same as
+(w / h ) * sar .
+
+
+hsub, vsub
+Horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
38.81.1 Examples# TOC
+
+
+ To change the display aspect ratio to 16:9, specify one of the following:
+
+
setdar=dar=1.77777
+setdar=dar=16/9
+setdar=dar=1.77777
+
+
+ To change the sample aspect ratio to 10:11, specify:
+
+
+ To set a display aspect ratio of 16:9, and specify a maximum integer value of
+1000 in the aspect ratio reduction, use the command:
+
+
setdar=ratio=16/9:max=1000
+
+
+
+
+
+
38.82 setfield# TOC
+
+
Force field for the output video frame.
+
+
The setfield
filter marks the interlace type field for the
+output frames. It does not change the input frame, but only sets the
+corresponding property, which affects how the frame is treated by
+following filters (e.g. fieldorder
or yadif
).
+
+
The filter accepts the following options:
+
+
+mode
+Available values are:
+
+
+‘auto ’
+Keep the same field property.
+
+
+‘bff ’
+Mark the frame as bottom-field-first.
+
+
+‘tff ’
+Mark the frame as top-field-first.
+
+
+‘prog ’
+Mark the frame as progressive.
+
+
+
+
+
+
+
38.83 showinfo# TOC
+
+
Show a line containing various information for each input video frame.
+The input video is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The Presentation TimeStamp of the input frame, expressed as a number of
+time base units. The time base unit depends on the filter input pad.
+
+
+pts_time
+The Presentation TimeStamp of the input frame, expressed as a number of
+seconds.
+
+
+pos
+The position of the frame in the input stream, or -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic video).
+
+
+fmt
+The pixel format name.
+
+
+sar
+The sample aspect ratio of the input frame, expressed in the form
+num /den .
+
+
+s
+The size of the input frame. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+i
+The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
+for bottom field first).
+
+
+iskey
+This is 1 if the frame is a key frame, 0 otherwise.
+
+
+type
+The picture type of the input frame ("I" for an I-frame, "P" for a
+P-frame, "B" for a B-frame, or "?" for an unknown type).
+Also refer to the documentation of the AVPictureType
enum and of
+the av_get_picture_type_char
function defined in
+libavutil/avutil.h .
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
+
+
+plane_checksum
+The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
+expressed in the form "[c0 c1 c2 c3 ]".
+
+
+
+
+
38.84 shuffleplanes# TOC
+
+
Reorder and/or duplicate video planes.
+
+
It accepts the following parameters:
+
+
+map0
+The index of the input plane to be used as the first output plane.
+
+
+map1
+The index of the input plane to be used as the second output plane.
+
+
+map2
+The index of the input plane to be used as the third output plane.
+
+
+map3
+The index of the input plane to be used as the fourth output plane.
+
+
+
+
+
The first plane has the index 0. The default is to keep the input unchanged.
+
+
Swap the second and third planes of the input:
+
+
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
+
+
+
+
38.85 signalstats# TOC
+
Evaluate various visual metrics that assist in determining issues associated
+with the digitization of analog video media.
+
+
By default the filter will log these metadata values:
+
+
+YMIN
+Display the minimal Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+YLOW
+Display the Y value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YAVG
+Display the average Y value within the input frame. Expressed in range of
+[0-255].
+
+
+YHIGH
+Display the Y value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YMAX
+Display the maximum Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+UMIN
+Display the minimal U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+ULOW
+Display the U value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UAVG
+Display the average U value within the input frame. Expressed in range of
+[0-255].
+
+
+UHIGH
+Display the U value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UMAX
+Display the maximum U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VMIN
+Display the minimal V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VLOW
+Display the V value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VAVG
+Display the average V value within the input frame. Expressed in range of
+[0-255].
+
+
+VHIGH
+Display the V value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VMAX
+Display the maximum V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+SATMIN
+Display the minimal saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATLOW
+Display the saturation value at the 10% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATAVG
+Display the average saturation value within the input frame. Expressed in range
+of [0-~181.02].
+
+
+SATHIGH
+Display the saturation value at the 90% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATMAX
+Display the maximum saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+HUEMED
+Display the median value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+HUEAVG
+Display the average value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+YDIF
+Display the average of sample value difference between all values of the Y
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+UDIF
+Display the average of sample value difference between all values of the U
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+VDIF
+Display the average of sample value difference between all values of the V
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+
+
The filter accepts the following options:
+
+
+stat
+out
+
+stat specify an additional form of image analysis.
+out output video with the specified type of pixel highlighted.
+
+Both options accept the following values:
+
+
+‘tout ’
+Identify temporal outliers pixels. A temporal outlier is a pixel
+unlike the neighboring pixels of the same field. Examples of temporal outliers
+include the results of video dropouts, head clogs, or tape tracking issues.
+
+
+‘vrep ’
+Identify vertical line repetition. Vertical line repetition includes
+similar rows of pixels within a frame. In born-digital video vertical line
+repetition is common, but this pattern is uncommon in video digitized from an
+analog source. When it occurs in video that results from the digitization of an
+analog source it can indicate concealment from a dropout compensator.
+
+
+‘brng ’
+Identify pixels that fall outside of legal broadcast range.
+
+
+
+
+color, c
+Set the highlight color for the out option. The default color is
+yellow.
+
+
+
+
+
38.85.1 Examples# TOC
+
+
+
+
+
38.86 smartblur# TOC
+
+
Blur the input video without impacting the outlines.
+
+
It accepts the following options:
+
+
+luma_radius, lr
+Set the luma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+luma_strength, ls
+Set the luma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+luma_threshold, lt
+Set the luma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+chroma_radius, cr
+Set the chroma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+chroma_strength, cs
+Set the chroma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+chroma_threshold, ct
+Set the chroma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+
+
If a chroma option is not explicitly set, the corresponding luma value
+is set.
+
+
+
38.87 stereo3d# TOC
+
+
Convert between different stereoscopic image formats.
+
+
The filters accept the following options:
+
+
+in
+Set stereoscopic image format of input.
+
+Available values for input image formats are:
+
+‘sbsl ’
+side by side parallel (left eye left, right eye right)
+
+
+‘sbsr ’
+side by side crosseye (right eye left, left eye right)
+
+
+‘sbs2l ’
+side by side parallel with half width resolution
+(left eye left, right eye right)
+
+
+‘sbs2r ’
+side by side crosseye with half width resolution
+(right eye left, left eye right)
+
+
+‘abl ’
+above-below (left eye above, right eye below)
+
+
+‘abr ’
+above-below (right eye above, left eye below)
+
+
+‘ab2l ’
+above-below with half height resolution
+(left eye above, right eye below)
+
+
+‘ab2r ’
+above-below with half height resolution
+(right eye above, left eye below)
+
+
+‘al ’
+alternating frames (left eye first, right eye second)
+
+
+‘ar ’
+alternating frames (right eye first, left eye second)
+
+Default value is ‘sbsl ’.
+
+
+
+
+out
+Set stereoscopic image format of output.
+
+Available values for output image formats are all the input formats as well as:
+
+‘arbg ’
+anaglyph red/blue gray
+(red filter on left eye, blue filter on right eye)
+
+
+‘argg ’
+anaglyph red/green gray
+(red filter on left eye, green filter on right eye)
+
+
+‘arcg ’
+anaglyph red/cyan gray
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arch ’
+anaglyph red/cyan half colored
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcc ’
+anaglyph red/cyan color
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcd ’
+anaglyph red/cyan color optimized with the least squares projection of dubois
+(red filter on left eye, cyan filter on right eye)
+
+
+‘agmg ’
+anaglyph green/magenta gray
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmh ’
+anaglyph green/magenta half colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmc ’
+anaglyph green/magenta colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmd ’
+anaglyph green/magenta color optimized with the least squares projection of dubois
+(green filter on left eye, magenta filter on right eye)
+
+
+‘aybg ’
+anaglyph yellow/blue gray
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybh ’
+anaglyph yellow/blue half colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybc ’
+anaglyph yellow/blue colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybd ’
+anaglyph yellow/blue color optimized with the least squares projection of dubois
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘irl ’
+interleaved rows (left eye has top row, right eye starts on next row)
+
+
+‘irr ’
+interleaved rows (right eye has top row, left eye starts on next row)
+
+
+‘ml ’
+mono output (left eye only)
+
+
+‘mr ’
+mono output (right eye only)
+
+
+
+Default value is ‘arcd ’.
+
+
+
+
+
38.87.1 Examples# TOC
+
+
+ Convert input video from side by side parallel to anaglyph yellow/blue dubois:
+
+
+ Convert input video from above-below (left eye above, right eye below) to side by side crosseye:
+
+
+
+
+
38.88 spp# TOC
+
+
Apply a simple postprocessing filter that compresses and decompresses the image
+at several (or - in the case of quality level 6
- all) shifts
+and averages the results.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-6. If set to 0
, the filter will have no
+effect. A value of 6
means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding (default).
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
38.89 subtitles# TOC
+
+
Draw subtitles on top of input video using the libass library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libass
. This filter also requires a build with libavcodec and
+libavformat to convert the passed subtitles file to ASS (Advanced Substation
+Alpha) subtitles format.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filename of the subtitle file to read. It must be specified.
+
+
+original_size
+Specify the size of the original video, the video for which the ASS file
+was composed. For the syntax of this option, check the "Video size" section in
+the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
+this is necessary to correctly scale the fonts if the aspect ratio has been
+changed.
+
+
+charenc
+Set subtitles input character encoding. subtitles
filter only. Only
+useful if not UTF-8.
+
+
+stream_index, si
+Set subtitles stream index. subtitles
filter only.
+
+
+
+
If the first key is not specified, it is assumed that the first value
+specifies the filename .
+
+
For example, to render the file sub.srt on top of the input
+video, use the command:
+
+
+
which is equivalent to:
+
+
subtitles=filename=sub.srt
+
+
+
To render the default subtitles stream from file video.mkv , use:
+
+
+
To render the second subtitles stream from that file, use:
+
+
subtitles=video.mkv:si=1
+
+
+
+
38.90 super2xsai# TOC
+
+
Scale the input by 2x and smooth using the Super2xSaI (Scale and
+Interpolate) pixel art scaling algorithm.
+
+
Useful for enlarging pixel art images without reducing sharpness.
+
+
+
38.91 swapuv# TOC
+
Swap U & V plane.
+
+
+
38.92 telecine# TOC
+
+
Apply telecine process to the video.
+
+
This filter accepts the following options:
+
+
+first_field
+
+‘top, t ’
+top field first
+
+‘bottom, b ’
+bottom field first
+The default value is top
.
+
+
+
+
+pattern
+A string of numbers representing the pulldown pattern you wish to apply.
+The default value is 23
.
+
+
+
+
+
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+
+
+
38.93 thumbnail# TOC
+
Select the most representative frame in a given sequence of consecutive frames.
+
+
The filter accepts the following options:
+
+
+n
+Set the frames batch size to analyze; in a set of n frames, the filter
+will pick one of them, and then handle the next batch of n frames until
+the end. Default is 100
.
+
+
+
+
Since the filter keeps track of the whole frames sequence, a bigger n
+value will result in a higher memory usage, so a high value is not recommended.
+
+
+
38.93.1 Examples# TOC
+
+
+ Extract one picture each 50 frames:
+
+
+ Complete example of a thumbnail creation with ffmpeg
:
+
+
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
+
+
+
+
+
38.94 tile# TOC
+
+
Tile several successive frames together.
+
+
The filter accepts the following options:
+
+
+layout
+Set the grid size (i.e. the number of lines and columns). For the syntax of
+this option, check the "Video size" section in the ffmpeg-utils manual.
+
+
+nb_frames
+Set the maximum number of frames to render in the given area. It must be less
+than or equal to w xh . The default value is 0
, meaning all
+the area will be used.
+
+
+margin
+Set the outer border margin in pixels.
+
+
+padding
+Set the inner border thickness (i.e. the number of pixels between frames). For
+more advanced padding options (such as having different values for the edges),
+refer to the pad video filter.
+
+
+color
+Specify the color of the unused area. For the syntax of this option, check the
+"Color" section in the ffmpeg-utils manual. The default value of color
+is "black".
+
+
+
+
+
38.94.1 Examples# TOC
+
+
+
+
+
38.95 tinterlace# TOC
+
+
Perform various types of temporal field interlacing.
+
+
Frames are counted starting from 1, so the first input frame is
+considered odd.
+
+
The filter accepts the following options:
+
+
+mode
+Specify the mode of the interlacing. This option can also be specified
+as a value alone. See below for a list of values for this option.
+
+Available values are:
+
+
+‘merge, 0 ’
+Move odd frames into the upper field, even into the lower field,
+generating a double height frame at half frame rate.
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+‘drop_odd, 1 ’
+Only output even frames, odd frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+ 22222 44444
+ 22222 44444
+ 22222 44444
+ 22222 44444
+
+
+
+‘drop_even, 2 ’
+Only output odd frames, even frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+11111 33333
+11111 33333
+11111 33333
+
+
+
+‘pad, 3 ’
+Expand each frame to full height, but pad alternate lines with black,
+generating a frame with double height at the same input frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+
+
+
+
+‘interleave_top, 4 ’
+Interleave the upper field from odd frames with the lower field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+
+‘interleave_bottom, 5 ’
+Interleave the lower field from odd frames with the upper field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+
+Output:
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+
+
+
+
+‘interlacex2, 6 ’
+Double frame rate with unchanged height. Frames are inserted each
+containing the second temporal field from the previous input frame and
+the first temporal field from the next input frame. This mode relies on
+the top_field_first flag. Useful for interlaced video displays with no
+field synchronisation.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+
+Output:
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+
+
+
+
+
+
+Numeric values are deprecated but are accepted for backward
+compatibility reasons.
+
+Default mode is merge
.
+
+
+flags
+Specify flags influencing the filter process.
+
+Available value for flags is:
+
+
+low_pass_filter, vlfp
+Enable vertical low-pass filtering in the filter.
+Vertical low-pass filtering is required when creating an interlaced
+destination from a progressive source which contains high-frequency
+vertical detail. Filtering will reduce interlace ’twitter’ and Moire
+patterning.
+
+Vertical low-pass filtering can only be enabled for mode
+interleave_top and interleave_bottom .
+
+
+
+
+
+
+
+
38.96 transpose# TOC
+
+
Transpose rows with columns in the input video and optionally flip it.
+
+
It accepts the following parameters:
+
+
+dir
+Specify the transposition direction.
+
+Can assume the following values:
+
+‘0, 4, cclock_flip ’
+Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
+
+
L.R L.l
+. . -> . .
+l.r R.r
+
+
+
+‘1, 5, clock ’
+Rotate by 90 degrees clockwise, that is:
+
+
L.R l.L
+. . -> . .
+l.r r.R
+
+
+
+‘2, 6, cclock ’
+Rotate by 90 degrees counterclockwise, that is:
+
+
L.R R.r
+. . -> . .
+l.r L.l
+
+
+
+‘3, 7, clock_flip ’
+Rotate by 90 degrees clockwise and vertically flip, that is:
+
+
L.R r.R
+. . -> . .
+l.r l.L
+
+
+
+
+For values between 4-7, the transposition is only done if the input
+video geometry is portrait and not landscape. These values are
+deprecated, the passthrough
option should be used instead.
+
+Numerical values are deprecated, and should be dropped in favor of
+symbolic constants.
+
+
+passthrough
+Do not apply the transposition if the input geometry matches the one
+specified by the value. It accepts the following values:
+
+‘none ’
+Always apply transposition.
+
+‘portrait ’
+Preserve portrait geometry (when height >= width ).
+
+‘landscape ’
+Preserve landscape geometry (when width >= height ).
+
+
+
+Default value is none
.
+
+
+
+
For example to rotate by 90 degrees clockwise and preserve portrait
+layout:
+
+
transpose=dir=1:passthrough=portrait
+
+
+
The command above can also be specified as:
+
+
+
+
38.97 trim# TOC
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Specify the time of the start of the kept section, i.e. the frame with the
+timestamp start will be the first frame in the output.
+
+
+end
+Specify the time of the first frame that will be dropped, i.e. the frame
+immediately preceding the one with the timestamp end will be the last
+frame in the output.
+
+
+start_pts
+This is the same as start , except this option sets the start timestamp
+in timebase units instead of seconds.
+
+
+end_pts
+This is the same as end , except this option sets the end timestamp
+in timebase units instead of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_frame
+The number of the first frame that should be passed to the output.
+
+
+end_frame
+The number of the first frame that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _frame variants simply count the
+frames that pass through the filter. Also note that this filter does not modify
+the timestamps. If you wish for the output timestamps to start at zero, insert a
+setpts filter after the trim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all the frames that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple trim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -vf trim=60:120
+
+
+ Keep only the first second:
+
+
ffmpeg -i INPUT -vf trim=duration=1
+
+
+
+
+
+
+
38.98 unsharp# TOC
+
+
Sharpen or blur the input video.
+
+
It accepts the following parameters:
+
+
+luma_msize_x, lx
+Set the luma matrix horizontal size. It must be an odd integer between
+3 and 63. The default value is 5.
+
+
+luma_msize_y, ly
+Set the luma matrix vertical size. It must be an odd integer between 3
+and 63. The default value is 5.
+
+
+luma_amount, la
+Set the luma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 1.0.
+
+
+chroma_msize_x, cx
+Set the chroma matrix horizontal size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_msize_y, cy
+Set the chroma matrix vertical size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_amount, ca
+Set the chroma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 0.0.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
All parameters are optional and default to the equivalent of the
+string ’5:5:1.0:5:5:0.0’.
+
+
+
38.98.1 Examples# TOC
+
+
+ Apply strong luma sharpen effect:
+
+
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
+
+
+ Apply a strong blur of both luma and chroma parameters:
+
+
+
+
+
38.99 uspp# TOC
+
+
Apply ultra slow/simple postprocessing filter that compresses and decompresses
+the image at several (or - in the case of quality level 8
- all)
+shifts and averages the results.
+
+
The way this differs from the behavior of spp is that uspp actually encodes &
+decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
+DCT similar to MJPEG.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-8. If set to 0
, the filter will have no
+effect. A value of 8
means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+
+
+
38.100 vidstabdetect# TOC
+
+
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
+vidstabtransform for pass 2.
+
+
This filter generates a file with relative translation and rotation
+transform information about subsequent frames, which is then used by
+the vidstabtransform filter.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
This filter accepts the following options:
+
+
+result
+Set the path to the file used to write the transforms information.
+Default value is transforms.trf .
+
+
+shakiness
+Set how shaky the video is and how quick the camera is. It accepts an
+integer in the range 1-10, a value of 1 means little shakiness, a
+value of 10 means strong shakiness. Default value is 5.
+
+
+accuracy
+Set the accuracy of the detection process. It must be a value in the
+range 1-15. A value of 1 means low accuracy, a value of 15 means high
+accuracy. Default value is 15.
+
+
+stepsize
+Set stepsize of the search process. The region around minimum is
+scanned with 1 pixel resolution. Default value is 6.
+
+
+mincontrast
+Set minimum contrast. Below this value a local measurement field is
+discarded. Must be a floating point value in the range 0-1. Default
+value is 0.3.
+
+
+tripod
+Set reference frame number for tripod mode.
+
+If enabled, the motion of the frames is compared to a reference frame
+in the filtered stream, identified by the specified number. The idea
+is to compensate all movements in a more-or-less static scene and keep
+the camera view absolutely still.
+
+If set to 0, it is disabled. The frames are counted starting from 1.
+
+
+show
+Show fields and transforms in the resulting frames. It accepts an
+integer in the range 0-2. Default value is 0, which disables any
+visualization.
+
+
+
+
+
38.100.1 Examples# TOC
+
+
+ Use default values:
+
+
+ Analyze strongly shaky movie and put the results in file
+mytransforms.trf :
+
+
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
+
+
+ Visualize the result of internal transformations in the resulting
+video:
+
+
+ Analyze a video with medium shakiness using ffmpeg
:
+
+
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
+
+
+
+
+
38.101 vidstabtransform# TOC
+
+
Video stabilization/deshaking: pass 2 of 2,
+see vidstabdetect for pass 1.
+
+
Read a file with transform information for each frame and
+apply/compensate them. Together with the vidstabdetect
+filter this can be used to deshake videos. See also
+http://public.hronopik.de/vid.stab . It is important to also use
+the unsharp filter, see below.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
+
38.101.1 Options# TOC
+
+
+input
+Set path to the file used to read the transforms. Default value is
+transforms.trf .
+
+
+smoothing
+Set the number of frames (value*2 + 1) used for lowpass filtering the
+camera movements. Default value is 10.
+
+For example a number of 10 means that 21 frames are used (10 in the
+past and 10 in the future) to smoothen the motion in the video. A
+larger value leads to a smoother video, but limits the acceleration of
+the camera (pan/tilt movements). 0 is a special case where a static
+camera is simulated.
+
+
+optalgo
+Set the camera path optimization algorithm.
+
+Accepted values are:
+
+‘gauss ’
+gaussian kernel low-pass filter on camera motion (default)
+
+‘avg ’
+averaging on transformations
+
+
+
+
+maxshift
+Set maximal number of pixels to translate frames. Default value is -1,
+meaning no limit.
+
+
+maxangle
+Set maximal angle in radians (degree*PI/180) to rotate frames. Default
+value is -1, meaning no limit.
+
+
+crop
+Specify how to deal with borders that may be visible due to movement
+compensation.
+
+Available values are:
+
+‘keep ’
+keep image information from previous frame (default)
+
+‘black ’
+fill the border black
+
+
+
+
+invert
+Invert transforms if set to 1. Default value is 0.
+
+
+relative
+Consider transforms as relative to previous frame if set to 1,
+absolute if set to 0. Default value is 0.
+
+
+zoom
+Set percentage to zoom. A positive value will result in a zoom-in
+effect, a negative value in a zoom-out effect. Default value is 0 (no
+zoom).
+
+
+optzoom
+Set optimal zooming to avoid borders.
+
+Accepted values are:
+
+‘0 ’
+disabled
+
+‘1 ’
+optimal static zoom value is determined (only very strong movements
+will lead to visible borders) (default)
+
+‘2 ’
+optimal adaptive zoom value is determined (no borders will be
+visible), see zoomspeed
+
+
+
+Note that the value given at zoom is added to the one calculated here.
+
+
+zoomspeed
+Set percent to zoom maximally each frame (enabled when
+optzoom is set to 2). Range is from 0 to 5, default value is
+0.25.
+
+
+interpol
+Specify type of interpolation.
+
+Available values are:
+
+‘no ’
+no interpolation
+
+‘linear ’
+linear only horizontal
+
+‘bilinear ’
+linear in both directions (default)
+
+‘bicubic ’
+cubic in both directions (slow)
+
+
+
+
+tripod
+Enable virtual tripod mode if set to 1, which is equivalent to
+relative=0:smoothing=0
. Default value is 0.
+
+Use also tripod
option of vidstabdetect .
+
+
+debug
+Increase log verbosity if set to 1. Also the detected global motions
+are written to the temporary file global_motions.trf . Default
+value is 0.
+
+
+
+
+
38.101.2 Examples# TOC
+
+
+
+
+
38.102 vflip# TOC
+
+
Flip the input video vertically.
+
+
For example, to vertically flip a video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "vflip" out.avi
+
+
+
+
38.103 vignette# TOC
+
+
Make or reverse a natural vignetting effect.
+
+
The filter accepts the following options:
+
+
+angle, a
+Set lens angle expression as a number of radians.
+
+The value is clipped in the [0,PI/2]
range.
+
+Default value: "PI/5"
+
+
+x0
+y0
+Set center coordinates expressions. Respectively "w/2"
and "h/2"
+by default.
+
+
+mode
+Set forward/backward mode.
+
+Available modes are:
+
+‘forward ’
+The larger the distance from the central point, the darker the image becomes.
+
+
+‘backward ’
+The larger the distance from the central point, the brighter the image becomes.
+This can be used to reverse a vignette effect, though there is no automatic
+detection to extract the lens angle and other settings (yet). It can
+also be used to create a burning effect.
+
+
+
+Default value is ‘forward ’.
+
+
+eval
+Set evaluation mode for the expressions (angle , x0 , y0 ).
+
+It accepts the following values:
+
+‘init ’
+Evaluate expressions only once during the filter initialization.
+
+
+‘frame ’
+Evaluate expressions for each incoming frame. This is way slower than the
+‘init ’ mode since it requires all the scalers to be re-computed, but it
+allows advanced dynamic expressions.
+
+
+
+Default value is ‘init ’.
+
+
+dither
+Set dithering to reduce the circular banding effects. Default is 1
+(enabled).
+
+
+aspect
+Set vignette aspect. This setting allows one to adjust the shape of the vignette.
+Setting this value to the SAR of the input will make a rectangular vignetting
+following the dimensions of the video.
+
+Default is 1/1
.
+
+
+
+
+
38.103.1 Expressions# TOC
+
+
The alpha , x0 and y0 expressions can contain the
+following parameters.
+
+
+w
+h
+input width and height
+
+
+n
+the number of input frame, starting from 0
+
+
+pts
+the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
+TB units, NAN if undefined
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+the PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in seconds, NAN if undefined
+
+
+tb
+time base of the input video
+
+
+
+
+
+
38.103.2 Examples# TOC
+
+
+ Apply simple strong vignetting effect:
+
+
+ Make a flickering vignetting:
+
+
vignette='PI/4+random(1)*PI/50':eval=frame
+
+
+
+
+
+
38.104 w3fdif# TOC
+
+
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
+Deinterlacing Filter").
+
+
Based on the process described by Martin Weston for BBC R&D, and
+implemented based on the de-interlace algorithm written by Jim
+Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
+uses filter coefficients calculated by BBC R&D.
+
+
There are two sets of filter coefficients, so called "simple":
+and "complex". Which set of filter coefficients is used can
+be set by passing an optional parameter:
+
+
+filter
+Set the interlacing filter coefficients. Accepts one of the following values:
+
+
+‘simple ’
+Simple filter coefficient set.
+
+‘complex ’
+More-complex filter coefficient set.
+
+
+Default value is ‘complex ’.
+
+
+deint
+Specify which frames to deinterlace. It accepts one of the following values:
+
+
+‘all ’
+Deinterlace all frames.
+
+‘interlaced ’
+Only deinterlace frames marked as interlaced.
+
+
+
+Default value is ‘all ’.
+
+
+
+
+
38.105 xbr# TOC
+
Apply the xBR high-quality magnification filter which is designed for pixel
+art. It follows a set of edge-detection rules, see
+http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for 2xBR
, 3
for
+3xBR
and 4
for 4xBR
.
+Default is 3
.
+
+
+
+
+
38.106 yadif# TOC
+
+
Deinterlace the input video ("yadif" means "yet another deinterlacing
+filter").
+
+
It accepts the following parameters:
+
+
+
+mode
+The interlacing mode to adopt. It accepts one of the following values:
+
+
+0, send_frame
+Output one frame for each frame.
+
+1, send_field
+Output one frame for each field.
+
+2, send_frame_nospatial
+Like send_frame
, but it skips the spatial interlacing check.
+
+3, send_field_nospatial
+Like send_field
, but it skips the spatial interlacing check.
+
+
+
+The default value is send_frame
.
+
+
+parity
+The picture field parity assumed for the input interlaced video. It accepts one
+of the following values:
+
+
+0, tff
+Assume the top field is first.
+
+1, bff
+Assume the bottom field is first.
+
+-1, auto
+Enable automatic detection of field parity.
+
+
+
+The default value is auto
.
+If the interlacing is unknown or the decoder does not export this information,
+top field first will be assumed.
+
+
+deint
+Specify which frames to deinterlace. It accepts one of the following
+values:
+
+
+0, all
+Deinterlace all frames.
+
+1, interlaced
+Only deinterlace frames marked as interlaced.
+
+
+
+The default value is all
.
+
+
+
+
+
38.107 zoompan# TOC
+
+
Apply Zoom & Pan effect.
+
+
This filter accepts the following options:
+
+
+zoom, z
+Set the zoom expression. Default is 1.
+
+
+x
+y
+Set the x and y expression. Default is 0.
+
+
+d
+Set the duration expression in number of frames.
+This sets for how many number of frames effect will last for
+single input image.
+
+
+s
+Set the output image size, default is ’hd720’.
+
+
+
+
Each expression can contain the following constants:
+
+
+in_w, iw
+Input width.
+
+
+in_h, ih
+Input height.
+
+
+out_w, ow
+Output width.
+
+
+out_h, oh
+Output height.
+
+
+in
+Input frame count.
+
+
+on
+Output frame count.
+
+
+x
+y
+Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
+for current input frame.
+
+
+px
+py
+’x’ and ’y’ of last output frame of previous input frame or 0 when there was
+not yet such frame (first input frame).
+
+
+zoom
+Last calculated zoom from ’z’ expression for current input frame.
+
+
+pzoom
+Last calculated zoom of last output frame of previous input frame.
+
+
+duration
+Number of output frames for current input frame. Calculated from ’d’ expression
+for each input frame.
+
+
+pduration
+number of output frames created for previous input frame
+
+
+a
+Rational number: input width / input height
+
+
+sar
+sample aspect ratio
+
+
+dar
+display aspect ratio
+
+
+
+
+
+
38.107.1 Examples# TOC
+
+
+ Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
+
+
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
+
+
+
+
+
+
39 Video Sources# TOC
+
+
Below is a description of the currently available video sources.
+
+
+
39.1 buffer# TOC
+
+
Buffer video frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/vsrc_buffer.h .
+
+
It accepts the following parameters:
+
+
+video_size
+Specify the size (width and height) of the buffered video frames. For the
+syntax of this option, check the "Video size" section in the ffmpeg-utils
+manual.
+
+
+width
+The input video width.
+
+
+height
+The input video height.
+
+
+pix_fmt
+A string representing the pixel format of the buffered video frames.
+It may be a number corresponding to a pixel format, or a pixel format
+name.
+
+
+time_base
+Specify the timebase assumed by the timestamps of the buffered frames.
+
+
+frame_rate
+Specify the frame rate expected for the video stream.
+
+
+pixel_aspect, sar
+The sample (pixel) aspect ratio of the input video.
+
+
+sws_param
+Specify the optional parameters to be used for the scale filter which
+is automatically inserted when an input change is detected in the
+input size or format.
+
+
+
+
For example:
+
+
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+
+
will instruct the source to accept video frames with size 320x240 and
+with format "yuv410p", assuming 1/24 as the timestamps timebase and
+square pixels (1:1 sample aspect ratio).
+Since the pixel format with name "yuv410p" corresponds to the number 6
+(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
+this example corresponds to:
+
+
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+
+
Alternatively, the options can be specified as a flat string, but this
+syntax is deprecated:
+
+
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
+
+
+
39.2 cellauto# TOC
+
+
Create a pattern generated by an elementary cellular automaton.
+
+
The initial state of the cellular automaton can be defined through the
+filename , and pattern options. If such options are
+not specified an initial state is created randomly.
+
+
At each new frame a new row in the video is filled with the result of
+the cellular automaton next generation. The behavior when the whole
+frame is filled is defined by the scroll option.
+
+
This source accepts the following options:
+
+
+filename, f
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified file.
+In the file, each non-whitespace character is considered an alive
+cell, a newline will terminate the row, and further characters in the
+file will be ignored.
+
+
+pattern, p
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified string.
+
+Each non-whitespace character in the string is considered an alive
+cell, a newline will terminate the row, and further characters in the
+string will be ignored.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial cellular automaton row. It
+is a floating point number value ranging from 0 to 1, defaults to
+1/PHI.
+
+This option is ignored when a file or a pattern is specified.
+
+
+random_seed, seed
+Set the seed for filling randomly the initial row, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the cellular automaton rule, it is a number ranging from 0 to 255.
+Default value is 110.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual.
+
+If filename or pattern is specified, the size is set
+by default to the width of the specified initial state row, and the
+height is set to width * PHI.
+
+If size is set, it must contain the width of the specified
+pattern string, and the specified pattern will be centered in the
+larger row.
+
+If a filename or a pattern string is not specified, the size value
+defaults to "320x518" (used for a randomly generated initial state).
+
+
+scroll
+If set to 1, scroll the output upward when all the rows in the output
+have been already filled. If set to 0, the new generated row will be
+written over the top row just after the bottom row is filled.
+Defaults to 1.
+
+
+start_full, full
+If set to 1, completely fill the output with generated rows before
+outputting the first frame.
+This is the default behavior, for disabling set the value to 0.
+
+
+stitch
+If set to 1, stitch the left and right row edges together.
+This is the default behavior, for disabling set the value to 0.
+
+
+
+
+
39.2.1 Examples# TOC
+
+
+ Read the initial state from pattern , and specify an output of
+size 200x400.
+
+
cellauto=f=pattern:s=200x400
+
+
+ Generate a random initial row with a width of 200 cells, with a fill
+ratio of 2/3:
+
+
cellauto=ratio=2/3:s=200x200
+
+
+ Create a pattern generated by rule 18 starting by a single alive cell
+centered on an initial row with width 100:
+
+
cellauto=p=@:s=100x400:full=0:rule=18
+
+
+ Specify a more elaborated initial pattern:
+
+
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
+
+
+
+
+
+
39.3 mandelbrot# TOC
+
+
Generate a Mandelbrot set fractal, and progressively zoom towards the
+point specified with start_x and start_y .
+
+
This source accepts the following options:
+
+
+end_pts
+Set the terminal pts value. Default value is 400.
+
+
+end_scale
+Set the terminal scale value.
+Must be a floating point value. Default value is 0.3.
+
+
+inner
+Set the inner coloring mode, that is the algorithm used to draw the
+Mandelbrot fractal internal region.
+
+It shall assume one of the following values:
+
+black
+Set black mode.
+
+convergence
+Show time until convergence.
+
+mincol
+Set color based on point closest to the origin of the iterations.
+
+period
+Set period mode.
+
+
+
+Default value is mincol .
+
+
+bailout
+Set the bailout value. Default value is 10.0.
+
+
+maxiter
+Set the maximum of iterations performed by the rendering
+algorithm. Default value is 7189.
+
+
+outer
+Set outer coloring mode.
+It shall assume one of following values:
+
+iteration_count
+Set iteration count mode.
+
+normalized_iteration_count
+Set normalized iteration count mode.
+
+
+Default value is normalized_iteration_count .
+
+
+rate, r
+Set frame rate, expressed as number of frames per second. Default
+value is "25".
+
+
+size, s
+Set frame size. For the syntax of this option, check the "Video
+size" section in the ffmpeg-utils manual. Default value is "640x480".
+
+
+start_scale
+Set the initial scale value. Default value is 3.0.
+
+
+start_x
+Set the initial x position. Must be a floating point value between
+-100 and 100. Default value is -0.743643887037158704752191506114774.
+
+
+start_y
+Set the initial y position. Must be a floating point value between
+-100 and 100. Default value is -0.131825904205311970493132056385139.
+
+
+
+
+
39.4 mptestsrc# TOC
+
+
Generate various test patterns, as generated by the MPlayer test filter.
+
+
The size of the generated video is fixed, and is 256x256.
+This source is useful in particular for testing encoding features.
+
+
This source accepts the following options:
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+test, t
+
+Set the number or the name of the test to perform. Supported tests are:
+
+dc_luma
+dc_chroma
+freq_luma
+freq_chroma
+amp_luma
+amp_chroma
+cbp
+mv
+ring1
+ring2
+all
+
+
+Default value is "all", which will cycle through the list of all tests.
+
+
+
+
Some examples:
+
+
+
will generate a "dc_luma" test pattern.
+
+
+
39.5 frei0r_src# TOC
+
+
Provide a frei0r source.
+
+
To enable compilation of this filter you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
This source accepts the following parameters:
+
+
+size
+The size of the video to generate. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+
+framerate
+The framerate of the generated video. It may be a string of the form
+num /den or a frame rate abbreviation.
+
+
+filter_name
+The name to the frei0r source to load. For more information regarding frei0r and
+how to set the parameters, read the frei0r section in the video filters
+documentation.
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r source.
+
+
+
+
+
For example, to generate a frei0r partik0l source with size 200x200
+and frame rate 10 which is overlaid on the overlay filter main input:
+
+
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+
+
+
39.6 life# TOC
+
+
Generate a life pattern.
+
+
This source is based on a generalization of John Conway’s life game.
+
+
The sourced input represents a life grid, each pixel represents a cell
+which can be in one of two possible states, alive or dead. Every cell
+interacts with its eight neighbours, which are the cells that are
+horizontally, vertically, or diagonally adjacent.
+
+
At each interaction the grid evolves according to the adopted rule,
+which specifies the number of neighbor alive cells which will make a
+cell stay alive or born. The rule option allows one to specify
+the rule to adopt.
+
+
This source accepts the following options:
+
+
+filename, f
+Set the file from which to read the initial grid state. In the file,
+each non-whitespace character is considered an alive cell, and newline
+is used to delimit the end of each row.
+
+If this option is not specified, the initial grid is generated
+randomly.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial random grid. It is a
+floating point number value ranging from 0 to 1, defaults to 1/PHI.
+It is ignored when a file is specified.
+
+
+random_seed, seed
+Set the seed for filling the initial random grid, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the life rule.
+
+A rule can be specified with a code of the kind "SNS /BNB ",
+where NS and NB are sequences of numbers in the range 0-8,
+NS specifies the number of alive neighbor cells which make a
+live cell stay alive, and NB the number of alive neighbor cells
+which make a dead cell to become alive (i.e. to "born").
+"s" and "b" can be used in place of "S" and "B", respectively.
+
+Alternatively a rule can be specified by an 18-bits integer. The 9
+high order bits are used to encode the next cell state if it is alive
+for each number of neighbor alive cells, the low order bits specify
+the rule for "borning" new cells. Higher order bits encode for a
+higher number of neighbor cells.
+For example the number 6153 = (12<<9)+9
specifies a stay alive
+rule of 12 and a born rule of 9, which corresponds to "S23/B03".
+
+Default value is "S23/B3", which is the original Conway’s game of life
+rule, and will keep a cell alive if it has 2 or 3 neighbor alive
+cells, and will born a new cell if there are three alive cells around
+a dead cell.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+If filename is specified, the size is set by default to the
+same size of the input file. If size is set, it must contain
+the size specified in the input file, and the initial grid defined in
+that file is centered in the larger resulting area.
+
+If a filename is not specified, the size value defaults to "320x240"
+(used for a randomly generated initial grid).
+
+
+stitch
+If set to 1, stitch the left and right grid edges together, and the
+top and bottom edges also. Defaults to 1.
+
+
+mold
+Set cell mold speed. If set, a dead cell will go from death_color to
+mold_color with a step of mold. mold can have a
+value from 0 to 255.
+
+
+life_color
+Set the color of living (or new born) cells.
+
+
+death_color
+Set the color of dead cells. If mold is set, this is the first color
+used to represent a dead cell.
+
+
+mold_color
+Set mold color, for definitely dead and moldy cells.
+
+For the syntax of these 3 color options, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+
+
+
39.6.1 Examples# TOC
+
+
+ Read a grid from pattern , and center it on a grid of size
+300x300 pixels:
+
+
life=f=pattern:s=300x300
+
+
+ Generate a random grid of size 200x200, with a fill ratio of 2/3:
+
+
life=ratio=2/3:s=200x200
+
+
+ Specify a custom rule for evolving a randomly generated grid:
+
+
life=rule=S14/B34
+
+
+ Full example with slow death effect (mold) using ffplay
:
+
+
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
+
+
+
+
+
39.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
+
+
The color
source provides a uniformly colored input.
+
+
The haldclutsrc
source provides an identity Hald CLUT. See also
+haldclut filter.
+
+
The nullsrc
source returns unprocessed video frames. It is
+mainly useful to be employed in analysis / debugging tools, or as the
+source for filters which ignore the input data.
+
+
The rgbtestsrc
source generates an RGB test pattern useful for
+detecting RGB vs BGR issues. You should see a red, green and blue
+stripe from top to bottom.
+
+
The smptebars
source generates a color bars pattern, based on
+the SMPTE Engineering Guideline EG 1-1990.
+
+
The smptehdbars
source generates a color bars pattern, based on
+the SMPTE RP 219-2002.
+
+
The testsrc
source generates a test video pattern, showing a
+color pattern, a scrolling gradient and a timestamp. This is mainly
+intended for testing purposes.
+
+
The sources accept the following parameters:
+
+
+color, c
+Specify the color of the source, only available in the color
+source. For the syntax of this option, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+level
+Specify the level of the Hald CLUT, only available in the haldclutsrc
+source. A level of N
generates a picture of N*N*N
by N*N*N
+pixels to be used as identity matrix for 3D lookup tables. Each component is
+coded on a 1/(N*N)
scale.
+
+
+size, s
+Specify the size of the sourced video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual. The default value is
+"320x240".
+
+This option is not available with the haldclutsrc
filter.
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num/frame_rate_den, an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+sar
+Set the sample aspect ratio of the sourced video.
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+decimals, n
+Set the number of decimals to show in the timestamp, only available in the
+testsrc
source.
+
+The displayed timestamp value will correspond to the original
+timestamp value multiplied by the power of 10 of the specified
+value. Default value is 0.
+
+
+
+
For example the following:
+
+
testsrc=duration=5.3:size=qcif:rate=10
+
+
+
will generate a video with a duration of 5.3 seconds, with size
+176x144 and a frame rate of 10 frames per second.
+
+
The following graph description will generate a red source
+with an opacity of 0.2, with size "qcif" and a frame rate of 10
+frames per second.
+
+
color=c=red@0.2:s=qcif:r=10
+
+
+
If the input content is to be ignored, nullsrc
can be used. The
+following command generates noise in the luminance plane by employing
+the geq
filter:
+
+
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+
+
+
39.7.1 Commands# TOC
+
+
The color
source supports the following commands:
+
+
+c, color
+Set the color of the created image. Accepts the same syntax of the
+corresponding color option.
+
+
+
+
+
+
40 Video Sinks# TOC
+
+
Below is a description of the currently available video sinks.
+
+
+
40.1 buffersink# TOC
+
+
Buffer video frames, and make them available to the end of the filter
+graph.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVBufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
+
40.2 nullsink# TOC
+
+
Null video sink: do absolutely nothing with the input video. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
41 Multimedia Filters# TOC
+
+
Below is a description of the currently available multimedia filters.
+
+
+
41.1 avectorscope# TOC
+
+
Convert input audio to a video output, representing the audio vector
+scope.
+
+
The filter is used to measure the difference between channels of stereo
+audio stream. A monoaural signal, consisting of identical left and right
+signal, results in a straight vertical line. Any stereo separation is visible
+as a deviation from this line, creating a Lissajous figure.
+If a straight but horizontal line appears (or a deviation from it), this
+indicates that the left and right channels are out of phase.
+
+
The filter accepts the following options:
+
+
+mode, m
+Set the vectorscope mode.
+
+Available values are:
+
+‘lissajous ’
+Lissajous rotated by 45 degrees.
+
+
+‘lissajous_xy ’
+Same as above but not rotated.
+
+
+
+Default value is ‘lissajous ’.
+
+
+size, s
+Set the video size for the output. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual. Default value is 400x400
.
+
+
+rate, r
+Set the output frame rate. Default value is 25
.
+
+
+rc
+gc
+bc
+Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
+Allowed range is [0, 255]
.
+
+
+rf
+gf
+bf
+Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
+Allowed range is [0, 255]
.
+
+
+zoom
+Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
+
+
+
+
+
41.1.1 Examples# TOC
+
+
+ Complete example using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
+
+
+
+
+
41.2 concat# TOC
+
+
Concatenate audio and video streams, joining them together one after the
+other.
+
+
The filter works on segments of synchronized video and audio streams. All
+segments must have the same number of streams of each type, and that will
+also be the number of streams at output.
+
+
The filter accepts the following options:
+
+
+n
+Set the number of segments. Default is 2.
+
+
+v
+Set the number of output video streams, that is also the number of video
+streams in each segment. Default is 1.
+
+
+a
+Set the number of output audio streams, that is also the number of audio
+streams in each segment. Default is 0.
+
+
+unsafe
+Activate unsafe mode: do not fail if segments have a different format.
+
+
+
+
+
+The filter has v+a outputs: first v video outputs, then
+a audio outputs.
+
+
+There are n x (v+a) inputs: first the inputs for the first
+segment, in the same order as the outputs, then the inputs for the second
+segment, etc.
+
+
Related streams do not always have exactly the same duration, for various
+reasons including codec frame size or sloppy authoring. For that reason,
+related synchronized streams (e.g. a video and its audio track) should be
+concatenated at once. The concat filter will use the duration of the longest
+stream in each segment (except the last one), and if necessary pad shorter
+audio streams with silence.
+
+
For this filter to work correctly, all segments must start at timestamp 0.
+
+
All corresponding streams must have the same parameters in all segments; the
+filtering system will automatically select a common pixel format for video
+streams, and a common sample format, sample rate and channel layout for
+audio streams, but other settings, such as resolution, must be converted
+explicitly by the user.
+
+
Different frame rates are acceptable but will result in variable frame rate
+at output; be sure to configure the output file to handle it.
+
+
+
41.2.1 Examples# TOC
+
+
+
+
+
41.3 ebur128# TOC
+
+
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
+it unchanged. By default, it logs a message at a frequency of 10Hz with the
+Momentary loudness (identified by M
), Short-term loudness (S
),
+Integrated loudness (I
) and Loudness Range (LRA
).
+
+
The filter also has a video output (see the video option) with a real
+time graph to observe the loudness evolution. The graphic contains the logged
+message mentioned above, so it is not printed anymore when this option is set,
+unless the verbose logging is set. The main graphing area contains the
+short-term loudness (3 seconds of analysis), and the gauge on the right is for
+the momentary loudness (400 milliseconds).
+
+
More information about the Loudness Recommendation EBU R128 on
+http://tech.ebu.ch/loudness .
+
+
The filter accepts the following options:
+
+
+video
+Activate the video output. The audio stream is passed unchanged whether this
+option is set or not. The video stream will be the first output stream if
+activated. Default is 0
.
+
+
+size
+Set the video size. This option is for video only. For the syntax of this
+option, check the "Video size" section in the ffmpeg-utils manual. Default
+and minimum resolution is 640x480
.
+
+
+meter
+Set the EBU scale meter. Default is 9
. Common values are 9
and
+18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
+other integer value between this range is allowed.
+
+
+metadata
+Set metadata injection. If set to 1
, the audio input will be segmented
+into 100ms output frames, each of them containing various loudness information
+in metadata. All the metadata keys are prefixed with lavfi.r128.
.
+
+Default is 0
.
+
+
+framelog
+Force the frame logging level.
+
+Available values are:
+
+‘info ’
+information logging level
+
+‘verbose ’
+verbose logging level
+
+
+
+By default, the logging level is set to info . If the video or
+the metadata options are set, it switches to verbose .
+
+
+peak
+Set peak mode(s).
+
+Available modes can be cumulated (the option is a flag
type). Possible
+values are:
+
+‘none ’
+Disable any peak mode (default).
+
+‘sample ’
+Enable sample-peak mode.
+
+Simple peak mode looking for the highest sample value. It logs a message
+for sample-peak (identified by SPK
).
+
+‘true ’
+Enable true-peak mode.
+
+If enabled, the peak lookup is done on an over-sampled version of the input
+stream for better peak accuracy. It logs a message for true-peak
+(identified by TPK
) and true-peak per frame (identified by FTPK
).
+This mode requires a build with libswresample
.
+
+
+
+
+
+
+
+
41.3.1 Examples# TOC
+
+
+ Real-time graph using ffplay
, with a EBU scale meter +18:
+
+
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
+
+
+ Run an analysis with ffmpeg
:
+
+
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
+
+
+
+
+
41.4 interleave, ainterleave# TOC
+
+
Temporally interleave frames from several inputs.
+
+
interleave
works with video inputs, ainterleave
with audio.
+
+
These filters read frames from several inputs and send the oldest
+queued frame to the output.
+
+
+Input streams must have well-defined, monotonically increasing frame
+timestamp values.
+
+
In order to submit one frame to output, these filters need to enqueue
+at least one frame for each input, so they cannot work in case one
+input is not yet terminated and will not receive incoming frames.
+
+
For example consider the case when one input is a select
filter
+which always drops input frames. The interleave
filter will keep
+reading from that input, but it will never be able to send new frames
+to output until the input will send an end-of-stream signal.
+
+
Also, depending on inputs synchronization, the filters will drop
+frames in case one input receives more frames than the other ones, and
+the queue is already filled.
+
+
These filters accept the following options:
+
+
+nb_inputs, n
+Set the number of different inputs, it is 2 by default.
+
+
+
+
+
41.4.1 Examples# TOC
+
+
+ Interleave frames belonging to different streams using ffmpeg
:
+
+
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
+
+
+ Add flickering blur effect:
+
+
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
+
+
+
+
+
41.5 perms, aperms# TOC
+
+
Set read/write permissions for the output frames.
+
+
These filters are mainly aimed at developers to test direct path in the
+following filter in the filtergraph.
+
+
The filters accept the following options:
+
+
+mode
+Select the permissions mode.
+
+It accepts the following values:
+
+‘none ’
+Do nothing. This is the default.
+
+‘ro ’
+Set all the output frames read-only.
+
+‘rw ’
+Set all the output frames directly writable.
+
+‘toggle ’
+Make the frame read-only if writable, and writable if read-only.
+
+‘random ’
+Set each output frame read-only or writable randomly.
+
+
+
+
+seed
+Set the seed for the random mode, must be an integer included between
+0
and UINT32_MAX
. If not specified, or if explicitly set to
+-1
, the filter will try to use a good random seed on a best effort
+basis.
+
+
+
+
Note: in case of auto-inserted filter between the permission filter and the
+following one, the permission might not be received as expected in that
+following filter. Inserting a format or aformat filter before the
+perms/aperms filter can avoid this problem.
+
+
+
41.6 select, aselect# TOC
+
+
Select frames to pass in output.
+
+
This filter accepts the following options:
+
+
+expr, e
+Set expression, which is evaluated for each input frame.
+
+If the expression is evaluated to zero, the frame is discarded.
+
+If the evaluation result is negative or NaN, the frame is sent to the
+first output; otherwise it is sent to the output with index
+ceil(val)-1
, assuming that the input index starts from 0.
+
+For example a value of 1.2
corresponds to the output with index
+ceil(1.2)-1 = 2-1 = 1
, that is the second output.
+
+
+outputs, n
+Set the number of outputs. The output to which to send the selected
+frame is based on the result of the evaluation. Default value is 1.
+
+
+
+
The expression can contain the following constants:
+
+
+n
+The (sequential) number of the filtered frame, starting from 0.
+
+
+selected_n
+The (sequential) number of the selected frame, starting from 0.
+
+
+prev_selected_n
+The sequential number of the last selected frame. It’s NAN if undefined.
+
+
+TB
+The timebase of the input timestamps.
+
+
+pts
+The PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in TB units. It’s NAN if undefined.
+
+
+t
+The PTS of the filtered video frame,
+expressed in seconds. It’s NAN if undefined.
+
+
+prev_pts
+The PTS of the previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_pts
+The PTS of the last previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_t
+The PTS of the last previously selected video frame. It’s NAN if undefined.
+
+
+start_pts
+The PTS of the first video frame in the video. It’s NAN if undefined.
+
+
+start_t
+The time of the first video frame in the video. It’s NAN if undefined.
+
+
+pict_type (video only)
+The type of the filtered frame. It can assume one of the following
+values:
+
+I
+P
+B
+S
+SI
+SP
+BI
+
+
+
+interlace_type (video only)
+The frame interlace type. It can assume one of the following values:
+
+PROGRESSIVE
+The frame is progressive (not interlaced).
+
+TOPFIRST
+The frame is top-field-first.
+
+BOTTOMFIRST
+The frame is bottom-field-first.
+
+
+
+
+consumed_sample_n (audio only)
+the number of selected samples before the current frame
+
+
+samples_n (audio only)
+the number of samples in the current frame
+
+
+sample_rate (audio only)
+the input sample rate
+
+
+key
+This is 1 if the filtered frame is a key-frame, 0 otherwise.
+
+
+pos
+the position in the file of the filtered frame, -1 if the information
+is not available (e.g. for synthetic video)
+
+
+scene (video only)
+value between 0 and 1 to indicate a new scene; a low value reflects a low
+probability for the current frame to introduce a new scene, while a higher
+value means the current frame is more likely to be one (see the example below)
+
+
+
+
+
The default value of the select expression is "1".
+
+
+
41.6.1 Examples# TOC
+
+
+
+
+
41.7 sendcmd, asendcmd# TOC
+
+
Send commands to filters in the filtergraph.
+
+
These filters read commands to be sent to other filters in the
+filtergraph.
+
+
sendcmd
must be inserted between two video filters,
+asendcmd
must be inserted between two audio filters, but apart
+from that they act the same way.
+
+
The specification of commands can be provided in the filter arguments
+with the commands option, or in a file specified by the
+filename option.
+
+
These filters accept the following options:
+
+commands, c
+Set the commands to be read and sent to the other filters.
+
+filename, f
+Set the filename of the commands to be read and sent to the other
+filters.
+
+
+
+
+
41.7.1 Commands syntax# TOC
+
+
A commands description consists of a sequence of interval
+specifications, comprising a list of commands to be executed when a
+particular event related to that interval occurs. The occurring event
+is typically the current frame time entering or leaving a given time
+interval.
+
+
An interval is specified by the following syntax:
+
+
+
The time interval is specified by the START and END times.
+END is optional and defaults to the maximum time.
+
+
The current frame time is considered within the specified interval if
+it is included in the interval [START , END ), that is when
+the time is greater or equal to START and is lesser than
+END .
+
+
COMMANDS consists of a sequence of one or more command
+specifications, separated by ",", relating to that interval. The
+syntax of a command specification is given by:
+
+
[FLAGS ] TARGET COMMAND ARG
+
+
+
FLAGS is optional and specifies the type of events relating to
+the time interval which enable sending the specified command, and must
+be a non-null sequence of identifier flags separated by "+" or "|" and
+enclosed between "[" and "]".
+
+
The following flags are recognized:
+
+enter
+The command is sent when the current frame timestamp enters the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was not in the given interval, and the
+current is.
+
+
+leave
+The command is sent when the current frame timestamp leaves the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was in the given interval, and the
+current is not.
+
+
+
+
If FLAGS is not specified, a default value of [enter]
is
+assumed.
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional list of argument for
+the given COMMAND .
+
+
Between one interval specification and another, whitespaces, or
+sequences of characters starting with #
until the end of line,
+are ignored and can be used to annotate comments.
+
+
A simplified BNF description of the commands specification syntax
+follows:
+
+
COMMAND_FLAG ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
+COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
+COMMANDS ::= COMMAND [,COMMANDS ]
+INTERVAL ::= START [-END ] COMMANDS
+INTERVALS ::= INTERVAL [;INTERVALS ]
+
+
+
+
41.7.2 Examples# TOC
+
+
+
+
+
41.8 setpts, asetpts# TOC
+
+
Change the PTS (presentation timestamp) of the input frames.
+
+
setpts
works on video frames, asetpts
on audio frames.
+
+
This filter accepts the following options:
+
+
+expr
+The expression which is evaluated for each frame to construct its timestamp.
+
+
+
+
+
The expression is evaluated through the eval API and can contain the following
+constants:
+
+
+FRAME_RATE
+frame rate, only defined for constant frame-rate video
+
+
+PTS
+The presentation timestamp in input
+
+
+N
+The count of the input frame for video or the number of consumed samples,
+not including the current frame for audio, starting from 0.
+
+
+NB_CONSUMED_SAMPLES
+The number of consumed samples, not including the current frame (only
+audio)
+
+
+NB_SAMPLES, S
+The number of samples in the current frame (only audio)
+
+
+SAMPLE_RATE, SR
+The audio sample rate.
+
+
+STARTPTS
+The PTS of the first frame.
+
+
+STARTT
+the time in seconds of the first frame
+
+
+INTERLACED
+State whether the current frame is interlaced.
+
+
+T
+the time in seconds of the current frame
+
+
+POS
+original position in the file of the frame, or undefined if undefined
+for the current frame
+
+
+PREV_INPTS
+The previous input PTS.
+
+
+PREV_INT
+previous input time in seconds
+
+
+PREV_OUTPTS
+The previous output PTS.
+
+
+PREV_OUTT
+previous output time in seconds
+
+
+RTCTIME
+The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
+instead.
+
+
+RTCSTART
+The wallclock (RTC) time at the start of the movie in microseconds.
+
+
+TB
+The timebase of the input timestamps.
+
+
+
+
+
+
41.8.1 Examples# TOC
+
+
+ Start counting PTS from zero
+
+
+ Apply fast motion effect:
+
+
+ Apply slow motion effect:
+
+
+ Set fixed rate of 25 frames per second:
+
+
+ Set fixed rate 25 fps with some jitter:
+
+
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
+
+
+ Apply an offset of 10 seconds to the input PTS:
+
+
+ Generate timestamps from a "live source" and rebase onto the current timebase:
+
+
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
+
+
+ Generate timestamps by counting samples:
+
+
+
+
+
+
41.9 settb, asettb# TOC
+
+
Set the timebase to use for the output frames timestamps.
+It is mainly useful for testing timebase configuration.
+
+
It accepts the following parameters:
+
+
+expr, tb
+The expression which is evaluated into the output timebase.
+
+
+
+
+
The value for tb is an arithmetic expression representing a
+rational. The expression can contain the constants "AVTB" (the default
+timebase), "intb" (the input timebase) and "sr" (the sample rate,
+audio only). Default value is "intb".
+
+
+
41.9.1 Examples# TOC
+
+
+ Set the timebase to 1/25:
+
+
+ Set the timebase to 1/10:
+
+
+ Set the timebase to 1001/1000:
+
+
+ Set the timebase to 2*intb:
+
+
+ Set the default timebase value:
+
+
+
+
+
41.10 showcqt# TOC
+
Convert input audio to a video output representing
+frequency spectrum logarithmically (using constant Q transform with
+Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
+
+
The filter accepts the following options:
+
+
+volume
+Specify transform volume (multiplier) expression. The expression can contain
+variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+a_weighting(f)
+A-weighting of equal loudness
+
+b_weighting(f)
+B-weighting of equal loudness
+
+c_weighting(f)
+C-weighting of equal loudness
+
+
+Default value is 16
.
+
+
+tlength
+Specify transform length expression. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+Default value is 384/f*tc/(384/f+tc)
.
+
+
+timeclamp
+Specify the transform timeclamp. At low frequency, there is trade-off between
+accuracy in time domain and frequency domain. If timeclamp is lower,
+event in time domain is represented more accurately (such as fast bass drum),
+otherwise event in frequency domain is represented more accurately
+(such as bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
+
+
+coeffclamp
+Specify the transform coeffclamp. If coeffclamp is lower, transform is
+more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
+Default value is 1.0
.
+
+
+gamma
+Specify gamma. Lower gamma makes the spectrum more contrasted, higher gamma
+makes the spectrum have more range. Acceptable value is [1.0, 7.0].
+Default value is 3.0
.
+
+
+fontfile
+Specify font file for use with freetype. If not specified, use embedded font.
+
+
+fontcolor
+Specify font color expression. This is arithmetic expression that should return
+integer value 0xRRGGBB. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+midi(f)
+midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
+
+r(x), g(x), b(x)
+red, green, and blue value of intensity x
+
+
+Default value is st(0, (midi(f)-59.5)/12);
+st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
+r(1-ld(1)) + b(ld(1))
+
+
+fullhd
+If set to 1 (the default), the video size is 1920x1080 (full HD),
+if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
+
+
+fps
+Specify video fps. Default value is 25
.
+
+
+count
+Specify the number of transforms per frame, so there are fps*count transforms
+per second. Note that audio data rate must be divisible by fps*count.
+Default value is 6
.
+
+
+
+
+
+
41.10.1 Examples# TOC
+
+
+ Playing audio while showing the spectrum:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with frame rate 30 fps:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
+
+
+ Playing at 960x540 and lower CPU usage:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
+
+
+ A1 and its harmonics: A1, A2, (near)E3, A3:
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with more accuracy in frequency domain (and slower):
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
+
+
+ B-weighting of equal loudness
+
+
volume=16*b_weighting(f)
+
+
+ Lower Q factor
+
+
tlength=100/f*tc/(100/f+tc)
+
+
+ Custom fontcolor, C-note is colored green, others are colored blue
+
+
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
+
+
+
+
+
+
41.11 showspectrum# TOC
+
+
Convert input audio to a video output, representing the audio frequency
+spectrum.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value is
+640x512
.
+
+
+slide
+Specify how the spectrum should slide along the window.
+
+It accepts the following values:
+
+‘replace ’
+the samples start again on the left when they reach the right
+
+‘scroll ’
+the samples scroll from right to left
+
+‘fullframe ’
+frames are only produced when the samples reach the right
+
+
+
+Default value is replace
.
+
+
+mode
+Specify display mode.
+
+It accepts the following values:
+
+‘combined ’
+all channels are displayed in the same row
+
+‘separate ’
+all channels are displayed in separate rows
+
+
+
+Default value is ‘combined ’.
+
+
+color
+Specify display color mode.
+
+It accepts the following values:
+
+‘channel ’
+each channel is displayed in a separate color
+
+‘intensity ’
+each channel is displayed using the same color scheme
+
+
+
+Default value is ‘channel ’.
+
+
+scale
+Specify scale used for calculating intensity color values.
+
+It accepts the following values:
+
+‘lin ’
+linear
+
+‘sqrt ’
+square root, default
+
+‘cbrt ’
+cubic root
+
+‘log ’
+logarithmic
+
+
+
+Default value is ‘sqrt ’.
+
+
+saturation
+Set saturation modifier for displayed colors. Negative values provide
+alternative color scheme. 0
is no saturation at all.
+Saturation must be in [-10.0, 10.0] range.
+Default value is 1
.
+
+
+win_func
+Set window function.
+
+It accepts the following values:
+
+‘none ’
+No samples pre-processing (do not expect this to be faster)
+
+‘hann ’
+Hann window
+
+‘hamming ’
+Hamming window
+
+‘blackman ’
+Blackman window
+
+
+
+Default value is hann
.
+
+
+
+
The usage is very similar to the showwaves filter; see the examples in that
+section.
+
+
+
41.11.1 Examples# TOC
+
+
+ Large window with logarithmic color scaling:
+
+
showspectrum=s=1280x480:scale=log
+
+
+ Complete example for a colored and sliding spectrum per channel using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
+
+
+
+
+
41.12 showwaves# TOC
+
+
Convert input audio to a video output, representing the samples waves.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value
+is "600x240".
+
+
+mode
+Set display mode.
+
+Available values are:
+
+‘point ’
+Draw a point for each sample.
+
+
+‘line ’
+Draw a vertical line for each sample.
+
+
+‘p2p ’
+Draw a point for each sample and a line between them.
+
+
+‘cline ’
+Draw a centered vertical line for each sample.
+
+
+
+Default value is point
.
+
+
+n
+Set the number of samples which are printed on the same column. A
+larger value will decrease the frame rate. Must be a positive
+integer. This option can be set only if the value for rate
+is not explicitly specified.
+
+
+rate, r
+Set the (approximate) output frame rate. This is done by setting the
+option n . Default value is "25".
+
+
+split_channels
+Set if channels should be drawn separately or overlap. Default value is 0.
+
+
+
+
+
+
41.12.1 Examples# TOC
+
+
+ Output the input file audio and the corresponding video representation
+at the same time:
+
+
amovie=a.mp3,asplit[out0],showwaves[out1]
+
+
+ Create a synthetic signal and show it with showwaves, forcing a
+frame rate of 30 frames per second:
+
+
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
+
+
+
+
+
41.13 split, asplit# TOC
+
+
Split input into several identical outputs.
+
+
asplit
works with audio input, split
with video.
+
+
The filter accepts a single parameter which specifies the number of outputs. If
+unspecified, it defaults to 2.
+
+
+
41.13.1 Examples# TOC
+
+
+ Create two separate outputs from the same input:
+
+
[in] split [out0][out1]
+
+
+ To create 3 or more outputs, you need to specify the number of
+outputs, like in:
+
+
[in] asplit=3 [out0][out1][out2]
+
+
+ Create two separate outputs from the same input, one cropped and
+one padded:
+
+
[in] split [splitout1][splitout2];
+[splitout1] crop=100:100:0:0 [cropout];
+[splitout2] pad=200:200:100:100 [padout];
+
+
+ Create 5 copies of the input audio with ffmpeg
:
+
+
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
+
+
+
+
+
41.14 zmq, azmq# TOC
+
+
Receive commands sent through a libzmq client, and forward them to
+filters in the filtergraph.
+
+
zmq
and azmq
work as a pass-through filters. zmq
+must be inserted between two video filters, azmq
between two
+audio filters.
+
+
To enable these filters you need to install the libzmq library and
+headers and configure FFmpeg with --enable-libzmq
.
+
+
For more information about libzmq see:
+http://www.zeromq.org/
+
+
The zmq
and azmq
filters work as a libzmq server, which
+receives messages sent through a network interface defined by the
+bind_address option.
+
+
The received message must be in the form:
+
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional argument list for the
+given COMMAND .
+
+
Upon reception, the message is processed and the corresponding command
+is injected into the filtergraph. Depending on the result, the filter
+will send a reply to the client, adopting the format:
+
+
ERROR_CODE ERROR_REASON
+MESSAGE
+
+
+
MESSAGE is optional.
+
+
+
41.14.1 Examples# TOC
+
+
Look at tools/zmqsend for an example of a zmq client which can
+be used to send commands processed by these filters.
+
+
Consider the following filtergraph generated by ffplay
+
+
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l] overlay [bg+l];
+[bg+l][r] overlay=x=100 "
+
+
+
To change the color of the left side of the video, the following
+command can be used:
+
+
echo Parsed_color_0 c yellow | tools/zmqsend
+
+
+
To change the right side:
+
+
echo Parsed_color_1 c pink | tools/zmqsend
+
+
+
+
+
42 Multimedia Sources# TOC
+
+
Below is a description of the currently available multimedia sources.
+
+
+
42.1 amovie# TOC
+
+
This is the same as movie source, except it selects an audio
+stream by default.
+
+
+
42.2 movie# TOC
+
+
Read audio and/or video stream(s) from a movie container.
+
+
It accepts the following parameters:
+
+
+filename
+The name of the resource to read (not necessarily a file; it can also be a
+device or a stream accessed through some protocol).
+
+
+format_name, f
+Specifies the format assumed for the movie to read, and can be either
+the name of a container or an input device. If not specified, the
+format is guessed from movie_name or by probing.
+
+
+seek_point, sp
+Specifies the seek point in seconds. The frames will be output
+starting from this seek point. The parameter is evaluated with
+av_strtod
, so the numerical value may be suffixed by an IS
+postfix. The default value is "0".
+
+
+streams, s
+Specifies the streams to read. Several streams can be specified,
+separated by "+". The source will then have as many outputs, in the
+same order. The syntax is explained in the “Stream specifiers”
+section in the ffmpeg manual. Two special names, "dv" and "da" specify
+respectively the default (best suited) video and audio stream. Default
+is "dv", or "da" if the filter is called as "amovie".
+
+
+stream_index, si
+Specifies the index of the video stream to read. If the value is -1,
+the most suitable video stream will be automatically selected. The default
+value is "-1". Deprecated. If the filter is called "amovie", it will select
+audio instead of video.
+
+
+loop
+Specifies how many times to read the stream in sequence.
+If the value is less than 1, the stream will be read again and again.
+Default value is "1".
+
+Note that when the movie is looped the source timestamps are not
+changed, so it will generate non monotonically increasing timestamps.
+
+
+
+
It allows overlaying a second video on top of the main input of
+a filtergraph, as shown in this graph:
+
+
input -----------> deltapts0 --> overlay --> output
+ ^
+ |
+movie --> scale--> deltapts1 -------+
+
+
+
42.2.1 Examples# TOC
+
+
+ Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
+on top of the input labelled "in":
+
+
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read from a video4linux2 device, and overlay it on top of the input
+labelled "in":
+
+
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read the first video stream and the audio stream with id 0x81 from
+dvd.vob; the video is connected to the pad named "video" and the audio is
+connected to the pad named "audio":
+
+
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
+
+
+
+
+
+
43 See Also# TOC
+
+
ffmpeg
+ffplay , ffprobe , ffserver ,
+ffmpeg-utils ,
+ffmpeg-scaler ,
+ffmpeg-resampler ,
+ffmpeg-codecs ,
+ffmpeg-bitstream-filters ,
+ffmpeg-formats ,
+ffmpeg-devices ,
+ffmpeg-protocols ,
+ffmpeg-filters
+
+
+
+
44 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-bitstream-filters.html b/Externals/ffmpeg/dev/doc/ffmpeg-bitstream-filters.html
new file mode 100644
index 0000000000..b7195b944f
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-bitstream-filters.html
@@ -0,0 +1,261 @@
+
+
+
+
+
+
+ FFmpeg Bitstream Filters Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Bitstream Filters Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes the bitstream filters provided by the
+libavcodec library.
+
+
A bitstream filter operates on the encoded stream data, and performs
+bitstream level modifications without performing decoding.
+
+
+
+
2 Bitstream Filters# TOC
+
+
When you configure your FFmpeg build, all the supported bitstream
+filters are enabled by default. You can list all available ones using
+the configure option --list-bsfs
.
+
+
You can disable all the bitstream filters using the configure option
+--disable-bsfs
, and selectively enable any bitstream filter using
+the option --enable-bsf=BSF
, or you can disable a particular
+bitstream filter using the option --disable-bsf=BSF
.
+
+
The option -bsfs
of the ff* tools will display the list of
+all the supported bitstream filters included in your build.
+
+
The ff* tools have a -bsf option applied per stream, taking a
+comma-separated list of filters, whose parameters follow the filter
+name after a ’=’.
+
+
+
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
+
+
+
Below is a description of the currently available bitstream filters,
+with their parameters, if any.
+
+
+
2.1 aac_adtstoasc# TOC
+
+
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
+bitstream filter.
+
+
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
+ADTS header and removes the ADTS header.
+
+
This is required for example when copying an AAC stream from a raw
+ADTS AAC container to a FLV or a MOV/MP4 file.
+
+
+
2.2 chomp# TOC
+
+
Remove zero padding at the end of a packet.
+
+
+
2.3 dump_extra# TOC
+
+
Add extradata to the beginning of the filtered packets.
+
+
The additional argument specifies which packets should be filtered.
+It accepts the values:
+
+‘a ’
+add extradata to all key packets, but only if local_header is
+set in the flags2 codec context field
+
+
+‘k ’
+add extradata to all key packets
+
+
+‘e ’
+add extradata to all packets
+
+
+
+
If not specified it is assumed ‘k ’.
+
+
For example the following ffmpeg
command forces a global
+header (thus disabling individual packet headers) in the H.264 packets
+generated by the libx264
encoder, but corrects them by adding
+the header stored in extradata to the key packets:
+
+
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+
+
+
2.4 h264_mp4toannexb# TOC
+
+
Convert an H.264 bitstream from length prefixed mode to start code
+prefixed mode (as defined in the Annex B of the ITU-T H.264
+specification).
+
+
This is required by some streaming formats, typically the MPEG-2
+transport stream format ("mpegts").
+
+
For example to remux an MP4 file containing an H.264 stream to mpegts
+format with ffmpeg
, you can use the command:
+
+
+
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+
+
+
2.5 imxdump# TOC
+
+
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
+Pro decoder. This filter only applies to the mpeg2video codec, and is
+likely not needed for Final Cut Pro 7 and newer with the appropriate
+-tag:v .
+
+
For example, to remux 30 MB/sec NTSC IMX to MOV:
+
+
+
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
+
+
+
+
2.6 mjpeg2jpeg# TOC
+
+
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
+
+
MJPEG is a video codec wherein each video frame is essentially a
+JPEG image. The individual frames can be extracted without loss,
+e.g. by
+
+
+
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+
+
Unfortunately, these chunks are incomplete JPEG images, because
+they lack the DHT segment required for decoding. Quoting from
+http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
+
+
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
+commented that "MJPEG, or at least the MJPEG in AVIs having the
+MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
+Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
+and it must use basic Huffman encoding, not arithmetic or
+progressive. . . . You can indeed extract the MJPEG frames and
+decode them with a regular JPEG decoder, but you have to prepend
+the DHT segment to them, or else the decoder won’t have any idea
+how to decompress the data. The exact table necessary is given in
+the OpenDML spec."
+
+
This bitstream filter patches the header of frames extracted from an MJPEG
+stream (carrying the AVI1 header ID and lacking a DHT segment) to
+produce fully qualified JPEG images.
+
+
+
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+
+
+
2.7 mjpega_dump_header# TOC
+
+
+
2.8 movsub# TOC
+
+
+
2.9 mp3_header_decompress# TOC
+
+
+
2.10 noise# TOC
+
+
Damages the contents of packets without damaging the container. Can be
+used for fuzzing or testing error resilience/concealment.
+
+
Parameters:
+A numeral string, whose value is related to how often output bytes will
+be modified. Therefore, values below or equal to 0 are forbidden, and
+the lower the more frequent bytes will be modified, with 1 meaning
+every byte is modified.
+
+
+
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
+
+
applies the modification to every byte.
+
+
+
2.11 remove_extra# TOC
+
+
+
+
3 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavcodec
+
+
+
+
4 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-codecs.html b/Externals/ffmpeg/dev/doc/ffmpeg-codecs.html
new file mode 100644
index 0000000000..968b12f421
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-codecs.html
@@ -0,0 +1,4474 @@
+
+
+
+
+
+
+ FFmpeg Codecs Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Codecs Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes the codecs (decoders and encoders) provided by
+the libavcodec library.
+
+
+
+
2 Codec Options# TOC
+
+
libavcodec provides some generic global options, which can be set on
+all the encoders and decoders. In addition each codec may support
+so-called private options, which are specific for a given codec.
+
+
Sometimes, a global option may only affect a specific kind of codec,
+and may be nonsensical or ignored by another, so you need to be aware
+of the meaning of the specified options. Also some options are
+meant only for decoding or encoding.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVCodecContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
The list of supported options follow:
+
+
+b integer (encoding,audio,video )
+Set bitrate in bits/s. Default value is 200K.
+
+
+ab integer (encoding,audio )
+Set audio bitrate (in bits/s). Default value is 128K.
+
+
+bt integer (encoding,video )
+Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
+tolerance specifies how far ratecontrol is willing to deviate from the
+target average bitrate value. This is not related to min/max
+bitrate. Lowering tolerance too much has an adverse effect on quality.
+
+
+flags flags (decoding/encoding,audio,video,subtitles )
+Set generic flags.
+
+Possible values:
+
+‘mv4 ’
+Use four motion vector by macroblock (mpeg4).
+
+‘qpel ’
+Use 1/4 pel motion compensation.
+
+‘loop ’
+Use loop filter.
+
+‘qscale ’
+Use fixed qscale.
+
+‘gmc ’
+Use gmc.
+
+‘mv0 ’
+Always try a mb with mv=<0,0>.
+
+‘input_preserved ’
+‘pass1 ’
+Use internal 2pass ratecontrol in first pass mode.
+
+‘pass2 ’
+Use internal 2pass ratecontrol in second pass mode.
+
+‘gray ’
+Only decode/encode grayscale.
+
+‘emu_edge ’
+Do not draw edges.
+
+‘psnr ’
+Set error[?] variables during encoding.
+
+‘truncated ’
+‘naq ’
+Normalize adaptive quantization.
+
+‘ildct ’
+Use interlaced DCT.
+
+‘low_delay ’
+Force low delay.
+
+‘global_header ’
+Place global headers in extradata instead of every keyframe.
+
+‘bitexact ’
+Only write platform-, build- and time-independent data. (except (I)DCT).
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+‘aic ’
+Apply H263 advanced intra coding / mpeg4 ac prediction.
+
+‘cbp ’
+Deprecated, use mpegvideo private options instead.
+
+‘qprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘ilme ’
+Apply interlaced motion estimation.
+
+‘cgop ’
+Use closed gop.
+
+
+
+
+me_method integer (encoding,video )
+Set motion estimation method.
+
+Possible values:
+
+‘zero ’
+zero motion estimation (fastest)
+
+‘full ’
+full motion estimation (slowest)
+
+‘epzs ’
+EPZS motion estimation (default)
+
+‘esa ’
+esa motion estimation (alias for full)
+
+‘tesa ’
+tesa motion estimation
+
+‘dia ’
+dia motion estimation (alias for epzs)
+
+‘log ’
+log motion estimation
+
+‘phods ’
+phods motion estimation
+
+‘x1 ’
+X1 motion estimation
+
+‘hex ’
+hex motion estimation
+
+‘umh ’
+umh motion estimation
+
+‘iter ’
+iter motion estimation
+
+
+
+
+extradata_size integer
+Set extradata size.
+
+
+time_base rational number
+Set codec time base.
+
+It is the fundamental unit of time (in seconds) in terms of which
+frame timestamps are represented. For fixed-fps content, timebase
+should be 1 / frame_rate
and timestamp increments should be
+identically 1.
+
+
+g integer (encoding,video )
+Set the group of picture size. Default value is 12.
+
+
+ar integer (decoding/encoding,audio )
+Set audio sampling rate (in Hz).
+
+
+ac integer (decoding/encoding,audio )
+Set number of audio channels.
+
+
+cutoff integer (encoding,audio )
+Set cutoff bandwidth.
+
+
+frame_size integer (encoding,audio )
+Set audio frame size.
+
+Each submitted frame except the last must contain exactly frame_size
+samples per channel. May be 0 when the codec has
+CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
+restricted. It is set by some decoders to indicate constant frame
+size.
+
+
+frame_number integer
+Set the frame number.
+
+
+delay integer
+qcomp float (encoding,video )
+Set video quantizer scale compression (VBR). It is used as a constant
+in the ratecontrol equation. Recommended range for default rc_eq:
+0.0-1.0.
+
+
+qblur float (encoding,video )
+Set video quantizer scale blur (VBR).
+
+
+qmin integer (encoding,video )
+Set min video quantizer scale (VBR). Must be included between -1 and
+69, default value is 2.
+
+
+qmax integer (encoding,video )
+Set max video quantizer scale (VBR). Must be included between -1 and
+1024, default value is 31.
+
+
+qdiff integer (encoding,video )
+Set max difference between the quantizer scale (VBR).
+
+
+bf integer (encoding,video )
+Set max number of B frames between non-B-frames.
+
+Must be an integer between -1 and 16. 0 means that B-frames are
+disabled. If a value of -1 is used, it will choose an automatic value
+depending on the encoder.
+
+Default value is 0.
+
+
+b_qfactor float (encoding,video )
+Set qp factor between P and B frames.
+
+
+rc_strategy integer (encoding,video )
+Set ratecontrol method.
+
+
+b_strategy integer (encoding,video )
+Set strategy to choose between I/P/B-frames.
+
+
+ps integer (encoding,video )
+Set RTP payload size in bytes.
+
+
+mv_bits integer
+header_bits integer
+i_tex_bits integer
+p_tex_bits integer
+i_count integer
+p_count integer
+skip_count integer
+misc_bits integer
+frame_bits integer
+codec_tag integer
+bug flags (decoding,video )
+Workaround not auto detected encoder bugs.
+
+Possible values:
+
+‘autodetect ’
+‘old_msmpeg4 ’
+some old lavc generated msmpeg4v3 files (no autodetection)
+
+‘xvid_ilace ’
+Xvid interlacing bug (autodetected if fourcc==XVIX)
+
+‘ump4 ’
+(autodetected if fourcc==UMP4)
+
+‘no_padding ’
+padding bug (autodetected)
+
+‘amv ’
+‘ac_vlc ’
+illegal vlc bug (autodetected per fourcc)
+
+‘qpel_chroma ’
+‘std_qpel ’
+old standard qpel (autodetected per fourcc/version)
+
+‘qpel_chroma2 ’
+‘direct_blocksize ’
+direct-qpel-blocksize bug (autodetected per fourcc/version)
+
+‘edge ’
+edge padding bug (autodetected per fourcc/version)
+
+‘hpel_chroma ’
+‘dc_clip ’
+‘ms ’
+Workaround various bugs in microsoft broken decoders.
+
+‘trunc ’
+truncated frames
+
+
+
+
+lelim integer (encoding,video )
+Set single coefficient elimination threshold for luminance (negative
+values also consider DC coefficient).
+
+
+celim integer (encoding,video )
+Set single coefficient elimination threshold for chrominance (negative
+values also consider dc coefficient)
+
+
+strict integer (decoding/encoding,audio,video )
+Specify how strictly to follow the standards.
+
+Possible values:
+
+‘very ’
+strictly conform to an older, more strict version of the spec or reference software
+
+‘strict ’
+strictly conform to all the things in the spec no matter what consequences
+
+‘normal ’
+‘unofficial ’
+allow unofficial extensions
+
+‘experimental ’
+allow non standardized experimental things, experimental
+(unfinished/work in progress/not well tested) decoders and encoders.
+Note: experimental decoders can pose a security risk, do not use this for
+decoding untrusted input.
+
+
+
+
+b_qoffset float (encoding,video )
+Set QP offset between P and B frames.
+
+
+err_detect flags (decoding,audio,video )
+Set error detection flags.
+
+Possible values:
+
+‘crccheck ’
+verify embedded CRCs
+
+‘bitstream ’
+detect bitstream specification deviations
+
+‘buffer ’
+detect improper bitstream length
+
+‘explode ’
+abort decoding on minor error detection
+
+‘ignore_err ’
+ignore decoding errors, and continue decoding.
+This is useful if you want to analyze the content of a video and thus want
+everything to be decoded no matter what. This option will not result in a video
+that is pleasing to watch in case of errors.
+
+‘careful ’
+consider things that violate the spec and have not been seen in the wild as errors
+
+‘compliant ’
+consider all spec non-compliances as errors
+
+‘aggressive ’
+consider things that a sane encoder should not do as an error
+
+
+
+
+has_b_frames integer
+block_align integer
+mpeg_quant integer (encoding,video )
+Use MPEG quantizers instead of H.263.
+
+
+qsquish float (encoding,video )
+How to keep quantizer between qmin and qmax (0 = clip, 1 = use
+differentiable function).
+
+
+rc_qmod_amp float (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_qmod_freq integer (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_override_count integer
+rc_eq string (encoding,video )
+Set rate control equation. When computing the expression, besides the
+standard functions defined in the section ’Expression Evaluation’, the
+following functions are available: bits2qp(bits), qp2bits(qp). Also
+the following constants are available: iTex pTex tex mv fCode iCount
+mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
+avgTex.
+
+
+maxrate integer (encoding,audio,video )
+Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
+
+
+minrate integer (encoding,audio,video )
+Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
+encode. It is of little use elsewise.
+
+
+bufsize integer (encoding,audio,video )
+Set ratecontrol buffer size (in bits).
+
+
+rc_buf_aggressivity float (encoding,video )
+Currently useless.
+
+
+i_qfactor float (encoding,video )
+Set QP factor between P and I frames.
+
+
+i_qoffset float (encoding,video )
+Set QP offset between P and I frames.
+
+
+rc_init_cplx float (encoding,video )
+Set initial complexity for 1-pass encoding.
+
+
+dct integer (encoding,video )
+Set DCT algorithm.
+
+Possible values:
+
+‘auto ’
+autoselect a good one (default)
+
+‘fastint ’
+fast integer
+
+‘int ’
+accurate integer
+
+‘mmx ’
+‘altivec ’
+‘faan ’
+floating point AAN DCT
+
+
+
+
+lumi_mask float (encoding,video )
+Compress bright areas stronger than medium ones.
+
+
+tcplx_mask float (encoding,video )
+Set temporal complexity masking.
+
+
+scplx_mask float (encoding,video )
+Set spatial complexity masking.
+
+
+p_mask float (encoding,video )
+Set inter masking.
+
+
+dark_mask float (encoding,video )
+Compress dark areas stronger than medium ones.
+
+
+idct integer (decoding/encoding,video )
+Select IDCT implementation.
+
+Possible values:
+
+‘auto ’
+‘int ’
+‘simple ’
+‘simplemmx ’
+‘simpleauto ’
+Automatically pick an IDCT compatible with the simple one
+
+
+‘arm ’
+‘altivec ’
+‘sh4 ’
+‘simplearm ’
+‘simplearmv5te ’
+‘simplearmv6 ’
+‘simpleneon ’
+‘simplealpha ’
+‘ipp ’
+‘xvidmmx ’
+‘faani ’
+floating point AAN IDCT
+
+
+
+
+slice_count integer
+ec flags (decoding,video )
+Set error concealment strategy.
+
+Possible values:
+
+‘guess_mvs ’
+iterative motion vector (MV) search (slow)
+
+‘deblock ’
+use strong deblock filter for damaged MBs
+
+‘favor_inter ’
+favor predicting from the previous frame instead of the current
+
+
+
+
+bits_per_coded_sample integer
+pred integer (encoding,video )
+Set prediction method.
+
+Possible values:
+
+‘left ’
+‘plane ’
+‘median ’
+
+
+
+aspect rational number (encoding,video )
+Set sample aspect ratio.
+
+
+debug flags (decoding/encoding,audio,video,subtitles )
+Print specific debug info.
+
+Possible values:
+
+‘pict ’
+picture info
+
+‘rc ’
+rate control
+
+‘bitstream ’
+‘mb_type ’
+macroblock (MB) type
+
+‘qp ’
+per-block quantization parameter (QP)
+
+‘mv ’
+motion vector
+
+‘dct_coeff ’
+‘skip ’
+‘startcode ’
+‘pts ’
+‘er ’
+error recognition
+
+‘mmco ’
+memory management control operations (H.264)
+
+‘bugs ’
+‘vis_qp ’
+visualize quantization parameter (QP), lower QP are tinted greener
+
+‘vis_mb_type ’
+visualize block types
+
+‘buffers ’
+picture buffer allocations
+
+‘thread_ops ’
+threading operations
+
+‘nomc ’
+skip motion compensation
+
+
+
+
+vismv integer (decoding,video )
+Visualize motion vectors (MVs).
+
+This option is deprecated, see the codecview filter instead.
+
+Possible values:
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+cmp integer (encoding,video )
+Set full pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+subcmp integer (encoding,video )
+Set sub pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+mbcmp integer (encoding,video )
+Set macroblock compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+ildctcmp integer (encoding,video )
+Set interlaced dct compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+dia_size integer (encoding,video )
+Set diamond type & size for motion estimation.
+
+
+last_pred integer (encoding,video )
+Set amount of motion predictors from the previous frame.
+
+
+preme integer (encoding,video )
+Set pre motion estimation.
+
+
+precmp integer (encoding,video )
+Set pre motion estimation compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+pre_dia_size integer (encoding,video )
+Set diamond type & size for motion estimation pre-pass.
+
+
+subq integer (encoding,video )
+Set sub pel motion estimation quality.
+
+
+dtg_active_format integer
+me_range integer (encoding,video )
+Set limit motion vectors range (1023 for DivX player).
+
+
+ibias integer (encoding,video )
+Set intra quant bias.
+
+
+pbias integer (encoding,video )
+Set inter quant bias.
+
+
+color_table_id integer
+global_quality integer (encoding,audio,video )
+coder integer (encoding,video )
+
+Possible values:
+
+‘vlc ’
+variable length coder / huffman coder
+
+‘ac ’
+arithmetic coder
+
+‘raw ’
+raw (no encoding)
+
+‘rle ’
+run-length coder
+
+‘deflate ’
+deflate-based coder
+
+
+
+
+context integer (encoding,video )
+Set context model.
+
+
+slice_flags integer
+xvmc_acceleration integer
+mbd integer (encoding,video )
+Set macroblock decision algorithm (high quality mode).
+
+Possible values:
+
+‘simple ’
+use mbcmp (default)
+
+‘bits ’
+use fewest bits
+
+‘rd ’
+use best rate distortion
+
+
+
+
+stream_codec_tag integer
+sc_threshold integer (encoding,video )
+Set scene change threshold.
+
+
+lmin integer (encoding,video )
+Set min lagrange factor (VBR).
+
+
+lmax integer (encoding,video )
+Set max lagrange factor (VBR).
+
+
+nr integer (encoding,video )
+Set noise reduction.
+
+
+rc_init_occupancy integer (encoding,video )
+Set number of bits which should be loaded into the rc buffer before
+decoding starts.
+
+
+flags2 flags (decoding/encoding,audio,video )
+
+Possible values:
+
+‘fast ’
+Allow non spec compliant speedup tricks.
+
+‘sgop ’
+Deprecated, use mpegvideo private options instead.
+
+‘noout ’
+Skip bitstream encoding.
+
+‘ignorecrop ’
+Ignore cropping information from sps.
+
+‘local_header ’
+Place global headers at every keyframe instead of in extradata.
+
+‘chunks ’
+Frame data might be split into multiple chunks.
+
+‘showall ’
+Show all frames before the first keyframe.
+
+‘skiprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘export_mvs ’
+Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
+for codecs that support it. See also doc/examples/export_mvs.c .
+
+
+
+
+error integer (encoding,video )
+qns integer (encoding,video )
+Deprecated, use mpegvideo private options instead.
+
+
+threads integer (decoding/encoding,video )
+
+Possible values:
+
+‘auto ’
+detect a good number of threads
+
+
+
+
+me_threshold integer (encoding,video )
+Set motion estimation threshold.
+
+
+mb_threshold integer (encoding,video )
+Set macroblock threshold.
+
+
+dc integer (encoding,video )
+Set intra_dc_precision.
+
+
+nssew integer (encoding,video )
+Set nsse weight.
+
+
+skip_top integer (decoding,video )
+Set number of macroblock rows at the top which are skipped.
+
+
+skip_bottom integer (decoding,video )
+Set number of macroblock rows at the bottom which are skipped.
+
+
+profile integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+‘aac_main ’
+‘aac_low ’
+‘aac_ssr ’
+‘aac_ltp ’
+‘aac_he ’
+‘aac_he_v2 ’
+‘aac_ld ’
+‘aac_eld ’
+‘mpeg2_aac_low ’
+‘mpeg2_aac_he ’
+‘mpeg4_sp ’
+‘mpeg4_core ’
+‘mpeg4_main ’
+‘mpeg4_asp ’
+‘dts ’
+‘dts_es ’
+‘dts_96_24 ’
+‘dts_hd_hra ’
+‘dts_hd_ma ’
+
+
+
+level integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+
+
+
+lowres integer (decoding,audio,video )
+Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
+
+
+skip_threshold integer (encoding,video )
+Set frame skip threshold.
+
+
+skip_factor integer (encoding,video )
+Set frame skip factor.
+
+
+skip_exp integer (encoding,video )
+Set frame skip exponent.
+Negative values behave identical to the corresponding positive ones, except
+that the score is normalized.
+Positive values exist primarily for compatibility reasons and are not so useful.
+
+
+skipcmp integer (encoding,video )
+Set frame skip compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+border_mask float (encoding,video )
+Increase the quantizer for macroblocks close to borders.
+
+
+mblmin integer (encoding,video )
+Set min macroblock lagrange factor (VBR).
+
+
+mblmax integer (encoding,video )
+Set max macroblock lagrange factor (VBR).
+
+
+mepc integer (encoding,video )
+Set motion estimation bitrate penalty compensation (1.0 = 256).
+
+
+skip_loop_filter integer (decoding,video )
+skip_idct integer (decoding,video )
+skip_frame integer (decoding,video )
+
+Make decoder discard processing depending on the frame type selected
+by the option value.
+
+skip_loop_filter skips frame loop filtering, skip_idct
+skips frame IDCT/dequantization, skip_frame skips decoding.
+
+Possible values:
+
+‘none ’
+Discard no frame.
+
+
+‘default ’
+Discard useless frames like 0-sized frames.
+
+
+‘noref ’
+Discard all non-reference frames.
+
+
+‘bidir ’
+Discard all bidirectional frames.
+
+
+‘nokey ’
+Discard all frames except keyframes.
+
+
+‘all ’
+Discard all frames.
+
+
+
+Default value is ‘default ’.
+
+
+bidir_refine integer (encoding,video )
+Refine the two motion vectors used in bidirectional macroblocks.
+
+
+brd_scale integer (encoding,video )
+Downscale frames for dynamic B-frame decision.
+
+
+keyint_min integer (encoding,video )
+Set minimum interval between IDR-frames.
+
+
+refs integer (encoding,video )
+Set reference frames to consider for motion compensation.
+
+
+chromaoffset integer (encoding,video )
+Set chroma qp offset from luma.
+
+
+trellis integer (encoding,audio,video )
+Set rate-distortion optimal quantization.
+
+
+sc_factor integer (encoding,video )
+Set value multiplied by qscale for each frame and added to
+scene_change_score.
+
+
+mv0_threshold integer (encoding,video )
+b_sensitivity integer (encoding,video )
+Adjust sensitivity of b_frame_strategy 1.
+
+
+compression_level integer (encoding,audio,video )
+min_prediction_order integer (encoding,audio )
+max_prediction_order integer (encoding,audio )
+timecode_frame_start integer (encoding,video )
+Set GOP timecode frame start number, in non drop frame format.
+
+
+request_channels integer (decoding,audio )
+Set desired number of audio channels.
+
+
+bits_per_raw_sample integer
+channel_layout integer (decoding/encoding,audio )
+
+Possible values:
+
+request_channel_layout integer (decoding,audio )
+
+Possible values:
+
+rc_max_vbv_use float (encoding,video )
+rc_min_vbv_use float (encoding,video )
+ticks_per_frame integer (decoding/encoding,audio,video )
+color_primaries integer (decoding/encoding,video )
+color_trc integer (decoding/encoding,video )
+colorspace integer (decoding/encoding,video )
+color_range integer (decoding/encoding,video )
+chroma_sample_location integer (decoding/encoding,video )
+log_level_offset integer
+Set the log level offset.
+
+
+slices integer (encoding,video )
+Number of slices, used in parallelized encoding.
+
+
+thread_type flags (decoding/encoding,video )
+Select which multithreading methods to use.
+
+Use of ‘frame ’ will increase decoding delay by one frame per
+thread, so clients which cannot provide future frames should not use
+it.
+
+Possible values:
+
+‘slice ’
+Decode more than one part of a single frame at once.
+
+Multithreading using slices works only when the video was encoded with
+slices.
+
+
+‘frame ’
+Decode more than one frame at once.
+
+
+
+Default value is ‘slice+frame ’.
+
+
+audio_service_type integer (encoding,audio )
+Set audio service type.
+
+Possible values:
+
+‘ma ’
+Main Audio Service
+
+‘ef ’
+Effects
+
+‘vi ’
+Visually Impaired
+
+‘hi ’
+Hearing Impaired
+
+‘di ’
+Dialogue
+
+‘co ’
+Commentary
+
+‘em ’
+Emergency
+
+‘vo ’
+Voice Over
+
+‘ka ’
+Karaoke
+
+
+
+
+request_sample_fmt sample_fmt (decoding,audio )
+Set sample format audio decoders should prefer. Default value is
+none
.
+
+
+pkt_timebase rational number
+sub_charenc encoding (decoding,subtitles )
+Set the input subtitles character encoding.
+
+
+field_order field_order (video )
+Set/override the field order of the video.
+Possible values:
+
+‘progressive ’
+Progressive video
+
+‘tt ’
+Interlaced video, top field coded and displayed first
+
+‘bb ’
+Interlaced video, bottom field coded and displayed first
+
+‘tb ’
+Interlaced video, top coded first, bottom displayed first
+
+‘bt ’
+Interlaced video, bottom coded first, top displayed first
+
+
+
+
+skip_alpha integer (decoding,video )
+Set to 1 to disable processing alpha (transparency). This works like the
+‘gray ’ flag in the flags option which skips chroma information
+instead of alpha. Default is 0.
+
+
+codec_whitelist list (input )
+"," separated List of allowed decoders. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+Stream parameters.
+For example, to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
+
3 Decoders# TOC
+
+
Decoders are configured elements in FFmpeg which allow the decoding of
+multimedia streams.
+
+
When you configure your FFmpeg build, all the supported native decoders
+are enabled by default. Decoders requiring an external library must be enabled
+manually via the corresponding --enable-lib
option. You can list all
+available decoders using the configure option --list-decoders
.
+
+
You can disable all the decoders with the configure option
+--disable-decoders
and selectively enable / disable single decoders
+with the options --enable-decoder=DECODER
/
+--disable-decoder=DECODER
.
+
+
The option -decoders
of the ff* tools will display the list of
+enabled decoders.
+
+
+
+
4 Video Decoders# TOC
+
+
A description of some of the currently available video decoders
+follows.
+
+
+
4.1 rawvideo# TOC
+
+
Raw video decoder.
+
+
This decoder decodes rawvideo streams.
+
+
+
4.1.1 Options# TOC
+
+
+top top_field_first
+Specify the assumed field type of the input video.
+
+-1
+the video is assumed to be progressive (default)
+
+0
+bottom-field-first is assumed
+
+1
+top-field-first is assumed
+
+
+
+
+
+
+
+
+
5 Audio Decoders# TOC
+
+
A description of some of the currently available audio decoders
+follows.
+
+
+
+
+
AC-3 audio decoder.
+
+
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
+the undocumented RealAudio 3 (a.k.a. dnet).
+
+
+
5.1.1 AC-3 Decoder Options# TOC
+
+
+-drc_scale value
+Dynamic Range Scale Factor. The factor to apply to dynamic range values
+from the AC-3 stream. This factor is applied exponentially.
+There are 3 notable scale factor ranges:
+
+drc_scale == 0
+DRC disabled. Produces full range audio.
+
+0 < drc_scale <= 1
+DRC enabled. Applies a fraction of the stream DRC value.
+Audio reproduction is between full range and full compression.
+
+drc_scale > 1
+DRC enabled. Applies drc_scale asymmetrically.
+Loud sounds are fully compressed. Soft sounds are enhanced.
+
+
+
+
+
+
+
+
5.2 ffwavesynth# TOC
+
+
Internal wave synthesizer.
+
+
This decoder generates wave patterns according to predefined sequences. Its
+use is purely internal and the format of the data it accepts is not publicly
+documented.
+
+
+
5.3 libcelt# TOC
+
+
libcelt decoder wrapper.
+
+
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
+Requires the presence of the libcelt headers and library during configuration.
+You need to explicitly configure the build with --enable-libcelt
.
+
+
+
5.4 libgsm# TOC
+
+
libgsm decoder wrapper.
+
+
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
+the presence of the libgsm headers and library during configuration. You need
+to explicitly configure the build with --enable-libgsm
.
+
+
This decoder supports both the ordinary GSM and the Microsoft variant.
+
+
+
5.5 libilbc# TOC
+
+
libilbc decoder wrapper.
+
+
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
+audio codec. Requires the presence of the libilbc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libilbc
.
+
+
+
5.5.1 Options# TOC
+
+
The following option is supported by the libilbc wrapper.
+
+
+enhance
+
+Enable the enhancement of the decoded audio when set to 1. The default
+value is 0 (disabled).
+
+
+
+
+
+
5.6 libopencore-amrnb# TOC
+
+
libopencore-amrnb decoder wrapper.
+
+
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
+Narrowband audio codec. Using it requires the presence of the
+libopencore-amrnb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrnb
.
+
+
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
+without this library.
+
+
+
5.7 libopencore-amrwb# TOC
+
+
libopencore-amrwb decoder wrapper.
+
+
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
+Wideband audio codec. Using it requires the presence of the
+libopencore-amrwb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrwb
.
+
+
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
+without this library.
+
+
+
5.8 libopus# TOC
+
+
libopus decoder wrapper.
+
+
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
+Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopus
.
+
+
An FFmpeg native decoder for Opus exists, so users can decode Opus
+without this library.
+
+
+
+
6 Subtitles Decoders# TOC
+
+
+
6.1 dvdsub# TOC
+
+
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
+also be found in VobSub file pairs and in some Matroska files.
+
+
+
6.1.1 Options# TOC
+
+
+palette
+Specify the global palette used by the bitmaps. When stored in VobSub, the
+palette is normally specified in the index file; in Matroska, the palette is
+stored in the codec extra-data in the same format as in VobSub. In DVDs, the
+palette is stored in the IFO file, and therefore not available when reading
+from dumped VOB files.
+
+The format for this option is a string containing 16 24-bits hexadecimal
+numbers (without 0x prefix) separated by commas, for example 0d00ee,
+ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
+7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
+
+
+ifo_palette
+Specify the IFO file from which the global palette is obtained.
+(experimental)
+
+
+forced_subs_only
+Only decode subtitle entries marked as forced. Some titles have forced
+and non-forced subtitles in the same track. Setting this flag to 1
+will only keep the forced subtitles. Default value is 0
.
+
+
+
+
+
6.2 libzvbi-teletext# TOC
+
+
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
+subtitles. Requires the presence of the libzvbi headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libzvbi
.
+
+
+
6.2.1 Options# TOC
+
+
+txt_page
+List of teletext page numbers to decode. You may use the special * string to
+match all pages. Pages that do not match the specified list are dropped.
+Default value is *.
+
+txt_chop_top
+Discards the top teletext line. Default value is 1.
+
+txt_format
+Specifies the format of the decoded subtitles. The teletext decoder is capable
+of decoding the teletext pages to bitmaps or to simple text, you should use
+"bitmap" for teletext pages, because certain graphics and colors cannot be
+expressed in simple text. You might use "text" for teletext based subtitles if
+your application can handle simple text based subtitles. Default value is
+bitmap.
+
+txt_left
+X offset of generated bitmaps, default is 0.
+
+txt_top
+Y offset of generated bitmaps, default is 0.
+
+txt_chop_spaces
+Chops leading and trailing spaces and removes empty lines from the generated
+text. This option is useful for teletext based subtitles where empty spaces may
+be present at the start or at the end of the lines or empty lines may be
+present between the subtitle lines because of double-sized teletext characters.
+Default value is 1.
+
+txt_duration
+Sets the display duration of the decoded teletext pages or subtitles in
+milliseconds. Default value is 30000, which is 30 seconds.
+
+txt_transparent
+Force transparent background of the generated teletext bitmaps. Default value
+is 0 which means an opaque (black) background.
+
+
+
+
+
7 Encoders# TOC
+
+
Encoders are configured elements in FFmpeg which allow the encoding of
+multimedia streams.
+
+
When you configure your FFmpeg build, all the supported native encoders
+are enabled by default. Encoders requiring an external library must be enabled
+manually via the corresponding --enable-lib
option. You can list all
+available encoders using the configure option --list-encoders
.
+
+
You can disable all the encoders with the configure option
+--disable-encoders
and selectively enable / disable single encoders
+with the options --enable-encoder=ENCODER
/
+--disable-encoder=ENCODER
.
+
+
The option -encoders
of the ff* tools will display the list of
+enabled encoders.
+
+
+
+
8 Audio Encoders# TOC
+
+
A description of some of the currently available audio encoders
+follows.
+
+
+
+
+
Advanced Audio Coding (AAC) encoder.
+
+
This encoder is an experimental FFmpeg-native AAC encoder. Currently only the
+low complexity (AAC-LC) profile is supported. To use this encoder, you must set
+strict option to ‘experimental ’ or lower.
+
+
As this encoder is experimental, unexpected behavior may exist from time to
+time. For a more stable AAC encoder, see libvo-aacenc . However, be warned
+that it has a worse quality reported by some users.
+
+
See also libfdk_aac and libfaac .
+
+
+
8.1.1 Options# TOC
+
+
+b
+Set bit rate in bits/s. Setting this automatically activates constant bit rate
+(CBR) mode.
+
+
+q
+Set quality for variable bit rate (VBR) mode. This option is valid only using
+the ffmpeg
command-line tool. For library interface users, use
+global_quality .
+
+
+stereo_mode
+Set stereo encoding mode. Possible values:
+
+
+‘auto ’
+Automatically selected by the encoder.
+
+
+‘ms_off ’
+Disable middle/side encoding. This is the default.
+
+
+‘ms_force ’
+Force middle/side encoding.
+
+
+
+
+aac_coder
+Set AAC encoder coding method. Possible values:
+
+
+‘faac ’
+FAAC-inspired method.
+
+This method is a simplified reimplementation of the method used in FAAC, which
+sets thresholds proportional to the band energies, and then decreases all the
+thresholds with quantizer steps to find the appropriate quantization with
+distortion below threshold band by band.
+
+The quality of this method is comparable to the two loop searching method
+described below, but somewhat better and slower.
+
+
+‘anmr ’
+Average noise to mask ratio (ANMR) trellis-based solution.
+
+This has a theoretic best quality out of all the coding methods, but at the
+cost of the slowest speed.
+
+
+‘twoloop ’
+Two loop searching (TLS) method.
+
+This method first sets quantizers depending on band thresholds and then tries
+to find an optimal combination by adding or subtracting a specific value from
+all quantizers and adjusting some individual quantizer a little.
+
+This method produces similar quality with the FAAC method and is the default.
+
+
+‘fast ’
+Constant quantizer method.
+
+This method sets a constant quantizer for all bands. This is the fastest of all
+the methods, yet produces the worst quality.
+
+
+
+
+
+
+
+
+
8.2 ac3 and ac3_fixed# TOC
+
+
AC-3 audio encoders.
+
+
These encoders implement part of ATSC A/52:2010 and ETSI TS 102 366, as well as
+the undocumented RealAudio 3 (a.k.a. dnet).
+
+
The ac3 encoder uses floating-point math, while the ac3_fixed
+encoder only uses fixed-point integer math. This does not mean that one is
+always faster, just that one or the other may be better suited to a
+particular system. The floating-point encoder will generally produce better
+quality audio for a given bitrate. The ac3_fixed encoder is not the
+default codec for any of the output formats, so it must be specified explicitly
+using the option -acodec ac3_fixed
in order to use it.
+
+
+
8.2.1 AC-3 Metadata# TOC
+
+
The AC-3 metadata options are used to set parameters that describe the audio,
+but in most cases do not affect the audio encoding itself. Some of the options
+do directly affect or influence the decoding and playback of the resulting
+bitstream, while others are just for informational purposes. A few of the
+options will add bits to the output stream that could otherwise be used for
+audio data, and will thus affect the quality of the output. Those will be
+indicated accordingly with a note in the option list below.
+
+
These parameters are described in detail in several publicly-available
+documents.
+
+
+
+
8.2.1.1 Metadata Control Options# TOC
+
+
+-per_frame_metadata boolean
+Allow Per-Frame Metadata. Specifies if the encoder should check for changing
+metadata for each frame.
+
+0
+The metadata values set at initialization will be used for every frame in the
+stream. (default)
+
+1
+Metadata values can be changed before encoding each frame.
+
+
+
+
+
+
+
+
8.2.1.2 Downmix Levels# TOC
+
+
+-center_mixlev level
+Center Mix Level. The amount of gain the decoder should apply to the center
+channel when downmixing to stereo. This field will only be written to the
+bitstream if a center channel is present. The value is specified as a scale
+factor. There are 3 valid values:
+
+0.707
+Apply -3dB gain
+
+0.595
+Apply -4.5dB gain (default)
+
+0.500
+Apply -6dB gain
+
+
+
+
+-surround_mixlev level
+Surround Mix Level. The amount of gain the decoder should apply to the surround
+channel(s) when downmixing to stereo. This field will only be written to the
+bitstream if one or more surround channels are present. The value is specified
+as a scale factor. There are 3 valid values:
+
+0.707
+Apply -3dB gain
+
+0.500
+Apply -6dB gain (default)
+
+0.000
+Silence Surround Channel(s)
+
+
+
+
+
+
+
+
8.2.1.3 Audio Production Information# TOC
+
Audio Production Information is optional information describing the mixing
+environment. Either none or both of the fields are written to the bitstream.
+
+
+-mixing_level number
+Mixing Level. Specifies peak sound pressure level (SPL) in the production
+environment when the mix was mastered. Valid values are 80 to 111, or -1 for
+unknown or not indicated. The default value is -1, but that value cannot be
+used if the Audio Production Information is written to the bitstream. Therefore,
+if the room_type
option is not the default value, the mixing_level
+option must not be -1.
+
+
+-room_type type
+Room Type. Describes the equalization used during the final mixing session at
+the studio or on the dubbing stage. A large room is a dubbing stage with the
+industry standard X-curve equalization; a small room has flat equalization.
+This field will not be written to the bitstream if both the mixing_level
+option and the room_type
option have the default values.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+large
+Large Room
+
+2
+small
+Small Room
+
+
+
+
+
+
+
+
8.2.1.4 Other Metadata Options# TOC
+
+
+-copyright boolean
+Copyright Indicator. Specifies whether a copyright exists for this audio.
+
+0
+off
+No Copyright Exists (default)
+
+1
+on
+Copyright Exists
+
+
+
+
+-dialnorm value
+Dialogue Normalization. Indicates how far the average dialogue level of the
+program is below digital 100% full scale (0 dBFS). This parameter determines a
+level shift during audio reproduction that sets the average volume of the
+dialogue to a preset level. The goal is to match volume level between program
+sources. A value of -31dB will result in no volume level change, relative to
+the source volume, during audio reproduction. Valid values are whole numbers in
+the range -31 to -1, with -31 being the default.
+
+
+-dsur_mode mode
+Dolby Surround Mode. Specifies whether the stereo signal uses Dolby Surround
+(Pro Logic). This field will only be written to the bitstream if the audio
+stream is stereo. Using this option does NOT mean the encoder will actually
+apply Dolby Surround processing.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+off
+Not Dolby Surround Encoded
+
+2
+on
+Dolby Surround Encoded
+
+
+
+
+-original boolean
+Original Bit Stream Indicator. Specifies whether this audio is from the
+original source and not a copy.
+
+0
+off
+Not Original Source
+
+1
+on
+Original Source (default)
+
+
+
+
+
+
+
+
8.2.2 Extended Bitstream Information# TOC
+
The extended bitstream options are part of the Alternate Bit Stream Syntax as
+specified in Annex D of the A/52:2010 standard. It is grouped into 2 parts.
+If any one parameter in a group is specified, all values in that group will be
+written to the bitstream. Default values are used for those that are written
+but have not been specified. If the mixing levels are written, the decoder
+will use these values instead of the ones specified in the center_mixlev
+and surround_mixlev
options if it supports the Alternate Bit Stream
+Syntax.
+
+
+
8.2.2.1 Extended Bitstream Information - Part 1# TOC
+
+
+-dmix_mode mode
+Preferred Stereo Downmix Mode. Allows the user to select either Lt/Rt
+(Dolby Surround) or Lo/Ro (normal stereo) as the preferred stereo downmix mode.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+ltrt
+Lt/Rt Downmix Preferred
+
+2
+loro
+Lo/Ro Downmix Preferred
+
+
+
+
+-ltrt_cmixlev level
+Lt/Rt Center Mix Level. The amount of gain the decoder should apply to the
+center channel when downmixing to stereo in Lt/Rt mode.
+
+1.414
+Apply +3dB gain
+
+1.189
+Apply +1.5dB gain
+
+1.000
+Apply 0dB gain
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain (default)
+
+0.500
+Apply -6.0dB gain
+
+0.000
+Silence Center Channel
+
+
+
+
+-ltrt_surmixlev level
+Lt/Rt Surround Mix Level. The amount of gain the decoder should apply to the
+surround channel(s) when downmixing to stereo in Lt/Rt mode.
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain
+
+0.500
+Apply -6.0dB gain (default)
+
+0.000
+Silence Surround Channel(s)
+
+
+
+
+-loro_cmixlev level
+Lo/Ro Center Mix Level. The amount of gain the decoder should apply to the
+center channel when downmixing to stereo in Lo/Ro mode.
+
+1.414
+Apply +3dB gain
+
+1.189
+Apply +1.5dB gain
+
+1.000
+Apply 0dB gain
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain (default)
+
+0.500
+Apply -6.0dB gain
+
+0.000
+Silence Center Channel
+
+
+
+
+-loro_surmixlev level
+Lo/Ro Surround Mix Level. The amount of gain the decoder should apply to the
+surround channel(s) when downmixing to stereo in Lo/Ro mode.
+
+0.841
+Apply -1.5dB gain
+
+0.707
+Apply -3.0dB gain
+
+0.595
+Apply -4.5dB gain
+
+0.500
+Apply -6.0dB gain (default)
+
+0.000
+Silence Surround Channel(s)
+
+
+
+
+
+
+
+
8.2.2.2 Extended Bitstream Information - Part 2# TOC
+
+
+-dsurex_mode mode
+Dolby Surround EX Mode. Indicates whether the stream uses Dolby Surround EX
+(7.1 matrixed to 5.1). Using this option does NOT mean the encoder will actually
+apply Dolby Surround EX processing.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+off
+Dolby Surround EX Off
+
+2
+on
+Dolby Surround EX On
+
+
+
+
+-dheadphone_mode mode
+Dolby Headphone Mode. Indicates whether the stream uses Dolby Headphone
+encoding (multi-channel matrixed to 2.0 for use with headphones). Using this
+option does NOT mean the encoder will actually apply Dolby Headphone
+processing.
+
+0
+notindicated
+Not Indicated (default)
+
+1
+off
+Dolby Headphone Off
+
+2
+on
+Dolby Headphone On
+
+
+
+
+-ad_conv_type type
+A/D Converter Type. Indicates whether the audio has passed through HDCD A/D
+conversion.
+
+0
+standard
+Standard A/D Converter (default)
+
+1
+hdcd
+HDCD A/D Converter
+
+
+
+
+
+
+
+
8.2.3 Other AC-3 Encoding Options# TOC
+
+
+-stereo_rematrixing boolean
+Stereo Rematrixing. Enables/Disables use of rematrixing for stereo input. This
+is an optional AC-3 feature that increases quality by selectively encoding
+the left/right channels as mid/side. This option is enabled by default, and it
+is highly recommended that it be left as enabled except for testing purposes.
+
+
+
+
+
+
8.2.4 Floating-Point-Only AC-3 Encoding Options# TOC
+
+
These options are only valid for the floating-point encoder and do not exist
+for the fixed-point encoder due to the corresponding features not being
+implemented in fixed-point.
+
+
+-channel_coupling boolean
+Enables/Disables use of channel coupling, which is an optional AC-3 feature
+that increases quality by combining high frequency information from multiple
+channels into a single channel. The per-channel high frequency information is
+sent with less accuracy in both the frequency and time domains. This allows
+more bits to be used for lower frequencies while preserving enough information
+to reconstruct the high frequencies. This option is enabled by default for the
+floating-point encoder and should generally be left as enabled except for
+testing purposes or to increase encoding speed.
+
+-1
+auto
+Selected by Encoder (default)
+
+0
+off
+Disable Channel Coupling
+
+1
+on
+Enable Channel Coupling
+
+
+
+
+-cpl_start_band number
+Coupling Start Band. Sets the channel coupling start band, from 1 to 15. If a
+value higher than the bandwidth is used, it will be reduced to 1 less than the
+coupling end band. If auto is used, the start band will be determined by
+the encoder based on the bit rate, sample rate, and channel layout. This option
+has no effect if channel coupling is disabled.
+
+-1
+auto
+Selected by Encoder (default)
+
+
+
+
+
+
+
+
8.3 libfaac# TOC
+
+
libfaac AAC (Advanced Audio Coding) encoder wrapper.
+
+
Requires the presence of the libfaac headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libfaac --enable-nonfree
.
+
+
This encoder is considered to be of higher quality with respect to
+the native experimental FFmpeg AAC encoder .
+
+
For more information see the libfaac project at
+http://www.audiocoding.com/faac.html/ .
+
+
+
8.3.1 Options# TOC
+
+
The following shared FFmpeg codec options are recognized.
+
+
The following options are supported by the libfaac wrapper. The
+faac
-equivalent of the options are listed in parentheses.
+
+
+b (-b )
+Set bit rate in bits/s for ABR (Average Bit Rate) mode. If the bit rate
+is not explicitly specified, it is automatically set to a suitable
+value depending on the selected profile. faac
bitrate is
+expressed in kilobits/s.
+
+Note that libfaac does not support CBR (Constant Bit Rate) but only
+ABR (Average Bit Rate).
+
+If VBR mode is enabled this option is ignored.
+
+
+ar (-R )
+Set audio sampling rate (in Hz).
+
+
+ac (-c )
+Set the number of audio channels.
+
+
+cutoff (-C )
+Set cutoff frequency. If not specified (or explicitly set to 0) it
+will use a value automatically computed by the library. Default value
+is 0.
+
+
+profile
+Set audio profile.
+
+The following profiles are recognized:
+
+‘aac_main ’
+Main AAC (Main)
+
+
+‘aac_low ’
+Low Complexity AAC (LC)
+
+
+‘aac_ssr ’
+Scalable Sample Rate (SSR)
+
+
+‘aac_ltp ’
+Long Term Prediction (LTP)
+
+
+
+If not specified it is set to ‘aac_low ’.
+
+
+flags +qscale
+Set constant quality VBR (Variable Bit Rate) mode.
+
+
+global_quality
+Set quality in VBR mode as an integer number of lambda units.
+
+Only relevant when VBR mode is enabled with flags +qscale
. The
+value is converted to QP units by dividing it by FF_QP2LAMBDA
,
+and used to set the quality value used by libfaac. A reasonable range
+for the option value in QP units is [10-500], the higher the value the
+higher the quality.
+
+
+q (-q )
+Enable VBR mode when set to a non-negative value, and set constant
+quality value as a double floating point value in QP units.
+
+The value sets the quality value used by libfaac. A reasonable range
+for the option value is [10-500], the higher the value the higher the
+quality.
+
+This option is valid only using the ffmpeg
command-line
+tool. For library interface users, use global_quality .
+
+
+
+
+
8.3.2 Examples# TOC
+
+
+ Use ffmpeg
to convert an audio file to ABR 128 kbps AAC in an M4A (MP4)
+container:
+
+
ffmpeg -i input.wav -codec:a libfaac -b:a 128k output.m4a
+
+
+ Use ffmpeg
to convert an audio file to VBR AAC, using the
+LTP AAC profile:
+
+
ffmpeg -i input.wav -c:a libfaac -profile:a aac_ltp -q:a 100 output.m4a
+
+
+
+
+
8.4 libfdk_aac# TOC
+
+
libfdk-aac AAC (Advanced Audio Coding) encoder wrapper.
+
+
The libfdk-aac library is based on the Fraunhofer FDK AAC code from
+the Android project.
+
+
Requires the presence of the libfdk-aac headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libfdk-aac
. The library is also incompatible with GPL,
+so if you allow the use of GPL, you should configure with
+--enable-gpl --enable-nonfree --enable-libfdk-aac
.
+
+
This encoder is considered to be of higher quality with respect to
+both the native experimental FFmpeg AAC encoder and
+libfaac .
+
+
VBR encoding, enabled through the vbr or flags
++qscale options, is experimental and only works with some
+combinations of parameters.
+
+
Support for encoding 7.1 audio is only available with libfdk-aac 0.1.3 or
+higher.
+
+
For more information see the fdk-aac project at
+http://sourceforge.net/p/opencore-amr/fdk-aac/ .
+
+
+
8.4.1 Options# TOC
+
+
The following options are mapped on the shared FFmpeg codec options.
+
+
+b
+Set bit rate in bits/s. If the bitrate is not explicitly specified, it
+is automatically set to a suitable value depending on the selected
+profile.
+
+In case VBR mode is enabled the option is ignored.
+
+
+ar
+Set audio sampling rate (in Hz).
+
+
+channels
+Set the number of audio channels.
+
+
+flags +qscale
+Enable fixed quality, VBR (Variable Bit Rate) mode.
+Note that VBR is implicitly enabled when the vbr value is
+positive.
+
+
+cutoff
+Set cutoff frequency. If not specified (or explicitly set to 0) it
+will use a value automatically computed by the library. Default value
+is 0.
+
+
+profile
+Set audio profile.
+
+The following profiles are recognized:
+
+‘aac_low ’
+Low Complexity AAC (LC)
+
+
+‘aac_he ’
+High Efficiency AAC (HE-AAC)
+
+
+‘aac_he_v2 ’
+High Efficiency AAC version 2 (HE-AACv2)
+
+
+‘aac_ld ’
+Low Delay AAC (LD)
+
+
+‘aac_eld ’
+Enhanced Low Delay AAC (ELD)
+
+
+
+If not specified it is set to ‘aac_low ’.
+
+
+
+
The following are private options of the libfdk_aac encoder.
+
+
+afterburner
+Enable afterburner feature if set to 1, disabled if set to 0. This
+improves the quality but also the required processing power.
+
+Default value is 1.
+
+
+eld_sbr
+Enable SBR (Spectral Band Replication) for ELD if set to 1, disabled
+if set to 0.
+
+Default value is 0.
+
+
+signaling
+Set SBR/PS signaling style.
+
+It can assume one of the following values:
+
+‘default ’
+choose signaling implicitly (explicit hierarchical by default,
+implicit if global header is disabled)
+
+
+‘implicit ’
+implicit backwards compatible signaling
+
+
+‘explicit_sbr ’
+explicit SBR, implicit PS signaling
+
+
+‘explicit_hierarchical ’
+explicit hierarchical signaling
+
+
+
+Default value is ‘default ’.
+
+
+latm
+Output LATM/LOAS encapsulated data if set to 1, disabled if set to 0.
+
+Default value is 0.
+
+
+header_period
+Set StreamMuxConfig and PCE repetition period (in frames) for sending
+in-band configuration buffers within LATM/LOAS transport layer.
+
+Must be a 16-bits non-negative integer.
+
+Default value is 0.
+
+
+vbr
+Set VBR mode, from 1 to 5. 1 is lowest quality (though still pretty
+good) and 5 is highest quality. A value of 0 will disable VBR, and CBR
+(Constant Bit Rate) is enabled.
+
+Currently only the ‘aac_low ’ profile supports VBR encoding.
+
+VBR modes 1-5 correspond to roughly the following average bit rates:
+
+
+‘1 ’
+32 kbps/channel
+
+‘2 ’
+40 kbps/channel
+
+‘3 ’
+48-56 kbps/channel
+
+‘4 ’
+64 kbps/channel
+
+‘5 ’
+about 80-96 kbps/channel
+
+
+
+Default value is 0.
+
+
+
+
+
8.4.2 Examples# TOC
+
+
+ Use ffmpeg
to convert an audio file to VBR AAC in an M4A (MP4)
+container:
+
+
ffmpeg -i input.wav -codec:a libfdk_aac -vbr 3 output.m4a
+
+
+ Use ffmpeg
to convert an audio file to CBR 64 kbps AAC, using the
+High-Efficiency AAC profile:
+
+
ffmpeg -i input.wav -c:a libfdk_aac -profile:a aac_he -b:a 64k output.m4a
+
+
+
+
+
8.5 libmp3lame# TOC
+
+
LAME (Lame Ain’t an MP3 Encoder) MP3 encoder wrapper.
+
+
Requires the presence of the libmp3lame headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libmp3lame
.
+
+
See libshine for a fixed-point MP3 encoder, although with a
+lower quality.
+
+
+
8.5.1 Options# TOC
+
+
The following options are supported by the libmp3lame wrapper. The
+lame
-equivalent of the options are listed in parentheses.
+
+
+b (-b )
+Set bitrate expressed in bits/s for CBR or ABR. LAME bitrate
is
+expressed in kilobits/s.
+
+
+q (-V )
+Set constant quality setting for VBR. This option is valid only
+using the ffmpeg
command-line tool. For library interface
+users, use global_quality .
+
+
+compression_level (-q )
+Set algorithm quality. Valid arguments are integers in the 0-9 range,
+with 0 meaning highest quality but slowest, and 9 meaning fastest
+while producing the worst quality.
+
+
+reservoir
+Enable use of bit reservoir when set to 1. Default value is 1. LAME
+has this enabled by default, but it can be overridden by use of the
+--nores option.
+
+
+joint_stereo (-m j )
+Enable the encoder to use (on a frame by frame basis) either L/R
+stereo or mid/side stereo. Default value is 1.
+
+
+abr (--abr )
+Enable the encoder to use ABR when set to 1. The lame
+--abr option sets the target bitrate, while this option only
+tells FFmpeg to use ABR and still relies on b to set the bitrate.
+
+
+
+
+
+
8.6 libopencore-amrnb# TOC
+
+
OpenCORE Adaptive Multi-Rate Narrowband encoder.
+
+
Requires the presence of the libopencore-amrnb headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopencore-amrnb --enable-version3
.
+
+
This is a mono-only encoder. Officially it only supports 8000Hz sample rate,
+but you can override it by setting strict to ‘unofficial ’ or
+lower.
+
+
+
8.6.1 Options# TOC
+
+
+b
+Set bitrate in bits per second. Only the following bitrates are supported,
+otherwise libavcodec will round to the nearest valid bitrate.
+
+
+4750
+5150
+5900
+6700
+7400
+7950
+10200
+12200
+
+
+
+dtx
+Allow discontinuous transmission (generate comfort noise) when set to 1. The
+default value is 0 (disabled).
+
+
+
+
+
+
8.7 libshine# TOC
+
+
Shine Fixed-Point MP3 encoder wrapper.
+
+
Shine is a fixed-point MP3 encoder. It has a far better performance on
+platforms without an FPU, e.g. armel CPUs, and some phones and tablets.
+However, as it is more targeted on performance than quality, it is not on par
+with LAME and other production-grade encoders quality-wise. Also, according to
+the project’s homepage, this encoder may not be free of bugs as the code was
+written a long time ago and the project was dead for at least 5 years.
+
+
This encoder only supports stereo and mono input. This is also CBR-only.
+
+
The original project (last updated in early 2007) is at
+http://sourceforge.net/projects/libshine-fxp/ . We only support the
+updated fork by the Savonet/Liquidsoap project at https://github.com/savonet/shine .
+
+
Requires the presence of the libshine headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libshine
.
+
+
See also libmp3lame .
+
+
+
8.7.1 Options# TOC
+
+
The following options are supported by the libshine wrapper. The
+shineenc
-equivalent of the options are listed in parentheses.
+
+
+b (-b )
+Set bitrate expressed in bits/s for CBR. shineenc
-b option
+is expressed in kilobits/s.
+
+
+
+
+
+
8.8 libtwolame# TOC
+
+
TwoLAME MP2 encoder wrapper.
+
+
Requires the presence of the libtwolame headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libtwolame
.
+
+
+
8.8.1 Options# TOC
+
+
The following options are supported by the libtwolame wrapper. The
+twolame
-equivalent options follow the FFmpeg ones and are in
+parentheses.
+
+
+b (-b )
+Set bitrate expressed in bits/s for CBR. twolame
b
+option is expressed in kilobits/s. Default value is 128k.
+
+
+q (-V )
+Set quality for experimental VBR support. Maximum value range is
+from -50 to 50, useful range is from -10 to 10. The higher the
+value, the better the quality. This option is valid only using the
+ffmpeg
command-line tool. For library interface users,
+use global_quality .
+
+
+mode (--mode )
+Set the mode of the resulting audio. Possible values:
+
+
+‘auto ’
+Choose mode automatically based on the input. This is the default.
+
+‘stereo ’
+Stereo
+
+‘joint_stereo ’
+Joint stereo
+
+‘dual_channel ’
+Dual channel
+
+‘mono ’
+Mono
+
+
+
+
+psymodel (--psyc-mode )
+Set psychoacoustic model to use in encoding. The argument must be
+an integer between -1 and 4, inclusive. The higher the value, the
+better the quality. The default value is 3.
+
+
+energy_levels (--energy )
+Enable energy levels extensions when set to 1. The default value is
+0 (disabled).
+
+
+error_protection (--protect )
+Enable CRC error protection when set to 1. The default value is 0
+(disabled).
+
+
+copyright (--copyright )
+Set MPEG audio copyright flag when set to 1. The default value is 0
+(disabled).
+
+
+original (--original )
+Set MPEG audio original flag when set to 1. The default value is 0
+(disabled).
+
+
+
+
+
+
8.9 libvo-aacenc# TOC
+
+
VisualOn AAC encoder.
+
+
Requires the presence of the libvo-aacenc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libvo-aacenc --enable-version3
.
+
+
This encoder is considered to be worse than the
+native experimental FFmpeg AAC encoder , according to
+multiple sources.
+
+
+
8.9.1 Options# TOC
+
+
The VisualOn AAC encoder only supports encoding AAC-LC and up to 2
+channels. It is also CBR-only.
+
+
+b
+Set bit rate in bits/s.
+
+
+
+
+
+
8.10 libvo-amrwbenc# TOC
+
+
VisualOn Adaptive Multi-Rate Wideband encoder.
+
+
Requires the presence of the libvo-amrwbenc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libvo-amrwbenc --enable-version3
.
+
+
This is a mono-only encoder. Officially it only supports 16000Hz sample
+rate, but you can override it by setting strict to
+‘unofficial ’ or lower.
+
+
+
8.10.1 Options# TOC
+
+
+b
+Set bitrate in bits/s. Only the following bitrates are supported, otherwise
+libavcodec will round to the nearest valid bitrate.
+
+
+‘6600 ’
+‘8850 ’
+‘12650 ’
+‘14250 ’
+‘15850 ’
+‘18250 ’
+‘19850 ’
+‘23050 ’
+‘23850 ’
+
+
+
+dtx
+Allow discontinuous transmission (generate comfort noise) when set to 1. The
+default value is 0 (disabled).
+
+
+
+
+
+
8.11 libopus# TOC
+
+
libopus Opus Interactive Audio Codec encoder wrapper.
+
+
Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopus
.
+
+
+
8.11.1 Option Mapping# TOC
+
+
Most libopus options are modelled after the opusenc
utility from
+opus-tools. The following is an option mapping chart describing options
+supported by the libopus wrapper, and their opusenc
-equivalent
+in parentheses.
+
+
+b (bitrate )
+Set the bit rate in bits/s. FFmpeg’s b option is
+expressed in bits/s, while opusenc
’s bitrate in
+kilobits/s.
+
+
+vbr (vbr , hard-cbr , and cvbr )
+Set VBR mode. The FFmpeg vbr option has the following
+valid arguments, with their opusenc
equivalent options
+in parentheses:
+
+
+‘off (hard-cbr ) ’
+Use constant bit rate encoding.
+
+
+‘on (vbr ) ’
+Use variable bit rate encoding (the default).
+
+
+‘constrained (cvbr ) ’
+Use constrained variable bit rate encoding.
+
+
+
+
+compression_level (comp )
+Set encoding algorithm complexity. Valid options are integers in
+the 0-10 range. 0 gives the fastest encodes but lower quality, while 10
+gives the highest quality but slowest encoding. The default is 10.
+
+
+frame_duration (framesize )
+Set maximum frame size, or duration of a frame in milliseconds. The
+argument must be exactly the following: 2.5, 5, 10, 20, 40, 60. Smaller
+frame sizes achieve lower latency but less quality at a given bitrate.
+Sizes greater than 20ms are only interesting at fairly low bitrates.
+The default is 20ms.
+
+
+packet_loss (expect-loss )
+Set expected packet loss percentage. The default is 0.
+
+
+application (N.A.)
+Set intended application type. Valid options are listed below:
+
+
+‘voip ’
+Favor improved speech intelligibility.
+
+‘audio ’
+Favor faithfulness to the input (the default).
+
+‘lowdelay ’
+Restrict to only the lowest delay modes.
+
+
+
+
+cutoff (N.A.)
+Set cutoff bandwidth in Hz. The argument must be exactly one of the
+following: 4000, 6000, 8000, 12000, or 20000, corresponding to
+narrowband, mediumband, wideband, super wideband, and fullband
+respectively. The default is 0 (cutoff disabled).
+
+
+
+
+
+
8.12 libvorbis# TOC
+
+
libvorbis encoder wrapper.
+
+
Requires the presence of the libvorbisenc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libvorbis
.
+
+
+
8.12.1 Options# TOC
+
+
The following options are supported by the libvorbis wrapper. The
+oggenc
-equivalent of the options are listed in parentheses.
+
+
To get a more accurate and extensive documentation of the libvorbis
+options, consult the libvorbisenc’s and oggenc
’s documentations.
+See http://xiph.org/vorbis/ ,
+http://wiki.xiph.org/Vorbis-tools , and oggenc(1).
+
+
+b (-b )
+Set bitrate expressed in bits/s for ABR. oggenc
-b is
+expressed in kilobits/s.
+
+
+q (-q )
+Set constant quality setting for VBR. The value should be a float
+number in the range of -1.0 to 10.0. The higher the value, the better
+the quality. The default value is ‘3.0 ’.
+
+This option is valid only using the ffmpeg
command-line tool.
+For library interface users, use global_quality .
+
+
+cutoff (--advanced-encode-option lowpass_frequency=N )
+Set cutoff bandwidth in Hz, a value of 0 disables cutoff. oggenc
’s
+related option is expressed in kHz. The default value is ‘0 ’ (cutoff
+disabled).
+
+
+minrate (-m )
+Set minimum bitrate expressed in bits/s. oggenc
-m is
+expressed in kilobits/s.
+
+
+maxrate (-M )
+Set maximum bitrate expressed in bits/s. oggenc
-M is
+expressed in kilobits/s. This only has effect on ABR mode.
+
+
+iblock (--advanced-encode-option impulse_noisetune=N )
+Set noise floor bias for impulse blocks. The value is a float number from
+-15.0 to 0.0. A negative bias instructs the encoder to pay special attention
+to the crispness of transients in the encoded audio. The tradeoff for better
+transient response is a higher bitrate.
+
+
+
+
+
+
8.13 libwavpack# TOC
+
+
A wrapper providing WavPack encoding through libwavpack.
+
+
Only lossless mode using 32-bit integer samples is supported currently.
+
+
Requires the presence of the libwavpack headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libwavpack
.
+
+
Note that a libavcodec-native encoder for the WavPack codec exists so users can
+encode audio with this codec without using this encoder. See wavpackenc .
+
+
+
8.13.1 Options# TOC
+
+
wavpack
command line utility’s corresponding options are listed in
+parentheses, if any.
+
+
+frame_size (--blocksize )
+Default is 32768.
+
+
+compression_level
+Set speed vs. compression tradeoff. Acceptable arguments are listed below:
+
+
+‘0 (-f ) ’
+Fast mode.
+
+
+‘1 ’
+Normal (default) settings.
+
+
+‘2 (-h ) ’
+High quality.
+
+
+‘3 (-hh ) ’
+Very high quality.
+
+
+‘4-8 (-hh -x EXTRAPROC ) ’
+Same as ‘3 ’, but with extra processing enabled.
+
+‘4 ’ is the same as -x2 and ‘8 ’ is the same as -x6 .
+
+
+
+
+
+
+
+
8.14 wavpack# TOC
+
+
WavPack lossless audio encoder.
+
+
This is a libavcodec-native WavPack encoder. There is also an encoder based on
+libwavpack, but there is virtually no reason to use that encoder.
+
+
See also libwavpack .
+
+
+
8.14.1 Options# TOC
+
+
The equivalent options for wavpack
command line utility are listed in
+parentheses.
+
+
+
8.14.1.1 Shared options# TOC
+
+
The following shared options are effective for this encoder. Only special notes
+about this particular encoder will be documented here. For the general meaning
+of the options, see the Codec Options chapter .
+
+
+frame_size (--blocksize )
+For this encoder, the range for this option is between 128 and 131072. Default
+is automatically decided based on sample rate and number of channels.
+
+For the complete formula of calculating default, see
+libavcodec/wavpackenc.c .
+
+
+compression_level (-f , -h , -hh , and -x )
+This option’s syntax is consistent with libwavpack ’s.
+
+
+
+
+
8.14.1.2 Private options# TOC
+
+
+joint_stereo (-j )
+Set whether to enable joint stereo. Valid values are:
+
+
+‘on (1 ) ’
+Force mid/side audio encoding.
+
+‘off (0 ) ’
+Force left/right audio encoding.
+
+‘auto ’
+Let the encoder decide automatically.
+
+
+
+
+optimize_mono
+Set whether to enable optimization for mono. This option is only effective for
+non-mono streams. Available values:
+
+
+‘on ’
+enabled
+
+‘off ’
+disabled
+
+
+
+
+
+
+
+
+
9 Video Encoders# TOC
+
+
A description of some of the currently available video encoders
+follows.
+
+
+
9.1 libtheora# TOC
+
+
libtheora Theora encoder wrapper.
+
+
Requires the presence of the libtheora headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libtheora
.
+
+
For more information about the libtheora project see
+http://www.theora.org/ .
+
+
+
9.1.1 Options# TOC
+
+
The following global options are mapped to internal libtheora options
+which affect the quality and the bitrate of the encoded stream.
+
+
+b
+Set the video bitrate in bit/s for CBR (Constant Bit Rate) mode. In
+case VBR (Variable Bit Rate) mode is enabled this option is ignored.
+
+
+flags
+Used to enable constant quality mode (VBR) encoding through the
+qscale flag, and to enable the pass1
and pass2
+modes.
+
+
+g
+Set the GOP size.
+
+
+global_quality
+Set the global quality as an integer in lambda units.
+
+Only relevant when VBR mode is enabled with flags +qscale
. The
+value is converted to QP units by dividing it by FF_QP2LAMBDA
,
+clipped in the [0 - 10] range, and then multiplied by 6.3 to get a
+value in the native libtheora range [0-63]. A higher value corresponds
+to a higher quality.
+
+
+q
+Enable VBR mode when set to a non-negative value, and set constant
+quality value as a double floating point value in QP units.
+
+The value is clipped in the [0-10] range, and then multiplied by 6.3
+to get a value in the native libtheora range [0-63].
+
+This option is valid only using the ffmpeg
command-line
+tool. For library interface users, use global_quality .
+
+
+
+
+
9.1.2 Examples# TOC
+
+
+ Set maximum constant quality (VBR) encoding with ffmpeg
:
+
+
ffmpeg -i INPUT -codec:v libtheora -q:v 10 OUTPUT.ogg
+
+
+ Use ffmpeg
to convert a CBR 1000 kbps Theora video stream:
+
+
ffmpeg -i INPUT -codec:v libtheora -b:v 1000k OUTPUT.ogg
+
+
+
+
+
9.2 libvpx# TOC
+
+
VP8/VP9 format supported through libvpx.
+
+
Requires the presence of the libvpx headers and library during configuration.
+You need to explicitly configure the build with --enable-libvpx
.
+
+
+
9.2.1 Options# TOC
+
+
Mapping from FFmpeg to libvpx options with conversion notes in parentheses.
+
+
+threads
+g_threads
+
+
+profile
+g_profile
+
+
+vb
+rc_target_bitrate
+
+
+g
+kf_max_dist
+
+
+keyint_min
+kf_min_dist
+
+
+qmin
+rc_min_quantizer
+
+
+qmax
+rc_max_quantizer
+
+
+bufsize, vb
+rc_buf_sz
+(bufsize * 1000 / vb)
+
+rc_buf_optimal_sz
+(bufsize * 1000 / vb * 5 / 6)
+
+
+rc_init_occupancy, vb
+rc_buf_initial_sz
+(rc_init_occupancy * 1000 / vb)
+
+
+rc_buffer_aggressivity
+rc_undershoot_pct
+
+
+skip_threshold
+rc_dropframe_thresh
+
+
+qcomp
+rc_2pass_vbr_bias_pct
+
+
+maxrate, vb
+rc_2pass_vbr_maxsection_pct
+(maxrate * 100 / vb)
+
+
+minrate, vb
+rc_2pass_vbr_minsection_pct
+(minrate * 100 / vb)
+
+
+minrate, maxrate, vb
+VPX_CBR
+(minrate == maxrate == vb)
+
+
+crf
+VPX_CQ
, VP8E_SET_CQ_LEVEL
+
+
+quality
+
+best
+VPX_DL_BEST_QUALITY
+
+good
+VPX_DL_GOOD_QUALITY
+
+realtime
+VPX_DL_REALTIME
+
+
+
+
+speed
+VP8E_SET_CPUUSED
+
+
+nr
+VP8E_SET_NOISE_SENSITIVITY
+
+
+mb_threshold
+VP8E_SET_STATIC_THRESHOLD
+
+
+slices
+VP8E_SET_TOKEN_PARTITIONS
+
+
+max-intra-rate
+VP8E_SET_MAX_INTRA_BITRATE_PCT
+
+
+force_key_frames
+VPX_EFLAG_FORCE_KF
+
+
+Alternate reference frame related
+
+vp8flags altref
+VP8E_SET_ENABLEAUTOALTREF
+
+arnr_max_frames
+VP8E_SET_ARNR_MAXFRAMES
+
+arnr_type
+VP8E_SET_ARNR_TYPE
+
+arnr_strength
+VP8E_SET_ARNR_STRENGTH
+
+rc_lookahead
+g_lag_in_frames
+
+
+
+
+vp8flags error_resilient
+g_error_resilient
+
+
+aq_mode
+VP9E_SET_AQ_MODE
+
+
+
+
+
For more information about libvpx see:
+http://www.webmproject.org/
+
+
+
+
9.3 libwebp# TOC
+
+
libwebp WebP Image encoder wrapper
+
+
libwebp is Google’s official encoder for WebP images. It can encode in either
+lossy or lossless mode. Lossy images are essentially a wrapper around a VP8
+frame. Lossless images are a separate codec developed by Google.
+
+
+
9.3.1 Pixel Format# TOC
+
+
Currently, libwebp only supports YUV420 for lossy and RGB for lossless due
+to limitations of the format and libwebp. Alpha is supported for either mode.
+Because of API limitations, if RGB is passed in when encoding lossy or YUV is
+passed in for encoding lossless, the pixel format will automatically be
+converted using functions from libwebp. This is not ideal and is done only for
+convenience.
+
+
+
9.3.2 Options# TOC
+
+
+-lossless boolean
+Enables/Disables use of lossless mode. Default is 0.
+
+
+-compression_level integer
+For lossy, this is a quality/speed tradeoff. Higher values give better quality
+for a given size at the cost of increased encoding time. For lossless, this is
+a size/speed tradeoff. Higher values give smaller size at the cost of increased
+encoding time. More specifically, it controls the number of extra algorithms
+and compression tools used, and varies the combination of these tools. This
+maps to the method option in libwebp. The valid range is 0 to 6.
+Default is 4.
+
+
+-qscale float
+For lossy encoding, this controls image quality, 0 to 100. For lossless
+encoding, this controls the effort and time spent at compressing more. The
+default value is 75. Note that for usage via libavcodec, this option is called
+global_quality and must be multiplied by FF_QP2LAMBDA .
+
+
+-preset type
+Configuration preset. This does some automatic settings based on the general
+type of the image.
+
+none
+Do not use a preset.
+
+default
+Use the encoder default.
+
+picture
+Digital picture, like portrait, inner shot
+
+photo
+Outdoor photograph, with natural lighting
+
+drawing
+Hand or line drawing, with high-contrast details
+
+icon
+Small-sized colorful images
+
+text
+Text-like
+
+
+
+
+
+
+
+
9.4 libx264, libx264rgb# TOC
+
+
x264 H.264/MPEG-4 AVC encoder wrapper.
+
+
This encoder requires the presence of the libx264 headers and library
+during configuration. You need to explicitly configure the build with
+--enable-libx264
.
+
+
libx264 supports an impressive number of features, including 8x8 and
+4x4 adaptive spatial transform, adaptive B-frame placement, CAVLC/CABAC
+entropy coding, interlacing (MBAFF), lossless mode, psy optimizations
+for detail retention (adaptive quantization, psy-RD, psy-trellis).
+
+
Many libx264 encoder options are mapped to FFmpeg global codec
+options, while unique encoder options are provided through private
+options. Additionally the x264opts and x264-params
+private options allow one to pass a list of key=value tuples as accepted
+by the libx264 x264_param_parse
function.
+
+
The x264 project website is at
+http://www.videolan.org/developers/x264.html .
+
+
The libx264rgb encoder is the same as libx264, except it accepts packed RGB
+pixel formats as input instead of YUV.
+
+
+
9.4.1 Supported Pixel Formats# TOC
+
+
x264 supports 8- to 10-bit color spaces. The exact bit depth is controlled at
+x264’s configure time. FFmpeg only supports one bit depth in one particular
+build. In other words, it is not possible to build one FFmpeg with multiple
+versions of x264 with different bit depths.
+
+
+
9.4.2 Options# TOC
+
+
The following options are supported by the libx264 wrapper. The
+x264
-equivalent options or values are listed in parentheses
+for easy migration.
+
+
To reduce the duplication of documentation, only the private options
+and some others requiring special attention are documented here. For
+the documentation of the undocumented generic options, see
+the Codec Options chapter .
+
+
To get a more accurate and extensive documentation of the libx264
+options, invoke the command x264 --full-help
or consult
+the libx264 documentation.
+
+
+b (bitrate )
+Set bitrate in bits/s. Note that FFmpeg’s b option is
+expressed in bits/s, while x264
’s bitrate is in
+kilobits/s.
+
+
+bf (bframes )
+g (keyint )
+qmin (qpmin )
+Minimum quantizer scale.
+
+
+qmax (qpmax )
+Maximum quantizer scale.
+
+
+qdiff (qpstep )
+Maximum difference between quantizer scales.
+
+
+qblur (qblur )
+Quantizer curve blur
+
+
+qcomp (qcomp )
+Quantizer curve compression factor
+
+
+refs (ref )
+Number of reference frames each P-frame can use. The range is from 0-16 .
+
+
+sc_threshold (scenecut )
+Sets the threshold for the scene change detection.
+
+
+trellis (trellis )
+Performs Trellis quantization to increase efficiency. Enabled by default.
+
+
+nr (nr )
+me_range (merange )
+Maximum range of the motion search in pixels.
+
+
+me_method (me )
+Set motion estimation method. Possible values in the decreasing order
+of speed:
+
+
+‘dia (dia ) ’
+‘epzs (dia ) ’
+Diamond search with radius 1 (fastest). ‘epzs ’ is an alias for
+‘dia ’.
+
+‘hex (hex ) ’
+Hexagonal search with radius 2.
+
+‘umh (umh ) ’
+Uneven multi-hexagon search.
+
+‘esa (esa ) ’
+Exhaustive search.
+
+‘tesa (tesa ) ’
+Hadamard exhaustive search (slowest).
+
+
+
+
+subq (subme )
+Sub-pixel motion estimation method.
+
+
+b_strategy (b-adapt )
+Adaptive B-frame placement decision algorithm. Use only on first-pass.
+
+
+keyint_min (min-keyint )
+Minimum GOP size.
+
+
+coder
+Set entropy encoder. Possible values:
+
+
+‘ac ’
+Enable CABAC.
+
+
+‘vlc ’
+Enable CAVLC and disable CABAC. It generates the same effect as
+x264
’s --no-cabac option.
+
+
+
+
+cmp
+Set full pixel motion estimation comparison algorithm. Possible values:
+
+
+‘chroma ’
+Enable chroma in motion estimation.
+
+
+‘sad ’
+Ignore chroma in motion estimation. It generates the same effect as
+x264
’s --no-chroma-me option.
+
+
+
+
+threads (threads )
+Number of encoding threads.
+
+
+thread_type
+Set multithreading technique. Possible values:
+
+
+‘slice ’
+Slice-based multithreading. It generates the same effect as
+x264
’s --sliced-threads option.
+
+‘frame ’
+Frame-based multithreading.
+
+
+
+
+flags
+Set encoding flags. It can be used to disable closed GOP and enable
+open GOP by setting it to -cgop
. The result is similar to
+the behavior of x264
’s --open-gop option.
+
+
+rc_init_occupancy (vbv-init )
+preset (preset )
+Set the encoding preset.
+
+
+tune (tune )
+Set tuning of the encoding params.
+
+
+profile (profile )
+Set profile restrictions.
+
+
+fastfirstpass
+Enable fast settings when encoding first pass, when set to 1. When set
+to 0, it has the same effect of x264
’s
+--slow-firstpass option.
+
+
+crf (crf )
+Set the quality for constant quality mode.
+
+
+crf_max (crf-max )
+In CRF mode, prevents VBV from lowering quality beyond this point.
+
+
+qp (qp )
+Set constant quantization rate control method parameter.
+
+
+aq-mode (aq-mode )
+Set AQ method. Possible values:
+
+
+‘none (0 ) ’
+Disabled.
+
+
+‘variance (1 ) ’
+Variance AQ (complexity mask).
+
+
+‘autovariance (2 ) ’
+Auto-variance AQ (experimental).
+
+
+
+
+aq-strength (aq-strength )
+Set AQ strength, reduce blocking and blurring in flat and textured areas.
+
+
+psy
+Use psychovisual optimizations when set to 1. When set to 0, it has the
+same effect as x264
’s --no-psy option.
+
+
+psy-rd (psy-rd )
+Set strength of psychovisual optimization, in
+psy-rd :psy-trellis format.
+
+
+rc-lookahead (rc-lookahead )
+Set number of frames to look ahead for frametype and ratecontrol.
+
+
+weightb
+Enable weighted prediction for B-frames when set to 1. When set to 0,
+it has the same effect as x264
’s --no-weightb option.
+
+
+weightp (weightp )
+Set weighted prediction method for P-frames. Possible values:
+
+
+‘none (0 ) ’
+Disabled
+
+‘simple (1 ) ’
+Enable only weighted refs
+
+‘smart (2 ) ’
+Enable both weighted refs and duplicates
+
+
+
+
+ssim (ssim )
+Enable calculation and printing SSIM stats after the encoding.
+
+
+intra-refresh (intra-refresh )
+Enable the use of Periodic Intra Refresh instead of IDR frames when set
+to 1.
+
+
+avcintra-class (class )
+Configure the encoder to generate AVC-Intra.
+Valid values are 50, 100 and 200
+
+
+bluray-compat (bluray-compat )
+Configure the encoder to be compatible with the bluray standard.
+It is a shorthand for setting "bluray-compat=1 force-cfr=1".
+
+
+b-bias (b-bias )
+Set the influence on how often B-frames are used.
+
+
+b-pyramid (b-pyramid )
+Set method for keeping of some B-frames as references. Possible values:
+
+
+‘none (none ) ’
+Disabled.
+
+‘strict (strict ) ’
+Strictly hierarchical pyramid.
+
+‘normal (normal ) ’
+Non-strict (not Blu-ray compatible).
+
+
+
+
+mixed-refs
+Enable the use of one reference per partition, as opposed to one
+reference per macroblock when set to 1. When set to 0, it has the
+same effect as x264
’s --no-mixed-refs option.
+
+
+8x8dct
+Enable adaptive spatial transform (high profile 8x8 transform)
+when set to 1. When set to 0, it has the same effect as
+x264
’s --no-8x8dct option.
+
+
+fast-pskip
+Enable early SKIP detection on P-frames when set to 1. When set
+to 0, it has the same effect as x264
’s
+--no-fast-pskip option.
+
+
+aud (aud )
+Enable use of access unit delimiters when set to 1.
+
+
+mbtree
+Enable use of macroblock tree ratecontrol when set to 1. When set
+to 0, it has the same effect as x264
’s
+--no-mbtree option.
+
+
+deblock (deblock )
+Set loop filter parameters, in alpha :beta form.
+
+
+cplxblur (cplxblur )
+Set fluctuations reduction in QP (before curve compression).
+
+
+partitions (partitions )
+Set partitions to consider as a comma-separated list of. Possible
+values in the list:
+
+
+‘p8x8 ’
+8x8 P-frame partition.
+
+‘p4x4 ’
+4x4 P-frame partition.
+
+‘b8x8 ’
+8x8 B-frame partition.
+
+‘i8x8 ’
+8x8 I-frame partition.
+
+‘i4x4 ’
+4x4 I-frame partition.
+(Enabling ‘p4x4 ’ requires ‘p8x8 ’ to be enabled. Enabling
+‘i8x8 ’ requires adaptive spatial transform (8x8dct
+option) to be enabled.)
+
+‘none (none ) ’
+Do not consider any partitions.
+
+‘all (all ) ’
+Consider every partition.
+
+
+
+
+direct-pred (direct )
+Set direct MV prediction mode. Possible values:
+
+
+‘none (none ) ’
+Disable MV prediction.
+
+‘spatial (spatial ) ’
+Enable spatial predicting.
+
+‘temporal (temporal ) ’
+Enable temporal predicting.
+
+‘auto (auto ) ’
+Automatically decided.
+
+
+
+
+slice-max-size (slice-max-size )
+Set the limit of the size of each slice in bytes. If not specified
+but RTP payload size (ps ) is specified, that is used.
+
+
+stats (stats )
+Set the file name for multi-pass stats.
+
+
+nal-hrd (nal-hrd )
+Set signal HRD information (requires vbv-bufsize to be set).
+Possible values:
+
+
+‘none (none ) ’
+Disable HRD information signaling.
+
+‘vbr (vbr ) ’
+Variable bit rate.
+
+‘cbr (cbr ) ’
+Constant bit rate (not allowed in MP4 container).
+
+
+
+
+x264opts (N.A.)
+Set any x264 option, see x264 --fullhelp
for a list.
+
+Argument is a list of key =value couples separated by
+":". In filter and psy-rd options that use ":" as a separator
+themselves, use "," instead. They accept it as well since long ago but this
+is kept undocumented for some reason.
+
+For example to specify libx264 encoding options with ffmpeg
:
+
+
ffmpeg -i foo.mpg -vcodec libx264 -x264opts keyint=123:min-keyint=20 -an out.mkv
+
+
+
+x264-params (N.A.)
+Override the x264 configuration using a :-separated list of key=value
+parameters.
+
+This option is functionally the same as the x264opts , but is
+duplicated for compatibility with the Libav fork.
+
+For example to specify libx264 encoding options with ffmpeg
:
+
+
ffmpeg -i INPUT -c:v libx264 -x264-params level=30:bframes=0:weightp=0:\
+cabac=0:ref=1:vbv-maxrate=768:vbv-bufsize=2000:analyse=all:me=umh:\
+no-fast-pskip=1:subq=6:8x8dct=0:trellis=0 OUTPUT
+
+
+
+
+
Encoding ffpresets for common usages are provided so they can be used with the
+general presets system (e.g. passing the pre option).
+
+
+
9.5 libx265# TOC
+
+
x265 H.265/HEVC encoder wrapper.
+
+
This encoder requires the presence of the libx265 headers and library
+during configuration. You need to explicitly configure the build with
+--enable-libx265 .
+
+
+
9.5.1 Options# TOC
+
+
+preset
+Set the x265 preset.
+
+
+tune
+Set the x265 tune parameter.
+
+
+x265-params
+Set x265 options using a list of key =value couples separated
+by ":". See x265 --help
for a list of options.
+
+For example to specify libx265 encoding options with -x265-params :
+
+
+
ffmpeg -i input -c:v libx265 -x265-params crf=26:psy-rd=1 output.mp4
+
+
+
+
+
+
9.6 libxvid# TOC
+
+
Xvid MPEG-4 Part 2 encoder wrapper.
+
+
This encoder requires the presence of the libxvidcore headers and library
+during configuration. You need to explicitly configure the build with
+--enable-libxvid --enable-gpl
.
+
+
The native mpeg4
encoder supports the MPEG-4 Part 2 format, so
+users can encode to this format without this library.
+
+
+
9.6.1 Options# TOC
+
+
The following options are supported by the libxvid wrapper. Some of
+the following options are listed but are not documented, and
+correspond to shared codec options. See the Codec
+Options chapter for their documentation. The other shared options
+which are not listed have no effect for the libxvid encoder.
+
+
+b
+g
+qmin
+qmax
+mpeg_quant
+threads
+bf
+b_qfactor
+b_qoffset
+flags
+Set specific encoding flags. Possible values:
+
+
+‘mv4 ’
+Use four motion vector by macroblock.
+
+
+‘aic ’
+Enable high quality AC prediction.
+
+
+‘gray ’
+Only encode grayscale.
+
+
+‘gmc ’
+Enable the use of global motion compensation (GMC).
+
+
+‘qpel ’
+Enable quarter-pixel motion compensation.
+
+
+‘cgop ’
+Enable closed GOP.
+
+
+‘global_header ’
+Place global headers in extradata instead of every keyframe.
+
+
+
+
+
+trellis
+me_method
+Set motion estimation method. Possible values in decreasing order of
+speed and increasing order of quality:
+
+
+‘zero ’
+Use no motion estimation (default).
+
+
+‘phods ’
+‘x1 ’
+‘log ’
+Enable advanced diamond zonal search for 16x16 blocks and half-pixel
+refinement for 16x16 blocks. ‘x1 ’ and ‘log ’ are aliases for
+‘phods ’.
+
+
+‘epzs ’
+Enable all of the things described above, plus advanced diamond zonal
+search for 8x8 blocks, half-pixel refinement for 8x8 blocks, and motion
+estimation on chroma planes.
+
+
+‘full ’
+Enable all of the things described above, plus extended 16x16 and 8x8
+blocks search.
+
+
+
+
+mbd
+Set macroblock decision algorithm. Possible values in the increasing
+order of quality:
+
+
+‘simple ’
+Use macroblock comparing function algorithm (default).
+
+
+‘bits ’
+Enable rate distortion-based half pixel and quarter pixel refinement for
+16x16 blocks.
+
+
+‘rd ’
+Enable all of the things described above, plus rate distortion-based
+half pixel and quarter pixel refinement for 8x8 blocks, and rate
+distortion-based search using square pattern.
+
+
+
+
+lumi_aq
+Enable lumi masking adaptive quantization when set to 1. Default is 0
+(disabled).
+
+
+variance_aq
+Enable variance adaptive quantization when set to 1. Default is 0
+(disabled).
+
+When combined with lumi_aq , the resulting quality will not
+be better than any of the two specified individually. In other
+words, the resulting quality will be the worse one of the two
+effects.
+
+
+ssim
+Set structural similarity (SSIM) displaying method. Possible values:
+
+
+‘off ’
+Disable displaying of SSIM information.
+
+
+‘avg ’
+Output average SSIM at the end of encoding to stdout. The format of
+showing the average SSIM is:
+
+
+
+For users who are not familiar with C, %f means a float number, or
+a decimal (e.g. 0.939232).
+
+
+‘frame ’
+Output both per-frame SSIM data during encoding and average SSIM at
+the end of encoding to stdout. The format of per-frame information
+is:
+
+
+
SSIM: avg: %1.3f min: %1.3f max: %1.3f
+
+
+For users who are not familiar with C, %1.3f means a float number
+rounded to 3 digits after the dot (e.g. 0.932).
+
+
+
+
+
+ssim_acc
+Set SSIM accuracy. Valid options are integers within the range of
+0-4, while 0 gives the most accurate result and 4 computes the
+fastest.
+
+
+
+
+
+
9.7 mpeg2# TOC
+
+
MPEG-2 video encoder.
+
+
+
9.7.1 Options# TOC
+
+
+seq_disp_ext integer
+Specifies if the encoder should write a sequence_display_extension to the
+output.
+
+-1
+auto
+Decide automatically to write it or not (this is the default) by checking if
+the data to be written is different from the default or unspecified values.
+
+0
+never
+Never write it.
+
+1
+always
+Always write it.
+
+
+
+
+
+
+
+
+
PNG image encoder.
+
+
+
9.8.1 Private options# TOC
+
+
+dpi integer
+Set physical density of pixels, in dots per inch, unset by default
+
+dpm integer
+Set physical density of pixels, in dots per meter, unset by default
+
+
+
+
+
9.9 ProRes# TOC
+
+
Apple ProRes encoder.
+
+
FFmpeg contains 2 ProRes encoders, the prores-aw and prores-ks encoder.
+The used encoder can be chosen with the -vcodec
option.
+
+
+
9.9.1 Private Options for prores-ks# TOC
+
+
+profile integer
+Select the ProRes profile to encode
+
+‘proxy ’
+‘lt ’
+‘standard ’
+‘hq ’
+‘4444 ’
+
+
+
+quant_mat integer
+Select quantization matrix.
+
+‘auto ’
+‘default ’
+‘proxy ’
+‘lt ’
+‘standard ’
+‘hq ’
+
+If set to auto , the matrix matching the profile will be picked.
+If not set, the matrix providing the highest quality, default , will be
+picked.
+
+
+bits_per_mb integer
+How many bits to allot for coding one macroblock. Different profiles use
+between 200 and 2400 bits per macroblock, the maximum is 8000.
+
+
+mbs_per_slice integer
+Number of macroblocks in each slice (1-8); the default value (8)
+should be good in almost all situations.
+
+
+vendor string
+Override the 4-byte vendor ID.
+A custom vendor ID like apl0 would claim the stream was produced by
+the Apple encoder.
+
+
+alpha_bits integer
+Specify number of bits for alpha component.
+Possible values are 0 , 8 and 16 .
+Use 0 to disable alpha plane coding.
+
+
+
+
+
+
9.9.2 Speed considerations# TOC
+
+
In the default mode of operation the encoder has to honor frame constraints
+(i.e. not produce frames with size bigger than requested) while still making
+output picture as good as possible.
+A frame containing a lot of small details is harder to compress and the encoder
+would spend more time searching for appropriate quantizers for each slice.
+
+
Setting a higher bits_per_mb limit will improve the speed.
+
+
For the fastest encoding speed set the qscale parameter (4 is the
+recommended value) and do not set a size constraint.
+
+
+
+
10 Subtitles Encoders# TOC
+
+
+
10.1 dvdsub# TOC
+
+
This codec encodes the bitmap subtitle format that is used in DVDs.
+Typically they are stored in VOBSUB file pairs (*.idx + *.sub),
+and they can also be used in Matroska files.
+
+
+
10.1.1 Options# TOC
+
+
+even_rows_fix
+When set to 1, enable a work-around that makes the number of pixel rows
+even in all subtitles. This fixes a problem with some players that
+cut off the bottom row if the number is odd. The work-around just adds
+a fully transparent row if needed. The overhead is low, typically
+one byte per subtitle on average.
+
+By default, this work-around is disabled.
+
+
+
+
+
+
11 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavcodec
+
+
+
+
12 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-devices.html b/Externals/ffmpeg/dev/doc/ffmpeg-devices.html
new file mode 100644
index 0000000000..a460bd1369
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-devices.html
@@ -0,0 +1,1810 @@
+
+
+
+
+
+
+ FFmpeg Devices Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Devices Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes the input and output devices provided by the
+libavdevice library.
+
+
+
+
2 Device Options# TOC
+
+
The libavdevice library provides the same interface as
+libavformat. Namely, an input device is considered like a demuxer, and
+an output device like a muxer, and the interface and generic device
+options are the same provided by libavformat (see the ffmpeg-formats
+manual).
+
+
In addition each input or output device may support so-called private
+options, which are specific for that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the device
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
+
+
3 Input Devices# TOC
+
+
Input devices are configured elements in FFmpeg which allow you to access
+the data coming from a multimedia device attached to your system.
+
+
When you configure your FFmpeg build, all the supported input devices
+are enabled by default. You can list all available ones using the
+configure option "–list-indevs".
+
+
You can disable all the input devices using the configure option
+"–disable-indevs", and selectively enable an input device using the
+option "–enable-indev=INDEV ", or you can disable a particular
+input device using the option "–disable-indev=INDEV ".
+
+
The option "-devices" of the ff* tools will display the list of
+supported input devices.
+
+
A description of the currently available input devices follows.
+
+
+
3.1 alsa# TOC
+
+
ALSA (Advanced Linux Sound Architecture) input device.
+
+
To enable this input device during configuration you need libasound
+installed on your system.
+
+
This device allows capturing from an ALSA device. The name of the
+device to capture has to be an ALSA card identifier.
+
+
An ALSA identifier has the syntax:
+
+
hw:CARD [,DEV [,SUBDEV ]]
+
+
+
where the DEV and SUBDEV components are optional.
+
+
The three arguments (in order: CARD ,DEV ,SUBDEV )
+specify card number or identifier, device number and subdevice number
+(-1 means any).
+
+
To see the list of cards currently recognized by your system check the
+files /proc/asound/cards and /proc/asound/devices .
+
+
For example to capture with ffmpeg
from an ALSA device with
+card id 0, you may run the command:
+
+
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+
+
For more information see:
+http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
+
+
+
3.2 avfoundation# TOC
+
+
AVFoundation input device.
+
+
AVFoundation is the currently recommended framework by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
+The older QTKit framework has been marked deprecated since OSX version 10.7.
+
+
The input filename has to be given in the following syntax:
+
+
-i "[[VIDEO]:[AUDIO]]"
+
+
The first entry selects the video input while the latter selects the audio input.
+The stream has to be specified by the device name or the device index as shown by the device list.
+Alternatively, the video and/or audio input device can be chosen by index using the
+
+ -video_device_index <INDEX>
+
+and/or
+
+ -audio_device_index <INDEX>
+
+, overriding any
+device name or index given in the input filename.
+
+
All available devices can be enumerated by using -list_devices true , listing
+all device names and corresponding indices.
+
+
There are two device name aliases:
+
+default
+Select the AVFoundation default device of the corresponding type.
+
+
+none
+Do not record the corresponding media type.
+This is equivalent to specifying an empty device name or index.
+
+
+
+
+
+
3.2.1 Options# TOC
+
+
AVFoundation supports the following options:
+
+
+-list_devices <TRUE|FALSE>
+If set to true, a list of all available input devices is given showing all
+device names and indices.
+
+
+-video_device_index <INDEX>
+Specify the video device by its index. Overrides anything given in the input filename.
+
+
+-audio_device_index <INDEX>
+Specify the audio device by its index. Overrides anything given in the input filename.
+
+
+-pixel_format <FORMAT>
+Request the video device to use a specific pixel format.
+If the specified format is not supported, a list of available formats is given
+and the first one in this list is used instead. Available pixel formats are:
+monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
+ bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
+ yuv420p, nv12, yuyv422, gray
+
+
+
+
+
+
3.2.2 Examples# TOC
+
+
+ Print the list of AVFoundation supported devices and exit:
+
+
$ ffmpeg -f avfoundation -list_devices true -i ""
+
+
+ Record video from video device 0 and audio from audio device 0 into out.avi:
+
+
$ ffmpeg -f avfoundation -i "0:0" out.avi
+
+
+ Record video from video device 2 and audio from audio device 1 into out.avi:
+
+
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
+
+
+ Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
+
+
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
+
+
+
+
+
+
3.3 bktr# TOC
+
+
BSD video input device.
+
+
+
3.4 dshow# TOC
+
+
Windows DirectShow input device.
+
+
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
+Currently only audio and video devices are supported.
+
+
Multiple devices may be opened as separate inputs, but they may also be
+opened on the same input, which should improve synchronism between them.
+
+
The input name should be in the format:
+
+
+
+
where TYPE can be either audio or video ,
+and NAME is the device’s name.
+
+
+
3.4.1 Options# TOC
+
+
If no options are specified, the device’s defaults are used.
+If the device does not support the requested options, it will
+fail to open.
+
+
+video_size
+Set the video size in the captured video.
+
+
+framerate
+Set the frame rate in the captured video.
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+
+
+sample_size
+Set the sample size (in bits) of the captured audio.
+
+
+channels
+Set the number of channels in the captured audio.
+
+
+list_devices
+If set to true , print a list of devices and exit.
+
+
+list_options
+If set to true , print a list of selected device’s options
+and exit.
+
+
+video_device_number
+Set video device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+audio_device_number
+Set audio device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+pixel_format
+Select pixel format to be used by DirectShow. This may only be set when
+the video codec is not set or set to rawvideo.
+
+
+audio_buffer_size
+Set audio device buffer size in milliseconds (which can directly
+impact latency, depending on the device).
+Defaults to using the audio device’s
+default buffer size (typically some multiple of 500ms).
+Setting this value too low can degrade performance.
+See also
+http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
+
+
+
+
+
+
3.4.2 Examples# TOC
+
+
+ Print the list of DirectShow supported devices and exit:
+
+
$ ffmpeg -list_devices true -f dshow -i dummy
+
+
+ Open video device Camera :
+
+
$ ffmpeg -f dshow -i video="Camera"
+
+
+ Open second video device with name Camera :
+
+
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
+
+
+ Open video device Camera and audio device Microphone :
+
+
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
+
+
+ Print the list of supported options in selected device and exit:
+
+
$ ffmpeg -list_options true -f dshow -i video="Camera"
+
+
+
+
+
+
3.5 dv1394# TOC
+
+
Linux DV 1394 input device.
+
+
+
3.6 fbdev# TOC
+
+
Linux framebuffer input device.
+
+
The Linux framebuffer is a graphic hardware-independent abstraction
+layer to show graphics on a computer monitor, typically on the
+console. It is accessed through a file device node, usually
+/dev/fb0 .
+
+
For more detailed information read the file
+Documentation/fb/framebuffer.txt included in the Linux source tree.
+
+
To record from the framebuffer device /dev/fb0 with
+ffmpeg
:
+
+
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+
+
You can take a single screenshot image with the command:
+
+
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+
+
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
+
+
+
3.7 gdigrab# TOC
+
+
Win32 GDI-based screen capture device.
+
+
This device allows you to capture a region of the display on Windows.
+
+
There are two options for the input filename:
+
+
or
+
+
+
The first option will capture the entire desktop, or a fixed region of the
+desktop. The second option will instead capture the contents of a single
+window, regardless of its position on the screen.
+
+
For example, to grab the entire desktop using ffmpeg
:
+
+
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
+
+
+
Grab a 640x480 region at position 10,20
:
+
+
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
+
+
+
Grab the contents of the window named "Calculator"
+
+
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
+
+
+
+
3.7.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. Use the value 0
to
+not draw the pointer. Default value is 1
.
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+Note that show_region is incompatible with grabbing the contents
+of a single window.
+
+For example:
+
+
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
+
+
+
+video_size
+Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
+
+
+offset_x
+When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
+
+
+offset_y
+When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
+
+
+
+
+
+
3.8 iec61883# TOC
+
+
FireWire DV/HDV input device using libiec61883.
+
+
To enable this input device, you need libiec61883, libraw1394 and
+libavc1394 installed on your system. Use the configure option
+--enable-libiec61883
to compile with the device enabled.
+
+
The iec61883 capture device supports capturing from a video device
+connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
+FireWire stack (juju). This is the default DV/HDV input method in Linux
+Kernel 2.6.37 and later, since the old FireWire stack was removed.
+
+
Specify the FireWire port to be used as input file, or "auto"
+to choose the first port connected.
+
+
+
3.8.1 Options# TOC
+
+
+dvtype
+Override autodetection of DV/HDV. This should only be used if auto
+detection does not work, or if usage of a different device type
+should be prohibited. Treating a DV device as HDV (or vice versa) will
+not work and result in undefined behavior.
+The values auto , dv and hdv are supported.
+
+
+dvbuffer
+Set maximum size of buffer for incoming data, in frames. For DV, this
+is an exact value. For HDV, it is not frame exact, since HDV does
+not have a fixed frame size.
+
+
+dvguid
+Select the capture device by specifying its GUID. Capturing will only
+be performed from the specified device and fails if no device with the
+given GUID is found. This is useful to select the input if multiple
+devices are connected at the same time.
+Look at /sys/bus/firewire/devices to find out the GUIDs.
+
+
+
+
+
+
3.8.2 Examples# TOC
+
+
+ Grab and show the input of a FireWire DV/HDV device.
+
+
ffplay -f iec61883 -i auto
+
+
+ Grab and record the input of a FireWire DV/HDV device,
+using a packet buffer of 100000 packets if the source is HDV.
+
+
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
+
+
+
+
+
+
3.9 jack# TOC
+
+
JACK input device.
+
+
To enable this input device during configuration you need libjack
+installed on your system.
+
+
A JACK input device creates one or more JACK writable clients, one for
+each audio channel, with name client_name :input_N , where
+client_name is the name provided by the application, and N
+is a number which identifies the channel.
+Each writable client will send the acquired data to the FFmpeg input
+device.
+
+
Once you have created one or more JACK readable clients, you need to
+connect them to one or more JACK writable clients.
+
+
To connect or disconnect JACK clients you can use the jack_connect
+and jack_disconnect
programs, or do it through a graphical interface,
+for example with qjackctl
.
+
+
To list the JACK clients and their properties you can invoke the command
+jack_lsp
.
+
+
Follows an example which shows how to capture a JACK readable client
+with ffmpeg
.
+
+
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+
+
For more information read:
+http://jackaudio.org/
+
+
+
3.10 lavfi# TOC
+
+
Libavfilter input virtual device.
+
+
This input device reads data from the open output pads of a libavfilter
+filtergraph.
+
+
For each filtergraph open output, the input device will create a
+corresponding stream which is mapped to the generated output. Currently
+only video data is supported. The filtergraph is specified through the
+option graph .
+
+
+
3.10.1 Options# TOC
+
+
+graph
+Specify the filtergraph to use as input. Each video open output must be
+labelled by a unique string of the form "outN ", where N is a
+number starting from 0 corresponding to the mapped input stream
+generated by the device.
+The first unlabelled output is automatically assigned to the "out0"
+label, but all the others need to be specified explicitly.
+
+The suffix "+subcc" can be appended to the output label to create an extra
+stream with the closed captions packets attached to that output
+(experimental; only for EIA-608 / CEA-708 for now).
+The subcc streams are created after all the normal streams, in the order of
+the corresponding stream.
+For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
+stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
+
+If not specified defaults to the filename specified for the input
+device.
+
+
+graph_file
+Set the filename of the filtergraph to be read and sent to the other
+filters. Syntax of the filtergraph is the same as the one specified by
+the option graph .
+
+
+
+
+
+
3.10.2 Examples# TOC
+
+
+ Create a color video stream and play it back with ffplay
:
+
+
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
+
+
+ As the previous example, but use filename for specifying the graph
+description, and omit the "out0" label:
+
+
ffplay -f lavfi color=c=pink
+
+
+ Create three different video test filtered sources and play them:
+
+
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
+
+
+ Read an audio stream from a file using the amovie source and play it
+back with ffplay
:
+
+
ffplay -f lavfi "amovie=test.wav"
+
+
+ Read an audio stream and a video stream and play it back with
+ffplay
:
+
+
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
+
+
+ Dump decoded frames to images and closed captions to a file (experimental):
+
+
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
+
+
+
+
+
+
3.11 libcdio# TOC
+
+
Audio-CD input device based on cdio.
+
+
To enable this input device during configuration you need libcdio
+installed on your system. Requires the configure option
+--enable-libcdio
.
+
+
This device allows playing and grabbing from an Audio-CD.
+
+
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
+you may run the command:
+
+
ffmpeg -f libcdio -i /dev/sr0 cd.wav
+
+
+
+
3.12 libdc1394# TOC
+
+
IIDC1394 input device, based on libdc1394 and libraw1394.
+
+
Requires the configure option --enable-libdc1394
.
+
+
+
3.13 openal# TOC
+
+
The OpenAL input device provides audio capture on all systems with a
+working OpenAL 1.1 implementation.
+
+
To enable this input device during configuration, you need OpenAL
+headers and libraries installed on your system, and need to configure
+FFmpeg with --enable-openal
.
+
+
OpenAL headers and libraries should be provided as part of your OpenAL
+implementation, or as an additional download (an SDK). Depending on your
+installation you may need to specify additional flags via the
+--extra-cflags
and --extra-ldflags
for allowing the build
+system to locate the OpenAL headers and libraries.
+
+
An incomplete list of OpenAL implementations follows:
+
+
+Creative
+The official Windows implementation, providing hardware acceleration
+with supported devices and software fallback.
+See http://openal.org/ .
+
+OpenAL Soft
+Portable, open source (LGPL) software implementation. Includes
+backends for the most common sound APIs on the Windows, Linux,
+Solaris, and BSD operating systems.
+See http://kcat.strangesoft.net/openal.html .
+
+Apple
+OpenAL is part of Core Audio, the official Mac OS X Audio interface.
+See http://developer.apple.com/technologies/mac/audio-and-video.html
+
+
+
+
This device allows one to capture from an audio input device handled
+through OpenAL.
+
+
You need to specify the name of the device to capture in the provided
+filename. If the empty string is provided, the device will
+automatically select the default device. You can get the list of the
+supported devices by using the option list_devices .
+
+
+
3.13.1 Options# TOC
+
+
+channels
+Set the number of channels in the captured audio. Only the values
+1 (monaural) and 2 (stereo) are currently supported.
+Defaults to 2 .
+
+
+sample_size
+Set the sample size (in bits) of the captured audio. Only the values
+8 and 16 are currently supported. Defaults to
+16 .
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+Defaults to 44.1k .
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+
+
+
+
3.13.2 Examples# TOC
+
+
Print the list of OpenAL supported devices and exit:
+
+
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+
+
Capture from the OpenAL device DR-BT101 via PulseAudio :
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+
+
Capture from the default device (note the empty string ” as filename):
+
+
$ ffmpeg -f openal -i '' out.ogg
+
+
+
Capture from two devices simultaneously, writing to two different files,
+within the same ffmpeg
command:
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+
Note: not all OpenAL implementations support multiple simultaneous capture -
+try the latest OpenAL Soft if the above does not work.
+
+
+
3.14 oss# TOC
+
+
Open Sound System input device.
+
+
The filename to provide to the input device is the device node
+representing the OSS input device, and is usually set to
+/dev/dsp .
+
+
For example to grab from /dev/dsp using ffmpeg
use the
+command:
+
+
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+
+
For more information about OSS see:
+http://manuals.opensound.com/usersguide/dsp.html
+
+
+
3.15 pulse# TOC
+
+
PulseAudio input device.
+
+
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
+
+
The filename to provide to the input device is a source device or the
+string "default"
+
+
To list the PulseAudio source devices and their properties you can invoke
+the command pactl list sources
.
+
+
More information about PulseAudio can be found on http://www.pulseaudio.org .
+
+
+
3.15.1 Options# TOC
+
+server
+Connect to a specific PulseAudio server, specified by an IP address.
+Default server is used when not provided.
+
+
+name
+Specify the application name PulseAudio will use when showing active clients,
+by default it is the LIBAVFORMAT_IDENT
string.
+
+
+stream_name
+Specify the stream name PulseAudio will use when showing active streams,
+by default it is "record".
+
+
+sample_rate
+Specify the samplerate in Hz, by default 48kHz is used.
+
+
+channels
+Specify the channels in use, by default 2 (stereo) is set.
+
+
+frame_size
+Specify the number of bytes per frame, by default it is set to 1024.
+
+
+fragment_size
+Specify the minimal buffering fragment in PulseAudio, it will affect the
+audio latency. By default it is unset.
+
+
+
+
+
3.15.2 Examples# TOC
+
Record a stream from default device:
+
+
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+
+
+
3.16 qtkit# TOC
+
+
QTKit input device.
+
+
The filename passed as input is parsed to contain either a device name or index.
+The device index can also be given by using -video_device_index.
+A given device index will override any given device name.
+If the desired device consists of numbers only, use -video_device_index to identify it.
+The default device will be chosen if an empty string or the device name "default" is given.
+The available devices can be enumerated by using -list_devices.
+
+
+
ffmpeg -f qtkit -i "0" out.mpg
+
+
+
+
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
+
+
+
+
ffmpeg -f qtkit -i "default" out.mpg
+
+
+
+
ffmpeg -f qtkit -list_devices true -i ""
+
+
+
+
3.17 sndio# TOC
+
+
sndio input device.
+
+
To enable this input device during configuration you need libsndio
+installed on your system.
+
+
The filename to provide to the input device is the device node
+representing the sndio input device, and is usually set to
+/dev/audio0 .
+
+
For example to grab from /dev/audio0 using ffmpeg
use the
+command:
+
+
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+
+
+
3.18 video4linux2, v4l2# TOC
+
+
Video4Linux2 input video device.
+
+
"v4l2" can be used as alias for "video4linux2".
+
+
If FFmpeg is built with v4l-utils support (by using the
+--enable-libv4l2
configure option), it is possible to use it with the
+-use_libv4l2
input device option.
+
+
The name of the device to grab is a file device node, usually Linux
+systems tend to automatically create such nodes when the device
+(e.g. an USB webcam) is plugged into the system, and has a name of the
+kind /dev/videoN , where N is a number associated to
+the device.
+
+
Video4Linux2 devices usually support a limited set of
+width xheight sizes and frame rates. You can check which are
+supported using -list_formats all
for Video4Linux2 devices.
+Some devices, like TV cards, support one or more standards. It is possible
+to list all the supported standards using -list_standards all
.
+
+
The time base for the timestamps is 1 microsecond. Depending on the kernel
+version and configuration, the timestamps may be derived from the real time
+clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
+boot time, unaffected by NTP or manual changes to the clock). The
+-timestamps abs or -ts abs option can be used to force
+conversion into the real time clock.
+
+
Some usage examples of the video4linux2 device with ffmpeg
+and ffplay
:
+
+ Grab and show the input of a video4linux2 device:
+
+
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
+
+
+ Grab and record the input of a video4linux2 device, leave the
+frame rate and size as previously set:
+
+
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
+
+
+
+
For more information about Video4Linux, check http://linuxtv.org/ .
+
+
+
3.18.1 Options# TOC
+
+
+standard
+Set the standard. Must be the name of a supported standard. To get a
+list of the supported standards, use the list_standards
+option.
+
+
+channel
+Set the input channel number. Default to -1, which means using the
+previously selected channel.
+
+
+video_size
+Set the video frame size. The argument must be a string in the form
+WIDTH xHEIGHT or a valid size abbreviation.
+
+
+pixel_format
+Select the pixel format (only valid for raw video input).
+
+
+input_format
+Set the preferred pixel format (for raw video) or a codec name.
+This option allows one to select the input format, when several are
+available.
+
+
+framerate
+Set the preferred video frame rate.
+
+
+list_formats
+List available formats (supported pixel formats, codecs, and frame
+sizes) and exit.
+
+Available values are:
+
+‘all ’
+Show all available (compressed and non-compressed) formats.
+
+
+‘raw ’
+Show only raw video (non-compressed) formats.
+
+
+‘compressed ’
+Show only compressed formats.
+
+
+
+
+list_standards
+List supported standards and exit.
+
+Available values are:
+
+‘all ’
+Show all supported standards.
+
+
+
+
+timestamps, ts
+Set type of timestamps for grabbed frames.
+
+Available values are:
+
+‘default ’
+Use timestamps from the kernel.
+
+
+‘abs ’
+Use absolute timestamps (wall clock).
+
+
+‘mono2abs ’
+Force conversion from monotonic to absolute timestamps.
+
+
+
+Default value is default
.
+
+
+
+
+
3.19 vfwcap# TOC
+
+
VfW (Video for Windows) capture input device.
+
+
The filename passed as input is the capture driver number, ranging from
+0 to 9. You may use "list" as filename to print a list of drivers. Any
+other filename will be interpreted as device number 0.
+
+
+
3.20 x11grab# TOC
+
+
X11 video input device.
+
+
Depends on X11, Xext, and Xfixes. Requires the configure option
+--enable-x11grab
.
+
+
This device allows one to capture a region of an X11 display.
+
+
The filename passed as input has the syntax:
+
+
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
+
+
+
hostname :display_number .screen_number specifies the
+X11 display name of the screen to grab from. hostname can be
+omitted, and defaults to "localhost". The environment variable
+DISPLAY
contains the default display name.
+
+
x_offset and y_offset specify the offsets of the grabbed
+area with respect to the top-left border of the X11 screen. They
+default to 0.
+
+
Check the X11 documentation (e.g. man X) for more detailed information.
+
+
Use the xdpyinfo
program for getting basic information about the
+properties of your X11 display (e.g. grep for "name" or "dimensions").
+
+
For example to grab from :0.0 using ffmpeg
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
Grab at position 10,20
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+
+
3.20.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. A value of 0
specifies
+not to draw the pointer. Default value is 1
.
+
+
+follow_mouse
+Make the grabbed area follow the mouse. The argument can be
+centered
or a number of pixels PIXELS .
+
+When it is specified with "centered", the grabbing region follows the mouse
+pointer and keeps the pointer at the center of region; otherwise, the region
+follows only when the mouse pointer reaches within PIXELS (greater than
+zero) to the edge of region.
+
+For example:
+
+
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+To follow only when the mouse pointer reaches within 100 pixels to edge:
+
+
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+For example:
+
+
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+With follow_mouse :
+
+
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+video_size
+Set the video frame size. Default value is vga
.
+
+
+use_shm
+Use the MIT-SHM extension for shared memory. Default value is 1
.
+It may be necessary to disable it for remote displays.
+
+
+
+
+
3.21 decklink# TOC
+
+
The decklink input device provides capture capabilities for Blackmagic
+DeckLink devices.
+
+
To enable this input device, you need the Blackmagic DeckLink SDK and you
+need to configure with the appropriate --extra-cflags
+and --extra-ldflags
.
+On Windows, you need to run the IDL files through widl
.
+
+
DeckLink is very picky about the formats it supports. Pixel format is always
+uyvy422, framerate and video size must be determined for your device with
+-list_formats 1
. Audio sample rate is always 48 kHz and the number
+of channels currently is limited to 2 (stereo).
+
+
+
3.21.1 Options# TOC
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+list_formats
+If set to true , print a list of supported formats and exit.
+Defaults to false .
+
+
+
+
+
+
3.21.2 Examples# TOC
+
+
+ List input devices:
+
+
ffmpeg -f decklink -list_devices 1 -i dummy
+
+
+ List supported formats:
+
+
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
+
+
+ Capture video clip at 1080i50 (format 11):
+
+
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
+
+
+
+
+
+
+
4 Output Devices# TOC
+
+
Output devices are configured elements in FFmpeg that can write
+multimedia data to an output device attached to your system.
+
+
When you configure your FFmpeg build, all the supported output devices
+are enabled by default. You can list all available ones using the
+configure option "--list-outdevs".
+
+
+You can disable all the output devices using the configure option
+"--disable-outdevs", and selectively enable an output device using the
+option "--enable-outdev=OUTDEV ", or you can disable a particular
+output device using the option "--disable-outdev=OUTDEV ".
+
+
The option "-devices" of the ff* tools will display the list of
+enabled output devices.
+
+
A description of the currently available output devices follows.
+
+
+
4.1 alsa# TOC
+
+
ALSA (Advanced Linux Sound Architecture) output device.
+
+
+
4.1.1 Examples# TOC
+
+
+ Play a file on default ALSA device:
+
+
ffmpeg -i INPUT -f alsa default
+
+
+ Play a file on soundcard 1, audio device 7:
+
+
ffmpeg -i INPUT -f alsa hw:1,7
+
+
+
+
+
4.2 caca# TOC
+
+
CACA output device.
+
+
This output device allows one to show a video stream in CACA window.
+Only one CACA window is allowed per application, so you can
+have only one instance of this output device in an application.
+
+
To enable this output device you need to configure FFmpeg with
+--enable-libcaca
.
+libcaca is a graphics library that outputs text instead of pixels.
+
+
For more information about libcaca, check:
+http://caca.zoy.org/wiki/libcaca
+
+
+
4.2.1 Options# TOC
+
+
+window_title
+Set the CACA window title, if not specified default to the filename
+specified for the output device.
+
+
+window_size
+Set the CACA window size, can be a string of the form
+width xheight or a video size abbreviation.
+If not specified it defaults to the size of the input video.
+
+
+driver
+Set display driver.
+
+
+algorithm
+Set dithering algorithm. Dithering is necessary
+because the picture being rendered has usually far more colours than
+the available palette.
+The accepted values are listed with -list_dither algorithms
.
+
+
+antialias
+Set antialias method. Antialiasing smoothens the rendered
+image and avoids the commonly seen staircase effect.
+The accepted values are listed with -list_dither antialiases
.
+
+
+charset
+Set which characters are going to be used when rendering text.
+The accepted values are listed with -list_dither charsets
.
+
+
+color
+Set color to be used when rendering text.
+The accepted values are listed with -list_dither colors
.
+
+
+list_drivers
+If set to true , print a list of available drivers and exit.
+
+
+list_dither
+List available dither options related to the argument.
+The argument must be one of algorithms
, antialiases
,
+charsets
, colors
.
+
+
+
+
+
4.2.2 Examples# TOC
+
+
+ The following command shows the ffmpeg
output in a
+CACA window, forcing its size to 80x25:
+
+
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt rgb24 -window_size 80x25 -f caca -
+
+
+ Show the list of available drivers and exit:
+
+
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_drivers true -
+
+
+ Show the list of available dither colors and exit:
+
+
ffmpeg -i INPUT -pix_fmt rgb24 -f caca -list_dither colors -
+
+
+
+
+
4.3 decklink# TOC
+
+
The decklink output device provides playback capabilities for Blackmagic
+DeckLink devices.
+
+
To enable this output device, you need the Blackmagic DeckLink SDK and you
+need to configure with the appropriate --extra-cflags
+and --extra-ldflags
.
+On Windows, you need to run the IDL files through widl
.
+
+
DeckLink is very picky about the formats it supports. Pixel format is always
+uyvy422, framerate and video size must be determined for your device with
+-list_formats 1
. Audio sample rate is always 48 kHz.
+
+
+
4.3.1 Options# TOC
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+list_formats
+If set to true , print a list of supported formats and exit.
+Defaults to false .
+
+
+preroll
+Amount of time to preroll video in seconds.
+Defaults to 0.5 .
+
+
+
+
+
+
4.3.2 Examples# TOC
+
+
+ List output devices:
+
+
ffmpeg -i test.avi -f decklink -list_devices 1 dummy
+
+
+ List supported formats:
+
+
ffmpeg -i test.avi -f decklink -list_formats 1 'DeckLink Mini Monitor'
+
+
+ Play video clip:
+
+
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 'DeckLink Mini Monitor'
+
+
+ Play video clip with non-standard framerate or video size:
+
+
ffmpeg -i test.avi -f decklink -pix_fmt uyvy422 -s 720x486 -r 24000/1001 'DeckLink Mini Monitor'
+
+
+
+
+
+
4.4 fbdev# TOC
+
+
Linux framebuffer output device.
+
+
The Linux framebuffer is a graphic hardware-independent abstraction
+layer to show graphics on a computer monitor, typically on the
+console. It is accessed through a file device node, usually
+/dev/fb0 .
+
+
For more detailed information read the file
+Documentation/fb/framebuffer.txt included in the Linux source tree.
+
+
+
4.4.1 Options# TOC
+
+xoffset
+yoffset
+Set x/y coordinate of top left corner. Default is 0.
+
+
+
+
+
4.4.2 Examples# TOC
+
Play a file on framebuffer device /dev/fb0 .
+Required pixel format depends on current framebuffer settings.
+
+
ffmpeg -re -i INPUT -vcodec rawvideo -pix_fmt bgra -f fbdev /dev/fb0
+
+
+
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
+
+
+
4.5 opengl# TOC
+
OpenGL output device.
+
+
To enable this output device you need to configure FFmpeg with --enable-opengl
.
+
+
This output device allows one to render to OpenGL context.
+Context may be provided by the application, otherwise a default SDL window is created.
+
+
When device renders to external context, application must implement handlers for following messages:
+AV_DEV_TO_APP_CREATE_WINDOW_BUFFER
- create OpenGL context on current thread.
+AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER
- make OpenGL context current.
+AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER
- swap buffers.
+AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER
- destroy OpenGL context.
+Application is also required to inform a device about current resolution by sending AV_APP_TO_DEV_WINDOW_SIZE
message.
+
+
+
4.5.1 Options# TOC
+
+background
+Set background color. Black is a default.
+
+no_window
+Disables default SDL window when set to non-zero value.
+Application must provide OpenGL context and both window_size_cb
and window_swap_buffers_cb
callbacks when set.
+
+window_title
+Set the SDL window title, if not specified default to the filename specified for the output device.
+Ignored when no_window is set.
+
+window_size
+Set preferred window size, can be a string of the form widthxheight or a video size abbreviation.
+If not specified it defaults to the size of the input video, downscaled according to the aspect ratio.
+Mostly usable when no_window is not set.
+
+
+
+
+
+
4.5.2 Examples# TOC
+
Play a file on SDL window using OpenGL rendering:
+
+
ffmpeg -i INPUT -f opengl "window title"
+
+
+
+
+
+
4.6 oss# TOC
+
+
OSS (Open Sound System) output device.
+
+
+
4.7 pulse# TOC
+
+
PulseAudio output device.
+
+
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
+
+
More information about PulseAudio can be found on http://www.pulseaudio.org .
+
+
+
4.7.1 Options# TOC
+
+server
+Connect to a specific PulseAudio server, specified by an IP address.
+Default server is used when not provided.
+
+
+name
+Specify the application name PulseAudio will use when showing active clients,
+by default it is the LIBAVFORMAT_IDENT
string.
+
+
+stream_name
+Specify the stream name PulseAudio will use when showing active streams,
+by default it is set to the specified output name.
+
+
+device
+Specify the device to use. Default device is used when not provided.
+List of output devices can be obtained with command pactl list sinks
.
+
+
+buffer_size
+buffer_duration
+Control the size and duration of the PulseAudio buffer. A small buffer
+gives more control, but requires more frequent updates.
+
+buffer_size specifies size in bytes while
+buffer_duration specifies duration in milliseconds.
+
+When both options are provided then the highest value is used
+(duration is recalculated to bytes using stream parameters). If they
+are set to 0 (which is default), the device will use the default
+PulseAudio duration value. By default PulseAudio set buffer duration
+to around 2 seconds.
+
+
+prebuf
+Specify pre-buffering size in bytes. The server does not start with
+playback before at least prebuf bytes are available in the
+buffer. By default this option is initialized to the same value as
+buffer_size or buffer_duration (whichever is bigger).
+
+
+minreq
+Specify minimum request size in bytes. The server does not request less
+than minreq bytes from the client, instead waits until the buffer
+is free enough to request more bytes at once. It is recommended to not set
+this option, which will initialize this to a value that is deemed sensible
+by the server.
+
+
+
+
+
+
4.7.2 Examples# TOC
+
Play a file on default device on default server:
+
+
ffmpeg -i INPUT -f pulse "stream name"
+
+
+
+
+
+
4.8 sdl# TOC
+
+
SDL (Simple DirectMedia Layer) output device.
+
+
This output device allows one to show a video stream in an SDL
+window. Only one SDL window is allowed per application, so you can
+have only one instance of this output device in an application.
+
+
To enable this output device you need libsdl installed on your system
+when configuring your build.
+
+
For more information about SDL, check:
+http://www.libsdl.org/
+
+
+
4.8.1 Options# TOC
+
+
+window_title
+Set the SDL window title, if not specified default to the filename
+specified for the output device.
+
+
+icon_title
+Set the name of the iconified SDL window, if not specified it is set
+to the same value of window_title .
+
+
+window_size
+Set the SDL window size, can be a string of the form
+width xheight or a video size abbreviation.
+If not specified it defaults to the size of the input video,
+downscaled according to the aspect ratio.
+
+
+window_fullscreen
+Set fullscreen mode when non-zero value is provided.
+Default value is zero.
+
+
+
+
+
4.8.2 Interactive commands# TOC
+
+
The window created by the device can be controlled through the
+following interactive commands.
+
+
+q, ESC
+Quit the device immediately.
+
+
+
+
+
4.8.3 Examples# TOC
+
+
The following command shows the ffmpeg
output in an
+SDL window, forcing its size to the qcif format:
+
+
ffmpeg -i INPUT -vcodec rawvideo -pix_fmt yuv420p -window_size qcif -f sdl "SDL output"
+
+
+
+
4.9 sndio# TOC
+
+
sndio audio output device.
+
+
+
+
+
4.10 xv# TOC
+
+
XV (XVideo) output device.
+
+
This output device allows one to show a video stream in a X Window System
+window.
+
+
+
4.10.1 Options# TOC
+
+
+display_name
+Specify the hardware display name, which determines the display and
+communications domain to be used.
+
+The display name or DISPLAY environment variable can be a string in
+the format hostname [:number [.screen_number ]].
+
+hostname specifies the name of the host machine on which the
+display is physically attached. number specifies the number of
+the display server on that host machine. screen_number specifies
+the screen to be used on that server.
+
+If unspecified, it defaults to the value of the DISPLAY environment
+variable.
+
+For example, dual-headed:0.1
would specify screen 1 of display
+0 on the machine named “dual-headed”.
+
+Check the X11 specification for more detailed information about the
+display name format.
+
+
+window_id
+When set to non-zero value then device doesn’t create new window,
+but uses existing one with provided window_id . By default
+this option is set to zero and the device creates its own window.
+
+
+window_size
+Set the created window size, can be a string of the form
+width xheight or a video size abbreviation. If not
+specified it defaults to the size of the input video.
+Ignored when window_id is set.
+
+
+window_x
+window_y
+Set the X and Y window offsets for the created window. They are both
+set to 0 by default. The values may be ignored by the window manager.
+Ignored when window_id is set.
+
+
+window_title
+Set the window title, if not specified default to the filename
+specified for the output device. Ignored when window_id is set.
+
+
+
+
For more information about XVideo see http://www.x.org/ .
+
+
+
4.10.2 Examples# TOC
+
+
+ Decode, display and encode video input with ffmpeg
at the
+same time:
+
+
ffmpeg -i INPUT OUTPUT -f xv display
+
+
+ Decode and display the input video to multiple X11 windows:
+
+
ffmpeg -i INPUT -f xv normal -vf negate -f xv negated
+
+
+
+
+
+
5 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavdevice
+
+
+
+
6 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-filters.html b/Externals/ffmpeg/dev/doc/ffmpeg-filters.html
new file mode 100644
index 0000000000..b0373dc55a
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-filters.html
@@ -0,0 +1,13382 @@
+
+
+
+
+
+
+ FFmpeg Filters Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Filters Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes filters, sources, and sinks provided by the
+libavfilter library.
+
+
+
+
2 Filtering Introduction# TOC
+
+
Filtering in FFmpeg is enabled through the libavfilter library.
+
+
In libavfilter, a filter can have multiple inputs and multiple
+outputs.
+To illustrate the sorts of things that are possible, we consider the
+following filtergraph.
+
+
+
[main]
+input --> split ---------------------> overlay --> output
+ | ^
+ |[tmp] [flip]|
+ +-----> crop --> vflip -------+
+
+
+
This filtergraph splits the input stream in two streams, then sends one
+stream through the crop filter and the vflip filter, before merging it
+back with the other stream by overlaying it on top. You can use the
+following command to achieve this:
+
+
+
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+
+
The result will be that the top half of the video is mirrored
+onto the bottom half of the output video.
+
+
Filters in the same linear chain are separated by commas, and distinct
+linear chains of filters are separated by semicolons. In our example,
+crop,vflip are in one linear chain, split and
+overlay are separately in another. The points where the linear
+chains join are labelled by names enclosed in square brackets. In the
+example, the split filter generates two outputs that are associated to
+the labels [main] and [tmp] .
+
+
The stream sent to the second output of split , labelled as
+[tmp] , is processed through the crop filter, which crops
+away the lower half part of the video, and then vertically flipped. The
+overlay filter takes in input the first unchanged output of the
+split filter (which was labelled as [main] ), and overlay on its
+lower half the output generated by the crop,vflip filterchain.
+
+
Some filters take in input a list of parameters: they are specified
+after the filter name and an equal sign, and are separated from each other
+by a colon.
+
+
There exist so-called source filters that do not have an
+audio/video input, and sink filters that will not have audio/video
+output.
+
+
+
+
3 graph2dot# TOC
+
+
The graph2dot program included in the FFmpeg tools
+directory can be used to parse a filtergraph description and issue a
+corresponding textual representation in the dot language.
+
+
Invoke the command:
+
+
+
to see how to use graph2dot .
+
+
You can then pass the dot description to the dot program (from
+the graphviz suite of programs) and obtain a graphical representation
+of the filtergraph.
+
+
For example the sequence of commands:
+
+
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+
+
can be used to create and display an image representing the graph
+described by the GRAPH_DESCRIPTION string. Note that this string must be
+a complete self-contained graph, with its inputs and outputs explicitly defined.
+For example if your command line is of the form:
+
+
ffmpeg -i infile -vf scale=640:360 outfile
+
+
your GRAPH_DESCRIPTION string will need to be of the form:
+
+
nullsrc,scale=640:360,nullsink
+
+
you may also need to set the nullsrc parameters and add a format
+filter in order to simulate a specific input file.
+
+
+
+
4 Filtergraph description# TOC
+
+
A filtergraph is a directed graph of connected filters. It can contain
+cycles, and there can be multiple links between a pair of
+filters. Each link has one input pad on one side connecting it to one
+filter from which it takes its input, and one output pad on the other
+side connecting it to one filter accepting its output.
+
+
Each filter in a filtergraph is an instance of a filter class
+registered in the application, which defines the features and the
+number of input and output pads of the filter.
+
+
A filter with no input pads is called a "source", and a filter with no
+output pads is called a "sink".
+
+
+
4.1 Filtergraph syntax# TOC
+
+
A filtergraph has a textual representation, which is
+recognized by the -filter /-vf and -filter_complex
+options in ffmpeg
and -vf in ffplay
, and by the
+avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
+libavfilter/avfilter.h .
+
+
A filterchain consists of a sequence of connected filters, each one
+connected to the previous one in the sequence. A filterchain is
+represented by a list of ","-separated filter descriptions.
+
+
A filtergraph consists of a sequence of filterchains. A sequence of
+filterchains is represented by a list of ";"-separated filterchain
+descriptions.
+
+
A filter is represented by a string of the form:
+[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
+
+
filter_name is the name of the filter class of which the
+described filter is an instance of, and has to be the name of one of
+the filter classes registered in the program.
+The name of the filter class is optionally followed by a string
+"=arguments ".
+
+
arguments is a string which contains the parameters used to
+initialize the filter instance. It may have one of two forms:
+
+ A ’:’-separated list of key=value pairs.
+
+ A ’:’-separated list of value . In this case, the keys are assumed to be
+the option names in the order they are declared. E.g. the fade
filter
+declares three options in this order – type , start_frame and
+nb_frames . Then the parameter list in:0:30 means that the value
+in is assigned to the option type , 0 to
+start_frame and 30 to nb_frames .
+
+ A ’:’-separated list of mixed direct value and long key=value
+pairs. The direct value must precede the key=value pairs, and
+follow the same constraints order of the previous point. The following
+key=value pairs can be set in any preferred order.
+
+
+
+
If the option value itself is a list of items (e.g. the format
filter
+takes a list of pixel formats), the items in the list are usually separated by
+’|’.
+
+
The list of arguments can be quoted using the character "’" as initial
+and ending mark, and the character ’\’ for escaping the characters
+within the quoted text; otherwise the argument string is considered
+terminated when the next special character (belonging to the set
+"[]=;,") is encountered.
+
+
The name and arguments of the filter are optionally preceded and
+followed by a list of link labels.
+A link label allows one to name a link and associate it to a filter output
+or input pad. The preceding labels in_link_1
+... in_link_N , are associated to the filter input pads,
+the following labels out_link_1 ... out_link_M , are
+associated to the output pads.
+
+
When two link labels with the same name are found in the
+filtergraph, a link between the corresponding input and output pad is
+created.
+
+
If an output pad is not labelled, it is linked by default to the first
+unlabelled input pad of the next filter in the filterchain.
+For example in the filterchain
+
+
nullsrc, split[L1], [L2]overlay, nullsink
+
+
the split filter instance has two output pads, and the overlay filter
+instance two input pads. The first output pad of split is labelled
+"L1", the first input pad of overlay is labelled "L2", and the second
+output pad of split is linked to the second input pad of overlay,
+which are both unlabelled.
+
+
In a complete filterchain all the unlabelled filter input and output
+pads must be connected. A filtergraph is considered valid if all the
+filter input and output pads of all the filterchains are connected.
+
+
Libavfilter will automatically insert scale filters where format
+conversion is required. It is possible to specify swscale flags
+for those automatically inserted scalers by prepending
+sws_flags=flags ;
+to the filtergraph description.
+
+
Here is a BNF description of the filtergraph syntax:
+
+
NAME ::= sequence of alphanumeric characters and '_'
+LINKLABEL ::= "[" NAME "]"
+LINKLABELS ::= LINKLABEL [LINKLABELS ]
+FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
+FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
+FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
+FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
+
+
+
+
4.2 Notes on filtergraph escaping# TOC
+
+
Filtergraph description composition entails several levels of
+escaping. See (ffmpeg-utils)the "Quoting and escaping"
+section in the ffmpeg-utils(1) manual for more
+information about the employed escaping procedure.
+
+
A first level escaping affects the content of each filter option
+value, which may contain the special character :
used to
+separate values, or one of the escaping characters \'
.
+
+
A second level escaping affects the whole filter description, which
+may contain the escaping characters \'
or the special
+characters [],;
used by the filtergraph description.
+
+
Finally, when you specify a filtergraph on a shell commandline, you
+need to perform a third level escaping for the shell special
+characters contained within it.
+
+
For example, consider the following string to be embedded in
+the drawtext filter description text value:
+
+
this is a 'string': may contain one, or more, special characters
+
+
+
This string contains the '
special escaping character, and the
+:
special character, so it needs to be escaped in this way:
+
+
text=this is a \'string\'\: may contain one, or more, special characters
+
+
+
A second level of escaping is required when embedding the filter
+description in a filtergraph description, in order to escape all the
+filtergraph special characters. Thus the example above becomes:
+
+
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+
(note that in addition to the \'
escaping special characters,
+also ,
needs to be escaped).
+
+
Finally an additional level of escaping is needed when writing the
+filtergraph description in a shell command, which depends on the
+escaping rules of the adopted shell. For example, assuming that
+\
is special and needs to be escaped with another \
, the
+previous string will finally result in:
+
+
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+
+
+
5 Timeline editing# TOC
+
+
Some filters support a generic enable option. For the filters
+supporting timeline editing, this option can be set to an expression which is
+evaluated before sending a frame to the filter. If the evaluation is non-zero,
+the filter will be enabled, otherwise the frame will be sent unchanged to the
+next filter in the filtergraph.
+
+
The expression accepts the following values:
+
+‘t ’
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+‘n ’
+sequential number of the input frame, starting from 0
+
+
+‘pos ’
+the position in the file of the input frame, NAN if unknown
+
+
+‘w ’
+‘h ’
+width and height of the input frame if video
+
+
+
+
Additionally, these filters support an enable command that can be used
+to re-define the expression.
+
+
Like any other filtering option, the enable option follows the same
+rules.
+
+
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
+minutes, and a curves filter starting at 3 seconds:
+
+
smartblur = enable='between(t,10,3*60)',
+curves = enable='gte(t,3)' : preset=cross_process
+
+
+
+
+
6 Audio Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the audio filters included in your
+build.
+
+
Below is a description of the currently available audio filters.
+
+
+
6.1 adelay# TOC
+
+
Delay one or more audio channels.
+
+
Samples in delayed channel are filled with silence.
+
+
The filter accepts the following option:
+
+
+delays
+Set list of delays in milliseconds for each channel separated by ’|’.
+At least one delay greater than 0 should be provided.
+Unused delays will be silently ignored. If the number of given delays is
+smaller than the number of channels, all remaining channels will not be delayed.
+
+
+
+
+
6.1.1 Examples# TOC
+
+
+ Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
+the second channel (and any other channels that may be present) unchanged.
+
+
+
+
+
6.2 aecho# TOC
+
+
Apply echoing to the input audio.
+
+
Echoes are reflected sound and can occur naturally amongst mountains
+(and sometimes large buildings) when talking or shouting; digital echo
+effects emulate this behaviour and are often used to help fill out the
+sound of a single instrument or vocal. The time difference between the
+original signal and the reflection is the delay
, and the
+loudness of the reflected signal is the decay
.
+Multiple echoes can have different delays and decays.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain of reflected signal. Default is 0.6
.
+
+
+out_gain
+Set output gain of reflected signal. Default is 0.3
.
+
+
+delays
+Set list of time intervals in milliseconds between original signal and reflections
+separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
+Default is 1000
.
+
+
+decays
+Set list of loudnesses of reflected signals separated by ’|’.
+Allowed range for each decay
is (0 - 1.0]
.
+Default is 0.5
.
+
+
+
+
+
6.2.1 Examples# TOC
+
+
+ Make it sound as if there are twice as many instruments as are actually playing:
+
+
+ If delay is very short, then it sounds like a (metallic) robot playing music:
+
+
+ A longer delay will sound like an open air concert in the mountains:
+
+
aecho=0.8:0.9:1000:0.3
+
+
+ Same as above but with one more mountain:
+
+
aecho=0.8:0.9:1000|1800:0.3|0.25
+
+
+
+
+
6.3 aeval# TOC
+
+
Modify an audio signal according to the specified expressions.
+
+
This filter accepts one or more expressions (one for each channel),
+which are evaluated and used to modify a corresponding audio signal.
+
+
It accepts the following parameters:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. If
+the number of input channels is greater than the number of
+expressions, the last specified expression is used for the remaining
+output channels.
+
+
+channel_layout, c
+Set output channel layout. If not specified, the channel layout is
+specified by the number of expressions. If set to ‘same ’, it will
+use by default the same input channel layout.
+
+
+
+
Each expression in exprs can contain the following constants and functions:
+
+
+ch
+channel number of the current expression
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+s
+sample rate
+
+
+t
+time of the evaluated sample expressed in seconds
+
+
+nb_in_channels
+nb_out_channels
+input and output number of channels
+
+
+val(CH)
+the value of input channel with number CH
+
+
+
+
Note: this filter is slow. For faster processing you should use a
+dedicated filter.
+
+
+
6.3.1 Examples# TOC
+
+
+ Half volume:
+
+
aeval=val(ch)/2:c=same
+
+
+ Invert phase of the second channel:
+
+
+
+
+
6.4 afade# TOC
+
+
Apply fade-in/out effect to input audio.
+
+
A description of the accepted parameters follows.
+
+
+type, t
+Specify the effect type, can be either in
for fade-in, or
+out
for a fade-out effect. Default is in
.
+
+
+start_sample, ss
+Specify the number of the start sample for starting to apply the fade
+effect. Default is 0.
+
+
+nb_samples, ns
+Specify the number of samples for which the fade effect has to last. At
+the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence. Default is 44100.
+
+
+start_time, st
+Specify the start time of the fade effect. Default is 0.
+The value must be specified as a time duration; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+If set this option is used instead of start_sample .
+
+
+duration, d
+Specify the duration of the fade effect. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+At the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence.
+By default the duration is determined by nb_samples .
+If set this option is used instead of nb_samples .
+
+
+curve
+Set curve for fade transition.
+
+It accepts the following values:
+
+tri
+select triangular, linear slope (default)
+
+qsin
+select quarter of sine wave
+
+hsin
+select half of sine wave
+
+esin
+select exponential sine wave
+
+log
+select logarithmic
+
+par
+select inverted parabola
+
+qua
+select quadratic
+
+cub
+select cubic
+
+squ
+select square root
+
+cbr
+select cubic root
+
+
+
+
+
+
+
6.4.1 Examples# TOC
+
+
+ Fade in first 15 seconds of audio:
+
+
+ Fade out last 25 seconds of a 900 seconds audio:
+
+
afade=t=out:st=875:d=25
+
+
+
+
+
6.5 aformat# TOC
+
+
Set output format constraints for the input audio. The framework will
+negotiate the most appropriate format to minimize conversions.
+
+
It accepts the following parameters:
+
+sample_fmts
+A ’|’-separated list of requested sample formats.
+
+
+sample_rates
+A ’|’-separated list of requested sample rates.
+
+
+channel_layouts
+A ’|’-separated list of requested channel layouts.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+
+
If a parameter is omitted, all values are allowed.
+
+
Force the output to either unsigned 8-bit or signed 16-bit stereo
+
+
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+
+
+
6.6 allpass# TOC
+
+
Apply a two-pole all-pass filter with central frequency (in Hz)
+frequency , and filter-width width .
+An all-pass filter changes the audio’s frequency to phase relationship
+without changing its frequency to amplitude relationship.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
6.7 amerge# TOC
+
+
Merge two or more audio streams into a single multi-channel stream.
+
+
The filter accepts the following options:
+
+
+inputs
+Set the number of inputs. Default is 2.
+
+
+
+
+
If the channel layouts of the inputs are disjoint, and therefore compatible,
+the channel layout of the output will be set accordingly and the channels
+will be reordered as necessary. If the channel layouts of the inputs are not
+disjoint, the output will have all the channels of the first input then all
+the channels of the second input, in that order, and the channel layout of
+the output will be the default value corresponding to the total number of
+channels.
+
+
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
+is FC+BL+BR, then the output will be in 5.1, with the channels in the
+following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
+first input, b1 is the first channel of the second input).
+
+
On the other hand, if both input are in stereo, the output channels will be
+in the default order: a1, a2, b1, b2, and the channel layout will be
+arbitrarily set to 4.0, which may or may not be the expected value.
+
+
All inputs must have the same sample rate, and format.
+
+
If inputs do not have the same duration, the output will stop with the
+shortest.
+
+
+
6.7.1 Examples# TOC
+
+
+ Merge two mono files into a stereo stream:
+
+
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
+
+
+ Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
+
+
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
+
+
+
+
+
6.8 amix# TOC
+
+
Mixes multiple audio inputs into a single output.
+
+
Note that this filter only supports float samples (the amerge
+and pan audio filters support many formats). If the amix
+input has integer samples then aresample will be automatically
+inserted to perform the conversion to float samples.
+
+
For example
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+
will mix 3 input audio streams to a single output with the same duration as the
+first input and a dropout transition time of 3 seconds.
+
+
It accepts the following parameters:
+
+inputs
+The number of inputs. If unspecified, it defaults to 2.
+
+
+duration
+How to determine the end-of-stream.
+
+longest
+The duration of the longest input. (default)
+
+
+shortest
+The duration of the shortest input.
+
+
+first
+The duration of the first input.
+
+
+
+
+
+dropout_transition
+The transition time, in seconds, for volume renormalization when an input
+stream ends. The default value is 2 seconds.
+
+
+
+
+
+
6.9 anull# TOC
+
+
Pass the audio source unchanged to the output.
+
+
+
6.10 apad# TOC
+
+
Pad the end of an audio stream with silence.
+
+
This can be used together with ffmpeg
-shortest to
+extend audio streams to the same length as the video stream.
+
+
A description of the accepted options follows.
+
+
+packet_size
+Set silence packet size. Default value is 4096.
+
+
+pad_len
+Set the number of samples of silence to add to the end. After the
+value is reached, the stream is terminated. This option is mutually
+exclusive with whole_len .
+
+
+whole_len
+Set the minimum total number of samples in the output audio stream. If
+the value is longer than the input audio length, silence is added to
+the end, until the value is reached. This option is mutually exclusive
+with pad_len .
+
+
+
+
If neither the pad_len nor the whole_len option is
+set, the filter will add silence to the end of the input stream
+indefinitely.
+
+
+
6.10.1 Examples# TOC
+
+
+ Add 1024 samples of silence to the end of the input:
+
+
+ Make sure the audio output will contain at least 10000 samples, pad
+the input with silence if required:
+
+
+ Use ffmpeg
to pad the audio input with silence, so that the
+video stream will always be the shortest and will be converted
+until the end in the output file when using the shortest
+option:
+
+
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
+
+
+
+
+
6.11 aphaser# TOC
+
Add a phasing effect to the input audio.
+
+
A phaser filter creates series of peaks and troughs in the frequency spectrum.
+The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain. Default is 0.4.
+
+
+out_gain
+Set output gain. Default is 0.74
+
+
+delay
+Set delay in milliseconds. Default is 3.0.
+
+
+decay
+Set decay. Default is 0.4.
+
+
+speed
+Set modulation speed in Hz. Default is 0.5.
+
+
+type
+Set modulation type. Default is triangular.
+
+It accepts the following values:
+
+‘triangular, t ’
+‘sinusoidal, s ’
+
+
+
+
+
+
6.12 aresample# TOC
+
+
Resample the input audio to the specified parameters, using the
+libswresample library. If none are specified then the filter will
+automatically convert between its input and output.
+
+
This filter is also able to stretch/squeeze the audio data to make it match
+the timestamps or to inject silence / cut out audio to make it match the
+timestamps, do a combination of both or do neither.
+
+
The filter accepts the syntax
+[sample_rate :]resampler_options , where sample_rate
+expresses a sample rate and resampler_options is a list of
+key =value pairs, separated by ":". See the
+ffmpeg-resampler manual for the complete list of supported options.
+
+
+
6.12.1 Examples# TOC
+
+
+ Resample the input audio to 44100Hz:
+
+
+ Stretch/squeeze samples to the given timestamps, with a maximum of 1000
+samples per second compensation:
+
+
+
+
+
6.13 asetnsamples# TOC
+
+
Set the number of samples per each output audio frame.
+
+
The last output packet may contain a different number of samples, as
+the filter will flush all the remaining samples when the input audio
+signals its end.
+
+
The filter accepts the following options:
+
+
+nb_out_samples, n
+Set the number of samples per each output audio frame. The number is
+intended as the number of samples per each channel .
+Default value is 1024.
+
+
+pad, p
+If set to 1, the filter will pad the last audio frame with zeroes, so
+that the last frame will contain the same number of samples as the
+previous ones. Default value is 1.
+
+
+
+
For example, to set the number of per-frame samples to 1234 and
+disable padding for the last frame, use:
+
+
asetnsamples=n=1234:p=0
+
+
+
+
6.14 asetrate# TOC
+
+
Set the sample rate without altering the PCM data.
+This will result in a change of speed and pitch.
+
+
The filter accepts the following options:
+
+
+sample_rate, r
+Set the output sample rate. Default is 44100 Hz.
+
+
+
+
+
6.15 ashowinfo# TOC
+
+
Show a line containing various information for each input audio frame.
+The input audio is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The presentation timestamp of the input frame, in time base units; the time base
+depends on the filter input pad, and is usually 1/sample_rate .
+
+
+pts_time
+The presentation timestamp of the input frame in seconds.
+
+
+pos
+position of the frame in the input stream, -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic audio)
+
+
+fmt
+The sample format.
+
+
+chlayout
+The channel layout.
+
+
+rate
+The sample rate for the audio frame.
+
+
+nb_samples
+The number of samples (per channel) in the frame.
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
+audio, the data is treated as if all the planes were concatenated.
+
+
+plane_checksums
+A list of Adler-32 checksums for each data plane.
+
+
+
+
+
6.16 astats# TOC
+
+
Display time domain statistical information about the audio channels.
+Statistics are calculated and displayed for each audio channel and,
+where applicable, an overall figure is also given.
+
+
It accepts the following option:
+
+length
+Short window length in seconds, used for peak and trough RMS measurement.
+Default is 0.05
(50 milliseconds). Allowed range is [0.01 - 10]
.
+
+
+
+
A description of each shown parameter follows:
+
+
+DC offset
+Mean amplitude displacement from zero.
+
+
+Min level
+Minimal sample level.
+
+
+Max level
+Maximal sample level.
+
+
+Peak level dB
+RMS level dB
+Standard peak and RMS level measured in dBFS.
+
+
+RMS peak dB
+RMS trough dB
+Peak and trough values for RMS level measured over a short window.
+
+
+Crest factor
+Standard ratio of peak to RMS level (note: not in dB).
+
+
+Flat factor
+Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
+(i.e. either Min level or Max level ).
+
+
+Peak count
+Number of occasions (not the number of samples) that the signal attained either
+Min level or Max level .
+
+
+
+
+
6.17 astreamsync# TOC
+
+
Forward two audio streams and control the order the buffers are forwarded.
+
+
The filter accepts the following options:
+
+
+expr, e
+Set the expression deciding which stream should be
+forwarded next: if the result is negative, the first stream is forwarded; if
+the result is positive or zero, the second stream is forwarded. It can use
+the following variables:
+
+
+b1 b2
+number of buffers forwarded so far on each stream
+
+s1 s2
+number of samples forwarded so far on each stream
+
+t1 t2
+current timestamp of each stream
+
+
+
+The default value is t1-t2
, which means to always forward the stream
+that has a smaller timestamp.
+
+
+
+
+
6.17.1 Examples# TOC
+
+
Stress-test amerge
by randomly sending buffers on the wrong
+input, while avoiding too much of a desynchronization:
+
+
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+
+
+
6.18 asyncts# TOC
+
+
Synchronize audio data with timestamps by squeezing/stretching it and/or
+dropping samples/adding silence when needed.
+
+
This filter is not built by default, please use aresample to do squeezing/stretching.
+
+
It accepts the following parameters:
+
+compensate
+Enable stretching/squeezing the data to make it match the timestamps. Disabled
+by default. When disabled, time gaps are covered with silence.
+
+
+min_delta
+The minimum difference between timestamps and audio data (in seconds) to trigger
+adding/dropping samples. The default value is 0.1. If you get an imperfect
+sync with this filter, try setting this parameter to 0.
+
+
+max_comp
+The maximum compensation in samples per second. Only relevant with compensate=1.
+The default value is 500.
+
+
+first_pts
+Assume that the first PTS should be this value. The time base is 1 / sample
+rate. This allows for padding/trimming at the start of the stream. By default,
+no assumption is made about the first frame’s expected PTS, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative PTS due to encoder delay.
+
+
+
+
+
+
6.19 atempo# TOC
+
+
Adjust audio tempo.
+
+
The filter accepts exactly one parameter, the audio tempo. If not
+specified then the filter will assume nominal 1.0 tempo. Tempo must
+be in the [0.5, 2.0] range.
+
+
+
6.19.1 Examples# TOC
+
+
+ Slow down audio to 80% tempo:
+
+
+ To speed up audio to 125% tempo:
+
+
+
+
+
6.20 atrim# TOC
+
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Timestamp (in seconds) of the start of the section to keep. I.e. the audio
+sample with the timestamp start will be the first sample in the output.
+
+
+end
+Specify time of the first audio sample that will be dropped, i.e. the
+audio sample immediately preceding the one with the timestamp end will be
+the last sample in the output.
+
+
+start_pts
+Same as start , except this option sets the start timestamp in samples
+instead of seconds.
+
+
+end_pts
+Same as end , except this option sets the end timestamp in samples instead
+of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_sample
+The number of the first sample that should be output.
+
+
+end_sample
+The number of the first sample that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _sample options simply count the
+samples that pass through the filter. So start/end_pts and start/end_sample will
+give different results when the timestamps are wrong, inexact or do not start at
+zero. Also note that this filter does not modify the timestamps. If you wish
+to have the output timestamps start at zero, insert the asetpts filter after the
+atrim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all samples that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple atrim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -af atrim=60:120
+
+
+ Keep only the first 1000 samples:
+
+
ffmpeg -i INPUT -af atrim=end_sample=1000
+
+
+
+
+
+
6.21 bandpass# TOC
+
+
Apply a two-pole Butterworth band-pass filter with central
+frequency frequency , and (3dB-point) band-width width.
+The csg option selects a constant skirt gain (peak gain = Q)
+instead of the default: constant 0dB peak gain.
+The filter roll off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+csg
+Constant skirt gain if set to 1. Defaults to 0.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
6.22 bandreject# TOC
+
+
Apply a two-pole Butterworth band-reject filter with central
+frequency frequency , and (3dB-point) band-width width .
+The filter roll off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
6.23 bass# TOC
+
+
Boost or cut the bass (lower) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at 0 Hz. Its useful range is about -20
+(for a large cut) to +20 (for a large boost).
+Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 100
Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep the filter’s shelf transition is.
+
+
+
+
+
6.24 biquad# TOC
+
+
Apply a biquad IIR filter with the given coefficients.
+Where b0 , b1 , b2 and a0 , a1 , a2
+are the numerator and denominator coefficients respectively.
+
+
+
6.25 bs2b# TOC
+
Bauer stereo to binaural transformation, which improves headphone listening of
+stereo audio records.
+
+
It accepts the following parameters:
+
+profile
+Pre-defined crossfeed level.
+
+default
+Default level (fcut=700, feed=50).
+
+
+cmoy
+Chu Moy circuit (fcut=700, feed=60).
+
+
+jmeier
+Jan Meier circuit (fcut=650, feed=95).
+
+
+
+
+
+fcut
+Cut frequency (in Hz).
+
+
+feed
+Feed level (in Hz).
+
+
+
+
+
+
6.26 channelmap# TOC
+
+
Remap input channels to new locations.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the output stream.
+
+
+map
+Map channels from input to output. The argument is a ’|’-separated list of
+mappings, each in the in_channel -out_channel
or
+in_channel form. in_channel can be either the name of the input
+channel (e.g. FL for front left) or its index in the input channel layout.
+out_channel is the name of the output channel or its index in the output
+channel layout. If out_channel is not given then it is implicitly an
+index, starting with zero and increasing by one for each mapping.
+
+
+
+
If no mapping is present, the filter will implicitly map input channels to
+output channels, preserving indices.
+
+
For example, assuming a 5.1+downmix input MOV file,
+
+
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+
will create an output WAV file tagged as stereo from the downmix channels of
+the input.
+
+
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
+
+
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+
+
+
6.27 channelsplit# TOC
+
+
Split each channel from an input audio stream into a separate output stream.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the input stream. The default is "stereo".
+
+
+
+
For example, assuming a stereo input MP3 file,
+
+
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+
will create an output Matroska file with two audio streams, one containing only
+the left channel and the other the right channel.
+
+
Split a 5.1 WAV file into per-channel files:
+
+
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+
+
+
6.28 compand# TOC
+
Compress or expand the audio’s dynamic range.
+
+
It accepts the following parameters:
+
+
+attacks
+decays
+A list of times in seconds for each channel over which the instantaneous level
+of the input signal is averaged to determine its volume. attacks refers to
+increase of volume and decays refers to decrease of volume. For most
+situations, the attack time (response to the audio getting louder) should be
+shorter than the decay time, because the human ear is more sensitive to sudden
+loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
+a typical value for decay is 0.8 seconds.
+
+
+points
+A list of points for the transfer function, specified in dB relative to the
+maximum possible signal amplitude. Each key points list must be defined using
+the following syntax: x0/y0|x1/y1|x2/y2|....
or
+x0/y0 x1/y1 x2/y2 ....
+
+The input values must be in strictly increasing order but the transfer function
+does not have to be monotonically rising. The point 0/0
is assumed but
+may be overridden (by 0/out-dBn
). Typical values for the transfer
+function are -70/-70|-60/-20
.
+
+
+soft-knee
+Set the curve radius in dB for all joints. It defaults to 0.01.
+
+
+gain
+Set the additional gain in dB to be applied at all points on the transfer
+function. This allows for easy adjustment of the overall gain.
+It defaults to 0.
+
+
+volume
+Set an initial volume, in dB, to be assumed for each channel when filtering
+starts. This permits the user to supply a nominal level initially, so that, for
+example, a very large gain is not applied to initial signal levels before the
+companding has begun to operate. A typical value for audio which is initially
+quiet is -90 dB. It defaults to 0.
+
+
+delay
+Set a delay, in seconds. The input audio is analyzed immediately, but audio is
+delayed before being fed to the volume adjuster. Specifying a delay
+approximately equal to the attack/decay times allows the filter to effectively
+operate in predictive rather than reactive mode. It defaults to 0.
+
+
+
+
+
+
6.28.1 Examples# TOC
+
+
+ Make music with both quiet and loud passages suitable for listening to in a
+noisy environment:
+
+
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
+
+
+ A noise gate for when the noise is at a lower level than the signal:
+
+
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
+
+
+ Here is another noise gate, this time for when the noise is at a higher level
+than the signal (making it, in some ways, similar to squelch):
+
+
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
+
+
+
+
+
6.29 earwax# TOC
+
+
Make audio easier to listen to on headphones.
+
+
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
+so that when listened to on headphones the stereo image is moved from
+inside your head (standard for headphones) to outside and in front of
+the listener (standard for speakers).
+
+
Ported from SoX.
+
+
+
6.30 equalizer# TOC
+
+
Apply a two-pole peaking equalisation (EQ) filter. With this
+filter, the signal-level at and around a selected frequency can
+be increased or decreased, whilst (unlike bandpass and bandreject
+filters) that at all other frequencies is unchanged.
+
+
In order to produce complex equalisation curves, this filter can
+be given several times, each with a different central frequency.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+gain, g
+Set the required gain or attenuation in dB.
+Beware of clipping when using a positive gain.
+
+
+
+
+
6.30.1 Examples# TOC
+
+ Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
+
+
equalizer=f=1000:width_type=h:width=200:g=-10
+
+
+ Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
+
+
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
+
+
+
+
+
6.31 flanger# TOC
+
Apply a flanging effect to the audio.
+
+
The filter accepts the following options:
+
+
+delay
+Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
+
+
+depth
+Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
+
+
+regen
+Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
+Default value is 0.
+
+
+width
+Set percentage of delayed signal mixed with original. Range from 0 to 100.
+Default value is 71.
+
+
+speed
+Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
+
+
+shape
+Set swept wave shape, can be triangular or sinusoidal .
+Default value is sinusoidal .
+
+
+phase
+Set swept wave percentage-shift for multi channel. Range from 0 to 100.
+Default value is 25.
+
+
+interp
+Set delay-line interpolation, linear or quadratic .
+Default is linear .
+
+
+
+
+
6.32 highpass# TOC
+
+
Apply a high-pass filter with 3dB point frequency.
+The filter can be either single-pole, or double-pole (the default).
+The filter roll off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 3000.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
6.33 join# TOC
+
+
Join multiple input streams into one multi-channel stream.
+
+
It accepts the following parameters:
+
+inputs
+The number of input streams. It defaults to 2.
+
+
+channel_layout
+The desired output channel layout. It defaults to stereo.
+
+
+map
+Map channels from inputs to output. The argument is a ’|’-separated list of
+mappings, each in the input_idx .in_channel -out_channel
+form. input_idx is the 0-based index of the input stream. in_channel
+can be either the name of the input channel (e.g. FL for front left) or its
+index in the specified input stream. out_channel is the name of the output
+channel.
+
+
+
+
The filter will attempt to guess the mappings when they are not specified
+explicitly. It does so by first trying to find an unused matching input channel
+and if that fails it picks the first unused input channel.
+
+
Join 3 inputs (with properly set channel layouts):
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+
+
Build a 5.1 output from 6 single-channel streams:
+
+
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+
+
+
6.34 ladspa# TOC
+
+
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-ladspa
.
+
+
+file, f
+Specifies the name of LADSPA plugin library to load. If the environment
+variable LADSPA_PATH
is defined, the LADSPA plugin is searched in
+each one of the directories specified by the colon separated list in
+LADSPA_PATH
, otherwise in the standard LADSPA paths, which are in
+this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
+/usr/lib/ladspa/ .
+
+
+plugin, p
+Specifies the plugin within the library. Some libraries contain only
+one plugin, but others contain many of them. If this is not set filter
+will list all available plugins within the specified library.
+
+
+controls, c
+Set the ’|’ separated list of controls which are zero or more floating point
+values that determine the behavior of the loaded plugin (for example delay,
+threshold or gain).
+Controls need to be defined using the following syntax:
+c0=value0 |c1=value1 |c2=value2 |..., where
+valuei is the value set on the i -th control.
+If controls is set to help
, all available controls and
+their valid ranges are printed.
+
+
+sample_rate, s
+Specify the sample rate, default to 44100. Only used if the plugin has
+zero inputs.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame, default
+is 1024. Only used if the plugin has zero inputs.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified duration,
+as the generated audio is always cut at the end of a complete frame.
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+Only used if the plugin has zero inputs.
+
+
+
+
+
+
6.34.1 Examples# TOC
+
+
+ List all available plugins within amp (LADSPA example plugin) library:
+
+
+ List all available controls and their valid ranges for vcf_notch
+plugin from VCF
library:
+
+
ladspa=f=vcf:p=vcf_notch:c=help
+
+
+ Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
+plugin library:
+
+
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
+
+
+ Add reverberation to the audio using TAP-plugins
+(Tom’s Audio Processing plugins):
+
+
ladspa=file=tap_reverb:tap_reverb
+
+
+ Generate white noise, with 0.2 amplitude:
+
+
ladspa=file=cmt:noise_source_white:c=c0=.2
+
+
+ Generate 20 bpm clicks using plugin C* Click - Metronome
from the
+C* Audio Plugin Suite
(CAPS) library:
+
+
ladspa=file=caps:Click:c=c1=20
+
+
+ Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
+
+
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
+
+
+
+
+
6.34.2 Commands# TOC
+
+
This filter supports the following commands:
+
+cN
+Modify the N -th control value.
+
+If the specified value is not valid, it is ignored and prior one is kept.
+
+
+
+
+
6.35 lowpass# TOC
+
+
Apply a low-pass filter with 3dB point frequency.
+The filter can be either single-pole or double-pole (the default).
+The filter roll off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 500.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
6.36 pan# TOC
+
+
Mix channels with specific gain levels. The filter accepts the output
+channel layout followed by a set of channels definitions.
+
+
This filter is also designed to efficiently remap the channels of an audio
+stream.
+
+
The filter accepts parameters of the form:
+"l |outdef |outdef |..."
+
+
+l
+output channel layout or number of channels
+
+
+outdef
+output channel specification, of the form:
+"out_name =[gain *]in_name [+[gain *]in_name ...]"
+
+
+out_name
+output channel to define, either a channel name (FL, FR, etc.) or a channel
+number (c0, c1, etc.)
+
+
+gain
+multiplicative coefficient for the channel, 1 leaving the volume unchanged
+
+
+in_name
+input channel to use, see out_name for details; it is not possible to mix
+named and numbered input channels
+
+
+
+
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
+that specification will be renormalized so that the total is 1, thus
+avoiding clipping noise.
+
+
+
6.36.1 Mixing examples# TOC
+
+
For example, if you want to down-mix from stereo to mono, but with a bigger
+factor for the left channel:
+
+
pan=1c|c0=0.9*c0+0.1*c1
+
+
+
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
+7-channels surround:
+
+
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+
+
Note that ffmpeg
integrates a default down-mix (and up-mix) system
+that should be preferred (see "-ac" option) unless you have very specific
+needs.
+
+
+
6.36.2 Remapping examples# TOC
+
+
The channel remapping will be effective if, and only if:
+
+
+ gain coefficients are zeroes or ones,
+ only one input per channel output,
+
+
+
If all these conditions are satisfied, the filter will notify the user ("Pure
+channel mapping detected"), and use an optimized and lossless method to do the
+remapping.
+
+
For example, if you have a 5.1 source and want a stereo audio stream by
+dropping the extra channels:
+
+
pan="stereo| c0=FL | c1=FR"
+
+
+
Given the same source, you can also switch front left and front right channels
+and keep the input channel layout:
+
+
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
+
+
+
+If the input is a stereo audio stream, you can mute the front left channel (and
+still keep the stereo channel layout) with:
+
+
pan="stereo|c1=c1"
+
+
Still with a stereo audio stream input, you can copy the right channel in both
+front left and right:
+
+
pan="stereo| c0=FR | c1=FR"
+
+
+
+
6.37 replaygain# TOC
+
+
ReplayGain scanner filter. This filter takes an audio stream as an input and
+outputs it unchanged.
+At end of filtering it displays track_gain
and track_peak
.
+
+
+
6.38 resample# TOC
+
+
Convert the audio sample format, sample rate and channel layout. It is
+not meant to be used directly.
+
+
+
6.39 silencedetect# TOC
+
+
Detect silence in an audio stream.
+
+
This filter logs a message when it detects that the input audio volume is less
+or equal to a noise tolerance value for a duration greater or equal to the
+minimum detected noise duration.
+
+
The printed times and duration are expressed in seconds.
+
+
The filter accepts the following options:
+
+
+duration, d
+Set silence duration until notification (default is 2 seconds).
+
+
+noise, n
+Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
+specified value) or amplitude ratio. Default is -60dB, or 0.001.
+
+
+
+
+
6.39.1 Examples# TOC
+
+
+ Detect 5 seconds of silence with -50dB noise tolerance:
+
+
silencedetect=n=-50dB:d=5
+
+
+ Complete example with ffmpeg
to detect silence with 0.0001 noise
+tolerance in silence.mp3 :
+
+
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
+
+
+
+
+
6.40 silenceremove# TOC
+
+
Remove silence from the beginning, middle or end of the audio.
+
+
The filter accepts the following options:
+
+
+start_periods
+This value is used to indicate if audio should be trimmed at beginning of
+the audio. A value of zero indicates no silence should be trimmed from the
+beginning. When specifying a non-zero value, it trims audio up until it
+finds non-silence. Normally, when trimming silence from beginning of audio
+the start_periods will be 1
but it can be increased to higher
+values to trim all audio up to specific count of non-silence periods.
+Default value is 0
.
+
+
+start_duration
+Specify the amount of time that non-silence must be detected before it stops
+trimming audio. By increasing the duration, bursts of noises can be treated
+as silence and trimmed off. Default value is 0
.
+
+
+start_threshold
+This indicates what sample value should be treated as silence. For digital
+audio, a value of 0
may be fine but for audio recorded from analog,
+you may wish to increase the value to account for background noise.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+stop_periods
+Set the count for trimming silence from the end of audio.
+To remove silence from the middle of a file, specify a stop_periods
+that is negative. This value is then treated as a positive value and is
+used to indicate the effect should restart processing as specified by
+start_periods , making it suitable for removing periods of silence
+in the middle of the audio.
+Default value is 0
.
+
+
+stop_duration
+Specify a duration of silence that must exist before audio is not copied any
+more. By specifying a higher duration, silence that is wanted can be left in
+the audio.
+Default value is 0
.
+
+
+stop_threshold
+This is the same as start_threshold but for trimming silence from
+the end of audio.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+leave_silence
+This indicates that stop_duration length of audio should be left intact
+at the beginning of each period of silence.
+For example, if you want to remove long pauses between words but do not want
+to remove the pauses completely. Default value is 0
.
+
+
+
+
+
+
6.40.1 Examples# TOC
+
+
+ The following example shows how this filter can be used to start a recording
+that does not contain the delay at the start which usually occurs between
+pressing the record button and the start of the performance:
+
+
silenceremove=1:5:0.02
+
+
+
+
+
6.41 treble# TOC
+
+
Boost or cut treble (upper) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at whichever is the lower of ~22 kHz and the
+Nyquist frequency. Its useful range is about -20 (for a large cut)
+to +20 (for a large boost). Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 3000
Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep is the filter’s shelf transition.
+
+
+
+
+
6.42 volume# TOC
+
+
Adjust the input audio volume.
+
+
It accepts the following parameters:
+
+volume
+Set audio volume expression.
+
+Output values are clipped to the maximum value.
+
+The output audio volume is given by the relation:
+
+
output_volume = volume * input_volume
+
+
+The default value for volume is "1.0".
+
+
+precision
+This parameter represents the mathematical precision.
+
+It determines which input sample formats will be allowed, which affects the
+precision of the volume scaling.
+
+
+fixed
+8-bit fixed-point; this limits input sample format to U8, S16, and S32.
+
+float
+32-bit floating-point; this limits input sample format to FLT. (default)
+
+double
+64-bit floating-point; this limits input sample format to DBL.
+
+
+
+
+replaygain
+Choose the behaviour on encountering ReplayGain side data in input frames.
+
+
+drop
+Remove ReplayGain side data, ignoring its contents (the default).
+
+
+ignore
+Ignore ReplayGain side data, but leave it in the frame.
+
+
+track
+Prefer the track gain, if present.
+
+
+album
+Prefer the album gain, if present.
+
+
+
+
+replaygain_preamp
+Pre-amplification gain in dB to apply to the selected replaygain gain.
+
+Default value for replaygain_preamp is 0.0.
+
+
+eval
+Set when the volume expression is evaluated.
+
+It accepts the following values:
+
+‘once ’
+only evaluate expression once during the filter initialization, or
+when the ‘volume ’ command is sent
+
+
+‘frame ’
+evaluate expression for each incoming frame
+
+
+
+Default value is ‘once ’.
+
+
+
+
The volume expression can contain the following parameters.
+
+
+n
+frame number (starting at zero)
+
+nb_channels
+number of channels
+
+nb_consumed_samples
+number of samples consumed by the filter
+
+nb_samples
+number of samples in the current frame
+
+pos
+original frame position in the file
+
+pts
+frame PTS
+
+sample_rate
+sample rate
+
+startpts
+PTS at start of stream
+
+startt
+time at start of stream
+
+t
+frame time
+
+tb
+timestamp timebase
+
+volume
+last set volume value
+
+
+
+
Note that when eval is set to ‘once ’ only the
+sample_rate and tb variables are available, all other
+variables will evaluate to NAN.
+
+
+
6.42.1 Commands# TOC
+
+
This filter supports the following commands:
+
+volume
+Modify the volume expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+replaygain_noclip
+Prevent clipping by limiting the gain applied.
+
+Default value for replaygain_noclip is 1.
+
+
+
+
+
+
6.42.2 Examples# TOC
+
+
+
+
+
6.43 volumedetect# TOC
+
+
+Detect the volume of the input audio.
+
+
The filter has no parameters. The input is not modified. Statistics about
+the volume will be printed in the log when the input stream end is reached.
+
+
In particular it will show the mean volume (root mean square), maximum
+volume (on a per-sample basis), and the beginning of a histogram of the
+registered volume values (from the maximum value to a cumulated 1/1000 of
+the samples).
+
+
All volumes are in decibels relative to the maximum PCM value.
+
+
+
6.43.1 Examples# TOC
+
+
Here is an excerpt of the output:
+
+
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
+
+
+
It means that:
+
+ The mean square energy is approximately -27 dB, or 10^-2.7.
+ The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
+ There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
+
+
+
In other words, raising the volume by +4 dB does not cause any clipping,
+raising it by +5 dB causes clipping for 6 samples, etc.
+
+
+
+
7 Audio Sources# TOC
+
+
Below is a description of the currently available audio sources.
+
+
+
7.1 abuffer# TOC
+
+
Buffer audio frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/asrc_abuffer.h .
+
+
It accepts the following parameters:
+
+time_base
+The timebase which will be used for timestamps of submitted frames. It must be
+either a floating-point number or in numerator /denominator form.
+
+
+sample_rate
+The sample rate of the incoming audio buffers.
+
+
+sample_fmt
+The sample format of the incoming audio buffers.
+Either a sample format name or its corresponding integer representation from
+the enum AVSampleFormat in libavutil/samplefmt.h
+
+
+channel_layout
+The channel layout of the incoming audio buffers.
+Either a channel layout name from channel_layout_map in
+libavutil/channel_layout.c or its corresponding integer representation
+from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
+
+
+channels
+The number of channels of the incoming audio buffers.
+If both channels and channel_layout are specified, then they
+must be consistent.
+
+
+
+
+
+
7.1.1 Examples# TOC
+
+
+
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+
+
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
+Since the sample format with name "s16p" corresponds to the number
+6 and the "stereo" channel layout corresponds to the value 0x3, this is
+equivalent to:
+
+
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+
+
+
7.2 aevalsrc# TOC
+
+
Generate an audio signal specified by an expression.
+
+
This source accepts in input one or more expressions (one for each
+channel), which are evaluated and used to generate a corresponding
+audio signal.
+
+
This source accepts the following options:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. In case the
+channel_layout option is not specified, the selected channel layout
+depends on the number of provided expressions. Otherwise the last
+specified expression is applied to the remaining output channels.
+
+
+channel_layout, c
+Set the channel layout. The number of channels in the specified layout
+must be equal to the number of specified expressions.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified
+duration, as the generated audio is always cut at the end of a
+complete frame.
+
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame,
+default to 1024.
+
+
+sample_rate, s
+Specify the sample rate, defaults to 44100.
+
+
+
+
Each expression in exprs can contain the following constants:
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+t
+time of the evaluated sample expressed in seconds, starting from 0
+
+
+s
+sample rate
+
+
+
+
+
+
7.2.1 Examples# TOC
+
+
+ Generate silence:
+
+
aevalsrc=0
+
+
+ Generate a sin signal with frequency of 440 Hz, set sample rate to
+8000 Hz:
+
+
aevalsrc="sin(440*2*PI*t):s=8000"
+
+
+ Generate a two channels signal, specify the channel layout (Front
+Center + Back Center) explicitly:
+
+
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
+
+
+ Generate white noise:
+
+
aevalsrc="-2+random(0)"
+
+
+ Generate an amplitude modulated signal:
+
+
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
+
+
+ Generate 2.5 Hz binaural beats on a 360 Hz carrier:
+
+
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
+
+
+
+
+
+
7.3 anullsrc# TOC
+
+
The null audio source, return unprocessed audio frames. It is mainly useful
+as a template and to be employed in analysis / debugging tools, or as
+the source for filters which ignore the input data (for example the sox
+synth filter).
+
+
This source accepts the following options:
+
+
+channel_layout, cl
+
+Specifies the channel layout, and can be either an integer or a string
+representing a channel layout. The default value of channel_layout
+is "stereo".
+
+Check the channel_layout_map definition in
+libavutil/channel_layout.c for the mapping between strings and
+channel layout values.
+
+
+sample_rate, r
+Specifies the sample rate, and defaults to 44100.
+
+
+nb_samples, n
+Set the number of samples per requested frames.
+
+
+
+
+
+
7.3.1 Examples# TOC
+
+
+ Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
+
+
anullsrc=r=48000:cl=4
+
+
+ Do the same operation with a more obvious syntax:
+
+
anullsrc=r=48000:cl=mono
+
+
+
+
All the parameters need to be explicitly defined.
+
+
+
7.4 flite# TOC
+
+
Synthesize a voice utterance using the libflite library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libflite
.
+
+
Note that the flite library is not thread-safe.
+
+
The filter accepts the following options:
+
+
+list_voices
+If set to 1, list the names of the available voices and exit
+immediately. Default value is 0.
+
+
+nb_samples, n
+Set the maximum number of samples per frame. Default value is 512.
+
+
+textfile
+Set the filename containing the text to speak.
+
+
+text
+Set the text to speak.
+
+
+voice, v
+Set the voice to use for the speech synthesis. Default value is
+kal
. See also the list_voices option.
+
+
+
+
+
7.4.1 Examples# TOC
+
+
+ Read from file speech.txt , and synthesize the text using the
+standard flite voice:
+
+
flite=textfile=speech.txt
+
+
+ Read the specified text selecting the slt
voice:
+
+
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Input text to ffmpeg:
+
+
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Make ffplay speak the specified text, using flite
and
+the lavfi
device:
+
+
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
+
+
+
+
For more information about libflite, check:
+http://www.speech.cs.cmu.edu/flite/
+
+
+
7.5 sine# TOC
+
+
Generate an audio signal made of a sine wave with amplitude 1/8.
+
+
The audio signal is bit-exact.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the carrier frequency. Default is 440 Hz.
+
+
+beep_factor, b
+Enable a periodic beep every second with frequency beep_factor times
+the carrier frequency. Default is 0, meaning the beep is disabled.
+
+
+sample_rate, r
+Specify the sample rate, default is 44100.
+
+
+duration, d
+Specify the duration of the generated audio stream.
+
+
+samples_per_frame
+Set the number of samples per output frame, default is 1024.
+
+
+
+
+
7.5.1 Examples# TOC
+
+
+ Generate a simple 440 Hz sine wave:
+
+
sine
+
+
+ Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
+
+
sine=220:4:d=5
+sine=f=220:b=4:d=5
+sine=frequency=220:beep_factor=4:duration=5
+
+
+
+
+
+
+
8 Audio Sinks# TOC
+
+
Below is a description of the currently available audio sinks.
+
+
+
8.1 abuffersink# TOC
+
+
Buffer audio frames, and make them available to the end of filter chain.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVABufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
8.2 anullsink# TOC
+
+
Null audio sink; do absolutely nothing with the input audio. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
9 Video Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the video filters included in your
+build.
+
+
Below is a description of the currently available video filters.
+
+
+
9.1 alphaextract# TOC
+
+
Extract the alpha component from the input as a grayscale video. This
+is especially useful with the alphamerge filter.
+
+
+
9.2 alphamerge# TOC
+
+
Add or replace the alpha component of the primary input with the
+grayscale value of a second input. This is intended for use with
+alphaextract to allow the transmission or storage of frame
+sequences that have alpha in a format that doesn’t support an alpha
+channel.
+
+
For example, to reconstruct full frames from a normal YUV-encoded video
+and a separate video created with alphaextract , you might use:
+
+
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+
+
Since this filter is designed for reconstruction, it operates on frame
+sequences without considering timestamps, and terminates when either
+input reaches end of stream. This will cause problems if your encoding
+pipeline drops frames. If you’re trying to apply an image as an
+overlay to a video stream, consider the overlay filter instead.
+
+
+
+
9.3 ass# TOC
+
+
Same as the subtitles filter, except that it doesn’t require libavcodec
+and libavformat to work. On the other hand, it is limited to ASS (Advanced
+Substation Alpha) subtitles files.
+
+
This filter accepts the following option in addition to the common options from
+the subtitles filter:
+
+
+shaping
+Set the shaping engine
+
+Available values are:
+
+‘auto ’
+The default libass shaping engine, which is the best available.
+
+‘simple ’
+Fast, font-agnostic shaper that can do only substitutions
+
+‘complex ’
+Slower shaper using OpenType for substitutions and positioning
+
+
+
+The default is auto
.
+
+
+
+
+
9.4 bbox# TOC
+
+
Compute the bounding box for the non-black pixels in the input frame
+luminance plane.
+
+
This filter computes the bounding box containing all the pixels with a
+luminance value greater than the minimum allowed value.
+The parameters describing the bounding box are printed on the filter
+log.
+
+
The filter accepts the following option:
+
+
+min_val
+Set the minimal luminance value. Default is 16
.
+
+
+
+
+
9.5 blackdetect# TOC
+
+
Detect video intervals that are (almost) completely black. Can be
+useful to detect chapter transitions, commercials, or invalid
+recordings. Output lines contains the time for the start, end and
+duration of the detected black interval expressed in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
The filter accepts the following options:
+
+
+black_min_duration, d
+Set the minimum detected black duration expressed in seconds. It must
+be a non-negative floating point number.
+
+Default value is 2.0.
+
+
+picture_black_ratio_th, pic_th
+Set the threshold for considering a picture "black".
+Express the minimum value for the ratio:
+
+
nb_black_pixels / nb_pixels
+
+
+for which a picture is considered black.
+Default value is 0.98.
+
+
+pixel_black_th, pix_th
+Set the threshold for considering a pixel "black".
+
+The threshold expresses the maximum pixel luminance value for which a
+pixel is considered "black". The provided value is scaled according to
+the following equation:
+
+
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+
+luminance_range_size and luminance_minimum_value depend on
+the input video format, the range is [0-255] for YUV full-range
+formats and [16-235] for YUV non full-range formats.
+
+Default value is 0.10.
+
+
+
+
The following example sets the maximum pixel threshold to the minimum
+value, and detects only black intervals of 2 or more seconds:
+
+
blackdetect=d=2:pix_th=0.00
+
+
+
+
9.6 blackframe# TOC
+
+
Detect frames that are (almost) completely black. Can be useful to
+detect chapter transitions or commercials. Output lines consist of
+the frame number of the detected frame, the percentage of blackness,
+the position in the file if known or -1 and the timestamp in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
It accepts the following parameters:
+
+
+amount
+The percentage of the pixels that have to be below the threshold; it defaults to
+98
.
+
+
+threshold, thresh
+The threshold below which a pixel value is considered black; it defaults to
+32
.
+
+
+
+
+
+
9.7 blend, tblend# TOC
+
+
Blend two video frames into each other.
+
+
The blend
filter takes two input streams and outputs one
+stream, the first input is the "top" layer and second input is
+"bottom" layer. Output terminates when shortest input terminates.
+
+
The tblend
(time blend) filter takes two consecutive frames
+from one single stream, and outputs the result obtained by blending
+the new frame on top of the old frame.
+
+
A description of the accepted options follows.
+
+
+c0_mode
+c1_mode
+c2_mode
+c3_mode
+all_mode
+Set blend mode for specific pixel component or all pixel components in case
+of all_mode . Default value is normal
.
+
+Available values for component modes are:
+
+‘addition ’
+‘and ’
+‘average ’
+‘burn ’
+‘darken ’
+‘difference ’
+‘difference128 ’
+‘divide ’
+‘dodge ’
+‘exclusion ’
+‘hardlight ’
+‘lighten ’
+‘multiply ’
+‘negation ’
+‘normal ’
+‘or ’
+‘overlay ’
+‘phoenix ’
+‘pinlight ’
+‘reflect ’
+‘screen ’
+‘softlight ’
+‘subtract ’
+‘vividlight ’
+‘xor ’
+
+
+
+c0_opacity
+c1_opacity
+c2_opacity
+c3_opacity
+all_opacity
+Set blend opacity for specific pixel component or all pixel components in case
+of all_opacity . Only used in combination with pixel component blend modes.
+
+
+c0_expr
+c1_expr
+c2_expr
+c3_expr
+all_expr
+Set blend expression for specific pixel component or all pixel components in case
+of all_expr . Note that related mode options will be ignored if those are set.
+
+The expressions can use the following variables:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+the coordinates of the current sample
+
+
+W
+H
+the width and height of currently filtered plane
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+TOP, A
+Value of pixel component at current location for first video frame (top layer).
+
+
+BOTTOM, B
+Value of pixel component at current location for second video frame (bottom layer).
+
+
+
+
+shortest
+Force termination when the shortest input terminates. Default is
+0
. This option is only defined for the blend
filter.
+
+
+repeatlast
+Continue applying the last bottom frame after the end of the stream. A value of
+0
+ disables the filter after the last frame of the bottom layer is reached.
+Default is 1
. This option is only defined for the blend
filter.
+
+
+
+
+
9.7.1 Examples# TOC
+
+
+ Apply transition from bottom layer to top layer in first 10 seconds:
+
+
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
+
+
+ Apply 1x1 checkerboard effect:
+
+
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
+
+
+ Apply uncover left effect:
+
+
blend=all_expr='if(gte(N*SW+X,W),A,B)'
+
+
+ Apply uncover down effect:
+
+
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
+
+
+ Apply uncover up-left effect:
+
+
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
+
+
+ Display differences between the current and the previous frame:
+
+
tblend=all_mode=difference128
+
+
+
+
+
9.8 boxblur# TOC
+
+
Apply a boxblur algorithm to the input video.
+
+
It accepts the following parameters:
+
+
+luma_radius, lr
+luma_power, lp
+chroma_radius, cr
+chroma_power, cp
+alpha_radius, ar
+alpha_power, ap
+
+
+
A description of the accepted options follows.
+
+
+luma_radius, lr
+chroma_radius, cr
+alpha_radius, ar
+Set an expression for the box radius in pixels used for blurring the
+corresponding input plane.
+
+The radius value must be a non-negative number, and must not be
+greater than the value of the expression min(w,h)/2
for the
+luma and alpha planes, and of min(cw,ch)/2
for the chroma
+planes.
+
+Default value for luma_radius is "2". If not specified,
+chroma_radius and alpha_radius default to the
+corresponding value set for luma_radius .
+
+The expressions can contain the following constants:
+
+w
+h
+The input width and height in pixels.
+
+
+cw
+ch
+The input chroma image width and height in pixels.
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p", hsub is 2 and vsub is 1.
+
+
+
+
+luma_power, lp
+chroma_power, cp
+alpha_power, ap
+Specify how many times the boxblur filter is applied to the
+corresponding plane.
+
+Default value for luma_power is 2. If not specified,
+chroma_power and alpha_power default to the
+corresponding value set for luma_power .
+
+A value of 0 will disable the effect.
+
+
+
+
+
9.8.1 Examples# TOC
+
+
+ Apply a boxblur filter with the luma, chroma, and alpha radii
+set to 2:
+
+
boxblur=luma_radius=2:luma_power=1
+boxblur=2:1
+
+
+ Set the luma radius to 2, and alpha and chroma radius to 0:
+
+
boxblur=2:1:cr=0:ar=0
+
+
+ Set the luma and chroma radii to a fraction of the video dimension:
+
+
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
+
+
+
+
+
9.9 codecview# TOC
+
+
Visualize information exported by some codecs.
+
+
Some codecs can export information through frames using side-data or other
+means. For example, some MPEG based codecs export motion vectors through the
+export_mvs flag in the codec flags2 option.
+
+
The filter accepts the following option:
+
+
+mv
+Set motion vectors to visualize.
+
+Available flags for mv are:
+
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+
+
+
9.9.1 Examples# TOC
+
+
+ Visualizes multi-directionals MVs from P and B-Frames using ffplay
:
+
+
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
+
+
+
+
+
9.10 colorbalance# TOC
+
Modify intensity of primary colors (red, green and blue) of input frames.
+
+
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
+regions for the red-cyan, green-magenta or blue-yellow balance.
+
+
A positive adjustment value shifts the balance towards the primary color, a negative
+value towards the complementary color.
+
+
The filter accepts the following options:
+
+
+rs
+gs
+bs
+Adjust red, green and blue shadows (darkest pixels).
+
+
+rm
+gm
+bm
+Adjust red, green and blue midtones (medium pixels).
+
+
+rh
+gh
+bh
+Adjust red, green and blue highlights (brightest pixels).
+
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+
+
+
9.10.1 Examples# TOC
+
+
+ Add red color cast to shadows:
+
+
colorbalance=rs=.3
+
+
+
+
+
9.11 colorlevels# TOC
+
+
Adjust video input frames using levels.
+
+
The filter accepts the following options:
+
+
+rimin
+gimin
+bimin
+aimin
+Adjust red, green, blue and alpha input black point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+rimax
+gimax
+bimax
+aimax
+Adjust red, green, blue and alpha input white point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
+
+Input levels are used to lighten highlights (bright tones), darken shadows
+(dark tones), change the balance of bright and dark tones.
+
+
+romin
+gomin
+bomin
+aomin
+Adjust red, green, blue and alpha output black point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
+
+
+romax
+gomax
+bomax
+aomax
+Adjust red, green, blue and alpha output white point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
+
+Output levels allow manual selection of a constrained output level range.
+
+
+
+
+
9.11.1 Examples# TOC
+
+
+ Make video output darker:
+
+
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
+
+
+ Increase contrast:
+
+
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
+
+
+ Make video output lighter:
+
+
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
+
+
+ Increase brightness:
+
+
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
+
+
+
+
+
9.12 colorchannelmixer# TOC
+
+
Adjust video input frames by re-mixing color channels.
+
+
+This filter modifies a color channel by adding the values associated with
+the other channels of the same pixels. For example if the value to
+modify is red, the output value will be:
+
+
red =red *rr + blue *rb + green *rg + alpha *ra
+
+
+
The filter accepts the following options:
+
+
+rr
+rg
+rb
+ra
+Adjust contribution of input red, green, blue and alpha channels for output red channel.
+Default is 1
for rr , and 0
for rg , rb and ra .
+
+
+gr
+gg
+gb
+ga
+Adjust contribution of input red, green, blue and alpha channels for output green channel.
+Default is 1
for gg , and 0
for gr , gb and ga .
+
+
+br
+bg
+bb
+ba
+Adjust contribution of input red, green, blue and alpha channels for output blue channel.
+Default is 1
for bb , and 0
for br , bg and ba .
+
+
+ar
+ag
+ab
+aa
+Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
+Default is 1
for aa , and 0
for ar , ag and ab .
+
+Allowed ranges for options are [-2.0, 2.0]
.
+
+
+
+
+
9.12.1 Examples# TOC
+
+
+ Convert source to grayscale:
+
+
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
+
+ Simulate sepia tones:
+
+
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
+
+
+
+
+
9.13 colormatrix# TOC
+
+
Convert color matrix.
+
+
The filter accepts the following options:
+
+
+src
+dst
+Specify the source and destination color matrix. Both values must be
+specified.
+
+The accepted values are:
+
+‘bt709 ’
+BT.709
+
+
+‘bt601 ’
+BT.601
+
+
+‘smpte240m ’
+SMPTE-240M
+
+
+‘fcc ’
+FCC
+
+
+
+
+
+
For example to convert from BT.601 to SMPTE-240M, use the command:
+
+
colormatrix=bt601:smpte240m
+
+
+
+
9.14 copy# TOC
+
+
Copy the input source unchanged to the output. This is mainly useful for
+testing purposes.
+
+
+
9.15 crop# TOC
+
+
Crop the input video to given dimensions.
+
+
It accepts the following parameters:
+
+
+w, out_w
+The width of the output video. It defaults to iw
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+h, out_h
+The height of the output video. It defaults to ih
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+x
+The horizontal position, in the input video, of the left edge of the output
+video. It defaults to (in_w-out_w)/2
.
+This expression is evaluated per-frame.
+
+
+y
+The vertical position, in the input video, of the top edge of the output video.
+It defaults to (in_h-out_h)/2
.
+This expression is evaluated per-frame.
+
+
+keep_aspect
+If set to 1 will force the output display aspect ratio
+to be the same of the input, by changing the output sample aspect
+ratio. It defaults to 0.
+
+
+
+
The out_w , out_h , x , y parameters are
+expressions containing the following constants:
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+in_w
+in_h
+The input width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (cropped) width and height.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+n
+The number of the input frame, starting from 0.
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
The expression for out_w may depend on the value of out_h ,
+and the expression for out_h may depend on out_w , but they
+cannot depend on x and y , as x and y are
+evaluated after out_w and out_h .
+
+
The x and y parameters specify the expressions for the
+position of the top-left corner of the output (non-cropped) area. They
+are evaluated for each frame. If the evaluated value is not valid, it
+is approximated to the nearest valid value.
+
+
The expression for x may depend on y , and the expression
+for y may depend on x .
+
+
+
9.15.1 Examples# TOC
+
+
+
+
+
9.16 cropdetect# TOC
+
+
Auto-detect the crop size.
+
+
It calculates the necessary cropping parameters and prints the
+recommended parameters via the logging system. The detected dimensions
+correspond to the non-black area of the input video.
+
+
It accepts the following parameters:
+
+
+limit
+Set higher black value threshold, which can be optionally specified
+from nothing (0) to everything (255 for 8bit based formats). An intensity
+value greater than the set value is considered non-black. It defaults to 24.
+You can also specify a value between 0.0 and 1.0 which will be scaled depending
+on the bitdepth of the pixel format.
+
+
+round
+The value which the width/height should be divisible by. It defaults to
+16. The offset is automatically adjusted to center the video. Use 2 to
+get only even dimensions (needed for 4:2:2 video). 16 is best when
+encoding to most video codecs.
+
+
+reset_count, reset
+Set the counter that determines after how many frames cropdetect will
+reset the previously detected largest video area and start over to
+detect the current optimal crop area. Default value is 0.
+
+This can be useful when channel logos distort the video area. 0
+indicates ’never reset’, and returns the largest area encountered during
+playback.
+
+
+
+
+
9.17 curves# TOC
+
+
Apply color adjustments using curves.
+
+
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
+component (red, green and blue) has its values defined by N key points
+tied from each other using a smooth curve. The x-axis represents the pixel
+values from the input frame, and the y-axis the new pixel values to be set for
+the output frame.
+
+
By default, a component curve is defined by the two points (0;0) and
+(1;1) . This creates a straight line where each original pixel value is
+"adjusted" to its own value, which means no change to the image.
+
+
The filter allows you to redefine these two points and add some more. A new
+curve (using a natural cubic spline interpolation) will be defined to pass
+smoothly through all these new coordinates. The newly defined points need to be
+strictly increasing over the x-axis, and their x and y values must
+be in the [0;1] interval. If the computed curves happened to go outside
+the vector spaces, the values will be clipped accordingly.
+
+
If there is no key point defined in x=0
, the filter will automatically
+insert a (0;0) point. In the same way, if there is no key point defined
+in x=1
, the filter will automatically insert a (1;1) point.
+
+
The filter accepts the following options:
+
+
+preset
+Select one of the available color presets. This option can be used in addition
+to the r , g , b parameters; in this case, the latter
+options take priority over the preset values.
+Available presets are:
+
+‘none ’
+‘color_negative ’
+‘cross_process ’
+‘darker ’
+‘increase_contrast ’
+‘lighter ’
+‘linear_contrast ’
+‘medium_contrast ’
+‘negative ’
+‘strong_contrast ’
+‘vintage ’
+
+Default is none
.
+
+master, m
+Set the master key points. These points will define a second pass mapping. It
+is sometimes called a "luminance" or "value" mapping. It can be used with
+r , g , b or all since it acts like a
+post-processing LUT.
+
+red, r
+Set the key points for the red component.
+
+green, g
+Set the key points for the green component.
+
+blue, b
+Set the key points for the blue component.
+
+all
+Set the key points for all components (not including master).
+Can be used in addition to the other key points component
+options. In this case, the unset component(s) will fallback on this
+all setting.
+
+psfile
+Specify a Photoshop curves file (.asv
) to import the settings from.
+
+
+
+
+To avoid some filtergraph syntax conflicts, each key points list needs to be
+defined using the following syntax: x0/y0 x1/y1 x2/y2 ...
.
+
+
+
9.17.1 Examples# TOC
+
+
+
+
+
9.18 dctdnoiz# TOC
+
+
Denoise frames using 2D DCT (frequency domain filtering).
+
+
This filter is not designed for real time.
+
+
The filter accepts the following options:
+
+
+sigma, s
+Set the noise sigma constant.
+
+This sigma defines a hard threshold of 3 * sigma
; every DCT
+coefficient (absolute value) below this threshold will be dropped.
+
+If you need a more advanced filtering, see expr .
+
+Default is 0
.
+
+
+overlap
+Set the number of overlapping pixels for each block. Since the filter can be slow, you
+may want to reduce this value, at the cost of a less effective filter and the
+risk of various artefacts.
+
+If the overlapping value doesn’t allow processing the whole input width or
+height, a warning will be displayed and the corresponding borders won’t be denoised.
+
+Default value is blocksize -1, which is the best possible setting.
+
+
+expr, e
+Set the coefficient factor expression.
+
+For each coefficient of a DCT block, this expression will be evaluated as a
+multiplier value for the coefficient.
+
+If this option is set, the sigma option will be ignored.
+
+The absolute value of the coefficient can be accessed through the c
+variable.
+
+
+n
+Set the blocksize using the number of bits. 1<<n
defines the
+blocksize , which is the width and height of the processed blocks.
+
+The default value is 3 (8x8) and can be raised to 4 for a
+blocksize of 16x16. Note that changing this setting has huge consequences
+on the processing speed. Also, a larger block size does not necessarily mean
+better de-noising.
+
+
+
+
+
9.18.1 Examples# TOC
+
+
Apply a denoise with a sigma of 4.5
:
+
+
+
The same operation can be achieved using the expression system:
+
+
dctdnoiz=e='gte(c, 4.5*3)'
+
+
+
Violent denoise using a block size of 16x16
:
+
+
+
+
9.19 decimate# TOC
+
+
Drop duplicated frames at regular intervals.
+
+
The filter accepts the following options:
+
+
+cycle
+Set the number of frames from which one will be dropped. Setting this to
+N means one frame in every batch of N frames will be dropped.
+Default is 5
.
+
+
+dupthresh
+Set the threshold for duplicate detection. If the difference metric for a frame
+is less than or equal to this value, then it is declared as duplicate. Default
+is 1.1
+
+
+scthresh
+Set scene change threshold. Default is 15
.
+
+
+blockx
+blocky
+Set the size of the x and y-axis blocks used during metric calculations.
+Larger blocks give better noise suppression, but also give worse detection of
+small movements. Must be a power of two. Default is 32
.
+
+
+ppsrc
+Mark main input as a pre-processed input and activate clean source input
+stream. This allows the input to be pre-processed with various filters to help
+the metrics calculation while keeping the frame selection lossless. When set to
+1
, the first stream is for the pre-processed input, and the second
+stream is the clean source from where the kept frames are chosen. Default is
+0
.
+
+
+chroma
+Set whether or not chroma is considered in the metric calculations. Default is
+1
.
+
+
+
+
+
9.20 dejudder# TOC
+
+
Remove judder produced by partially interlaced telecined content.
+
+
Judder can be introduced, for instance, by pullup filter. If the original
+source was partially telecined content then the output of pullup,dejudder
+will have a variable frame rate. May change the recorded frame rate of the
+container. Aside from that change, this filter will not affect constant frame
+rate video.
+
+
The option available in this filter is:
+
+cycle
+Specify the length of the window over which the judder repeats.
+
+Accepts any integer greater than 1. Useful values are:
+
+‘4 ’
+If the original was telecined from 24 to 30 fps (Film to NTSC).
+
+
+‘5 ’
+If the original was telecined from 25 to 30 fps (PAL to NTSC).
+
+
+‘20 ’
+If a mixture of the two.
+
+
+
+The default is ‘4 ’.
+
+
+
+
+
9.21 delogo# TOC
+
+
Suppress a TV station logo by a simple interpolation of the surrounding
+pixels. Just set a rectangle covering the logo and watch it disappear
+(and sometimes something even uglier appear - your mileage may vary).
+
+
It accepts the following parameters:
+
+x
+y
+Specify the top left corner coordinates of the logo. They must be
+specified.
+
+
+w
+h
+Specify the width and height of the logo to clear. They must be
+specified.
+
+
+band, t
+Specify the thickness of the fuzzy edge of the rectangle (added to
+w and h ). The default value is 4.
+
+
+show
+When set to 1, a green rectangle is drawn on the screen to simplify
+finding the right x , y , w , and h parameters.
+The default value is 0.
+
+The rectangle is drawn on the outermost pixels which will be (partly)
+replaced with interpolated values. The values of the next pixels
+immediately outside this rectangle in each direction will be used to
+compute the interpolated pixel values inside the rectangle.
+
+
+
+
+
+
9.21.1 Examples# TOC
+
+
+ Set a rectangle covering the area with top left corner coordinates 0,0
+and size 100x77, and a band of size 10:
+
+
delogo=x=0:y=0:w=100:h=77:band=10
+
+
+
+
+
+
9.22 deshake# TOC
+
+
Attempt to fix small changes in horizontal and/or vertical shift. This
+filter helps remove camera shake from hand-holding a camera, bumping a
+tripod, moving on a vehicle, etc.
+
+
The filter accepts the following options:
+
+
+x
+y
+w
+h
+Specify a rectangular area where to limit the search for motion
+vectors.
+If desired the search for motion vectors can be limited to a
+rectangular area of the frame defined by its top left corner, width
+and height. These parameters have the same meaning as the drawbox
+filter which can be used to visualise the position of the bounding
+box.
+
+This is useful when simultaneous movement of subjects within the frame
+might be confused for camera motion by the motion vector search.
+
+If any or all of x , y , w and h are set to -1
+then the full frame is used. This allows later options to be set
+without specifying the bounding box for the motion vector search.
+
+Default - search the whole frame.
+
+
+rx
+ry
+Specify the maximum extent of movement in x and y directions in the
+range 0-64 pixels. Default 16.
+
+
+edge
+Specify how to generate pixels to fill blanks at the edge of the
+frame. Available values are:
+
+‘blank, 0 ’
+Fill zeroes at blank locations
+
+‘original, 1 ’
+Original image at blank locations
+
+‘clamp, 2 ’
+Extruded edge value at blank locations
+
+‘mirror, 3 ’
+Mirrored edge at blank locations
+
+
+Default value is ‘mirror ’.
+
+
+blocksize
+Specify the blocksize to use for motion search. Range 4-128 pixels,
+default 8.
+
+
+contrast
+Specify the contrast threshold for blocks. Only blocks with more than
+the specified contrast (difference between darkest and lightest
+pixels) will be considered. Range 1-255, default 125.
+
+
+search
+Specify the search strategy. Available values are:
+
+‘exhaustive, 0 ’
+Set exhaustive search
+
+‘less, 1 ’
+Set less exhaustive search.
+
+
+Default value is ‘exhaustive ’.
+
+
+filename
+If set then a detailed log of the motion search is written to the
+specified file.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
+
9.23 drawbox# TOC
+
+
Draw a colored box on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the top left corner coordinates of the box. It defaults to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the box; if 0 they are interpreted as
+the input width and height. It defaults to 0.
+
+
+color, c
+Specify the color of the box to write. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the box edge color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the box edge. Default value is 3
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w and h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y offset coordinates where the box is drawn.
+
+
+w
+h
+The width and height of the drawn box.
+
+
+t
+The thickness of the drawn box.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
9.23.1 Examples# TOC
+
+
+
+
+
9.24 drawgrid# TOC
+
+
Draw a grid on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
+input width and height, respectively, minus thickness
, so image gets
+framed. Default to 0.
+
+
+color, c
+Specify the color of the grid. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the grid color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the grid line. Default value is 1
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w and h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input grid cell width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y coordinates of some point of grid intersection (meant to configure offset).
+
+
+w
+h
+The width and height of the drawn cell.
+
+
+t
+The thickness of the drawn cell.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
9.24.1 Examples# TOC
+
+
+ Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
+
+
drawgrid=width=100:height=100:thickness=2:color=red@0.5
+
+
+ Draw a white 3x3 grid with an opacity of 50%:
+
+
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
+
+
+
+
+
9.25 drawtext# TOC
+
+
Draw a text string or text from a specified file on top of a video, using the
+libfreetype library.
+
+
To enable compilation of this filter, you need to configure FFmpeg with
+--enable-libfreetype
.
+To enable default font fallback and the font option you need to
+configure FFmpeg with --enable-libfontconfig
.
+To enable the text_shaping option, you need to configure FFmpeg with
+--enable-libfribidi
.
+
+
+
9.25.1 Syntax# TOC
+
+
It accepts the following parameters:
+
+
+box
+Used to draw a box around text using the background color.
+The value must be either 1 (enable) or 0 (disable).
+The default value of box is 0.
+
+
+boxcolor
+The color to be used for drawing box around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of boxcolor is "white".
+
+
+borderw
+Set the width of the border to be drawn around the text using bordercolor .
+The default value of borderw is 0.
+
+
+bordercolor
+Set the color to be used for drawing border around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of bordercolor is "black".
+
+
+expansion
+Select how the text is expanded. Can be either none
,
+strftime
(deprecated) or
+normal
(default). See the Text expansion section
+below for details.
+
+
+fix_bounds
+If true, check and fix text coords to avoid clipping.
+
+
+fontcolor
+The color to be used for drawing fonts. For the syntax of this option, check
+the "Color" section in the ffmpeg-utils manual.
+
+The default value of fontcolor is "black".
+
+
+fontcolor_expr
+String which is expanded the same way as text to obtain dynamic
+fontcolor value. By default this option has empty value and is not
+processed. When this option is set, it overrides fontcolor option.
+
+
+font
+The font family to be used for drawing text. By default Sans.
+
+
+fontfile
+The font file to be used for drawing text. The path must be included.
+This parameter is mandatory if the fontconfig support is disabled.
+
+
+fontsize
+The font size to be used for drawing text.
+The default value of fontsize is 16.
+
+
+text_shaping
+If set to 1, attempt to shape the text (for example, reverse the order of
+right-to-left text and join Arabic characters) before drawing it.
+Otherwise, just draw the text exactly as given.
+By default 1 (if supported).
+
+
+ft_load_flags
+The flags to be used for loading the fonts.
+
+The flags map the corresponding flags supported by libfreetype, and are
+a combination of the following values:
+
+default
+no_scale
+no_hinting
+render
+no_bitmap
+vertical_layout
+force_autohint
+crop_bitmap
+pedantic
+ignore_global_advance_width
+no_recurse
+ignore_transform
+monochrome
+linear_design
+no_autohint
+
+
+Default value is "default".
+
+For more information consult the documentation for the FT_LOAD_*
+libfreetype flags.
+
+
+shadowcolor
+The color to be used for drawing a shadow behind the drawn text. For the
+syntax of this option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of shadowcolor is "black".
+
+
+shadowx
+shadowy
+The x and y offsets for the text shadow position with respect to the
+position of the text. They can be either positive or negative
+values. The default value for both is "0".
+
+
+start_number
+The starting frame number for the n/frame_num variable. The default value
+is "0".
+
+
+tabsize
+The size in number of spaces to use for rendering the tab.
+Default value is 4.
+
+
+timecode
+Set the initial timecode representation in "hh:mm:ss[:;.]ff"
+format. It can be used with or without text parameter. timecode_rate
+option must be specified.
+
+
+timecode_rate, rate, r
+Set the timecode frame rate (timecode only).
+
+
+text
+The text string to be drawn. The text must be a sequence of UTF-8
+encoded characters.
+This parameter is mandatory if no file is specified with the parameter
+textfile .
+
+
+textfile
+A text file containing text to be drawn. The text must be a sequence
+of UTF-8 encoded characters.
+
+This parameter is mandatory if no text string is specified with the
+parameter text .
+
+If both text and textfile are specified, an error is thrown.
+
+
+reload
+If set to 1, the textfile will be reloaded before each frame.
+Be sure to update it atomically, or it may be read partially, or even fail.
+
+
+x
+y
+The expressions which specify the offsets where text will be drawn
+within the video frame. They are relative to the top/left border of the
+output image.
+
+The default value of x and y is "0".
+
+See below for the list of accepted constants and functions.
+
+
+
+
The parameters for x and y are expressions containing the
+following constants and functions:
+
+
+dar
+input display aspect ratio, it is the same as (w / h ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+line_h, lh
+the height of each text line
+
+
+main_h, h, H
+the input height
+
+
+main_w, w, W
+the input width
+
+
+max_glyph_a, ascent
+the maximum distance from the baseline to the highest/upper grid
+coordinate used to place a glyph outline point, for all the rendered
+glyphs.
+It is a positive value, due to the grid’s orientation with the Y axis
+upwards.
+
+
+max_glyph_d, descent
+the maximum distance from the baseline to the lowest grid coordinate
+used to place a glyph outline point, for all the rendered glyphs.
+This is a negative value, due to the grid’s orientation, with the Y axis
+upwards.
+
+
+max_glyph_h
+maximum glyph height, that is the maximum height for all the glyphs
+contained in the rendered text, it is equivalent to ascent -
+descent .
+
+
+max_glyph_w
+maximum glyph width, that is the maximum width for all the glyphs
+contained in the rendered text
+
+
+n
+the number of input frame, starting from 0
+
+
+rand(min, max)
+return a random number included between min and max
+
+
+sar
+The input sample aspect ratio.
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+text_h, th
+the height of the rendered text
+
+
+text_w, tw
+the width of the rendered text
+
+
+x
+y
+the x and y offset coordinates where the text is drawn.
+
+These parameters allow the x and y expressions to refer
+each other, so you can for example specify y=x/dar
.
+
+
+
+
+
9.25.2 Text expansion# TOC
+
+
If expansion is set to strftime
,
+the filter recognizes strftime() sequences in the provided text and
+expands them accordingly. Check the documentation of strftime(). This
+feature is deprecated.
+
+
If expansion is set to none
, the text is printed verbatim.
+
+
If expansion is set to normal
(which is the default),
+the following expansion mechanism is used.
+
+
The backslash character ’\’, followed by any character, always expands to
+the second character.
+
+
Sequence of the form %{...}
are expanded. The text between the
+braces is a function name, possibly followed by arguments separated by ’:’.
+If the arguments contain special characters or delimiters (’:’ or ’}’),
+they should be escaped.
+
+
Note that they probably must also be escaped as the value for the
+text option in the filter argument string and as the filter
+argument in the filtergraph description, and possibly also for the shell,
+that makes up to four levels of escaping; using a text file avoids these
+problems.
+
+
The following functions are available:
+
+
+expr, e
+The expression evaluation result.
+
+It must take one argument specifying the expression to be evaluated,
+which accepts the same constants and functions as the x and
+y values. Note that not all constants should be used, for
+example the text size is not known when evaluating the expression, so
+the constants text_w and text_h will have an undefined
+value.
+
+
+expr_int_format, eif
+Evaluate the expression’s value and output as formatted integer.
+
+The first argument is the expression to be evaluated, just as for the expr function.
+The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
+’u’. They are treated exactly as in the printf function.
+The third parameter is optional and sets the number of positions taken by the output.
+It can be used to add padding with zeros from the left.
+
+
+gmtime
+The time at which the filter is running, expressed in UTC.
+It can accept an argument: a strftime() format string.
+
+
+localtime
+The time at which the filter is running, expressed in the local time zone.
+It can accept an argument: a strftime() format string.
+
+
+metadata
+Frame metadata. It must take one argument specifying metadata key.
+
+
+n, frame_num
+The frame number, starting from 0.
+
+
+pict_type
+A 1 character description of the current picture type.
+
+
+pts
+The timestamp of the current frame.
+It can take up to two arguments.
+
+The first argument is the format of the timestamp; it defaults to flt
+for seconds as a decimal number with microsecond accuracy; hms
stands
+for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
+
+The second argument is an offset added to the timestamp.
+
+
+
+
+
+
9.25.3 Examples# TOC
+
+
+
+
For more information about libfreetype, check:
+http://www.freetype.org/ .
+
+
For more information about fontconfig, check:
+http://freedesktop.org/software/fontconfig/fontconfig-user.html .
+
+
For more information about libfribidi, check:
+http://fribidi.org/ .
+
+
+
9.26 edgedetect# TOC
+
+
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
+
+
The filter accepts the following options:
+
+
+low
+high
+Set low and high threshold values used by the Canny thresholding
+algorithm.
+
+The high threshold selects the "strong" edge pixels, which are then
+connected through 8-connectivity with the "weak" edge pixels selected
+by the low threshold.
+
+low and high threshold values must be chosen in the range
+[0,1], and low should be less than or equal to high .
+
+Default value for low is 20/255
, and default value for high
+is 50/255
.
+
+
+mode
+Define the drawing mode.
+
+
+‘wires ’
+Draw white/gray wires on black background.
+
+
+‘colormix ’
+Mix the colors to create a paint/cartoon effect.
+
+
+
+Default value is wires .
+
+
+
+
+
9.26.1 Examples# TOC
+
+
+ Standard edge detection with custom values for the hysteresis thresholding:
+
+
edgedetect=low=0.1:high=0.4
+
+
+ Painting effect without thresholding:
+
+
edgedetect=mode=colormix:high=0
+
+
+
+
+
9.27 extractplanes# TOC
+
+
Extract color channel components from input video stream into
+separate grayscale video streams.
+
+
The filter accepts the following option:
+
+
+planes
+Set plane(s) to extract.
+
+Available values for planes are:
+
+‘y ’
+‘u ’
+‘v ’
+‘a ’
+‘r ’
+‘g ’
+‘b ’
+
+
+Choosing planes not available in the input will result in an error.
+That means you cannot select r
, g
, b
planes
+with y
, u
, v
planes at the same time.
+
+
+
+
+
9.27.1 Examples# TOC
+
+
+ Extract luma, u and v color channel component from input video frame
+into 3 grayscale outputs:
+
+
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
+
+
+
+
+
9.28 elbg# TOC
+
+
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
+
+
For each input image, the filter will compute the optimal mapping from
+the input to the output given the codebook length, that is the number
+of distinct output colors.
+
+
This filter accepts the following options.
+
+
+codebook_length, l
+Set codebook length. The value must be a positive integer, and
+represents the number of distinct output colors. Default value is 256.
+
+
+nb_steps, n
+Set the maximum number of iterations to apply for computing the optimal
+mapping. The higher the value the better the result and the higher the
+computation time. Default value is 1.
+
+
+seed, s
+Set a random seed, must be an integer included between 0 and
+UINT32_MAX. If not specified, or if explicitly set to -1, the filter
+will try to use a good random seed on a best effort basis.
+
+
+
+
+
9.29 fade# TOC
+
+
Apply a fade-in/out effect to the input video.
+
+
It accepts the following parameters:
+
+
+type, t
+The effect type can be either "in" for a fade-in, or "out" for a fade-out
+effect.
+Default is in
.
+
+
+start_frame, s
+Specify the number of the frame to start applying the fade
+effect at. Default is 0.
+
+
+nb_frames, n
+The number of frames that the fade effect lasts. At the end of the
+fade-in effect, the output video will have the same intensity as the input video.
+At the end of the fade-out transition, the output video will be filled with the
+selected color .
+Default is 25.
+
+
+alpha
+If set to 1, fade only alpha channel, if one exists on the input.
+Default value is 0.
+
+
+start_time, st
+Specify the timestamp (in seconds) of the frame to start to apply the fade
+effect. If both start_frame and start_time are specified, the fade will start at
+whichever comes last. Default is 0.
+
+
+duration, d
+The number of seconds for which the fade effect has to last. At the end of the
+fade-in effect the output video will have the same intensity as the input video,
+at the end of the fade-out transition the output video will be filled with the
+selected color.
+If both duration and nb_frames are specified, duration is used. Default is 0.
+
+
+color, c
+Specify the color of the fade. Default is "black".
+
+
+
+
+
9.29.1 Examples# TOC
+
+
+
+
+
9.30 field# TOC
+
+
Extract a single field from an interlaced image using stride
+arithmetic to avoid wasting CPU time. The output frames are marked as
+non-interlaced.
+
+
The filter accepts the following options:
+
+
+type
+Specify whether to extract the top (if the value is 0
or
+top
) or the bottom field (if the value is 1
or
+bottom
).
+
+
+
+
+
9.31 fieldmatch# TOC
+
+
Field matching filter for inverse telecine. It is meant to reconstruct the
+progressive frames from a telecined stream. The filter does not drop duplicated
+frames, so to achieve a complete inverse telecine fieldmatch
needs to be
+followed by a decimation filter such as decimate in the filtergraph.
+
+
The separation of the field matching and the decimation is notably motivated by
+the possibility of inserting a de-interlacing filter fallback between the two.
+If the source has mixed telecined and real interlaced content,
+fieldmatch
will not be able to match fields for the interlaced parts.
+But these remaining combed frames will be marked as interlaced, and thus can be
+de-interlaced by a later filter such as yadif before decimation.
+
+
In addition to the various configuration options, fieldmatch
can take an
+optional second stream, activated through the ppsrc option. If
+enabled, the frames reconstruction will be based on the fields and frames from
+this second stream. This allows the first input to be pre-processed in order to
+help the various algorithms of the filter, while keeping the output lossless
+(assuming the fields are matched properly). Typically, a field-aware denoiser,
+or brightness/contrast adjustments can help.
+
+
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
+and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM from
+which fieldmatch
is based on. While the semantic and usage are very
+close, some behaviour and options names can differ.
+
+
The decimate filter currently only works for constant frame rate input.
+Do not use fieldmatch
and decimate if your input has mixed
+telecined and progressive content with changing framerate.
+
+
The filter accepts the following options:
+
+
+order
+Specify the assumed field order of the input stream. Available values are:
+
+
+‘auto ’
+Auto detect parity (use FFmpeg’s internal parity value).
+
+‘bff ’
+Assume bottom field first.
+
+‘tff ’
+Assume top field first.
+
+
+
+Note that it is sometimes recommended not to trust the parity announced by the
+stream.
+
+Default value is auto .
+
+
+mode
+Set the matching mode or strategy to use. pc mode is the safest in the
+sense that it won’t risk creating jerkiness due to duplicate frames when
+possible, but if there are bad edits or blended fields it will end up
+outputting combed frames when a good match might actually exist. On the other
+hand, pcn_ub mode is the most risky in terms of creating jerkiness,
+but will almost always find a good frame if there is one. The other values are
+all somewhere in between pc and pcn_ub in terms of risking
+jerkiness and creating duplicate frames versus finding good matches in sections
+with bad edits, orphaned fields, blended fields, etc.
+
+More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
+
+Available values are:
+
+
+‘pc ’
+2-way matching (p/c)
+
+‘pc_n ’
+2-way matching, and trying 3rd match if still combed (p/c + n)
+
+‘pc_u ’
+2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
+
+‘pc_n_ub ’
+2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
+still combed (p/c + n + u/b)
+
+‘pcn ’
+3-way matching (p/c/n)
+
+‘pcn_ub ’
+3-way matching, and trying 4th/5th matches if all 3 of the original matches are
+detected as combed (p/c/n + u/b)
+
+
+
+The parenthesis at the end indicate the matches that would be used for that
+mode assuming order =tff (and field on auto or
+top ).
+
+In terms of speed pc mode is by far the fastest and pcn_ub is
+the slowest.
+
+Default value is pc_n .
+
+
+ppsrc
+Mark the main input stream as a pre-processed input, and enable the secondary
+input stream as the clean source to pick the fields from. See the filter
+introduction for more details. It is similar to the clip2 feature from
+VFM/TFM.
+
+Default value is 0
(disabled).
+
+
+field
+Set the field to match from. It is recommended to set this to the same value as
+order unless you experience matching failures with that setting. In
+certain circumstances changing the field that is used to match from can have a
+large impact on matching performance. Available values are:
+
+
+‘auto ’
+Automatic (same value as order ).
+
+‘bottom ’
+Match from the bottom field.
+
+‘top ’
+Match from the top field.
+
+
+
+Default value is auto .
+
+
+mchroma
+Set whether or not chroma is included during the match comparisons. In most
+cases it is recommended to leave this enabled. You should set this to 0
+only if your clip has bad chroma problems such as heavy rainbowing or other
+artifacts. Setting this to 0
could also be used to speed things up at
+the cost of some accuracy.
+
+Default value is 1
.
+
+
+y0
+y1
+These define an exclusion band which excludes the lines between y0 and
+y1 from being included in the field matching decision. An exclusion
+band can be used to ignore subtitles, a logo, or other things that may
+interfere with the matching. y0 sets the starting scan line and
+y1 sets the ending line; all lines in between y0 and
+y1 (including y0 and y1 ) will be ignored. Setting
+y0 and y1 to the same value will disable the feature.
+y0 and y1 defaults to 0
.
+
+
+scthresh
+Set the scene change detection threshold as a percentage of maximum change on
+the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
+detection is only relevant in case combmatch =sc . The range for
+scthresh is [0.0, 100.0]
.
+
+Default value is 12.0
.
+
+
+combmatch
+When combmatch is not none , fieldmatch
will take into
+account the combed scores of matches when deciding what match to use as the
+final match. Available values are:
+
+
+‘none ’
+No final matching based on combed scores.
+
+‘sc ’
+Combed scores are only used when a scene change is detected.
+
+‘full ’
+Use combed scores all the time.
+
+
+
+Default is sc .
+
+
+combdbg
+Force fieldmatch
to calculate the combed metrics for certain matches and
+print them. This setting is known as micout in TFM/VFM vocabulary.
+Available values are:
+
+
+‘none ’
+No forced calculation.
+
+‘pcn ’
+Force p/c/n calculations.
+
+‘pcnub ’
+Force p/c/n/u/b calculations.
+
+
+
+Default value is none .
+
+
+cthresh
+This is the area combing threshold used for combed frame detection. This
+essentially controls how "strong" or "visible" combing must be to be detected.
+Larger values mean combing must be more visible and smaller values mean combing
+can be less visible or strong and still be detected. Valid settings are from
+-1
(every pixel will be detected as combed) to 255
(no pixel will
+be detected as combed). This is basically a pixel difference value. A good
+range is [8, 12]
.
+
+Default value is 9
.
+
+
+chroma
+Sets whether or not chroma is considered in the combed frame decision. Only
+disable this if your source has chroma problems (rainbowing, etc.) that are
+causing problems for the combed frame detection with chroma enabled. Actually,
+using chroma =0 is usually more reliable, except for the case
+where there is chroma only combing in the source.
+
+Default value is 0
.
+
+
+blockx
+blocky
+Respectively set the x-axis and y-axis size of the window used during combed
+frame detection. This has to do with the size of the area in which
+combpel pixels are required to be detected as combed for a frame to be
+declared combed. See the combpel parameter description for more info.
+Possible values are any number that is a power of 2 starting at 4 and going up
+to 512.
+
+Default value is 16
.
+
+
+combpel
+The number of combed pixels inside any of the blocky by
+blockx size blocks on the frame for the frame to be detected as
+combed. While cthresh controls how "visible" the combing must be, this
+setting controls "how much" combing there must be in any localized area (a
+window defined by the blockx and blocky settings) on the
+frame. Minimum value is 0
and maximum is blocky x blockx
(at
+which point no frames will ever be detected as combed). This setting is known
+as MI in TFM/VFM vocabulary.
+
+Default value is 80
.
+
+
+
+
+
9.31.1 p/c/n/u/b meaning# TOC
+
+
+
9.31.1.1 p/c/n# TOC
+
+
We assume the following telecined stream:
+
+
+
Top fields: 1 2 2 3 4
+Bottom fields: 1 2 3 4 4
+
+
+
The numbers correspond to the progressive frame the fields relate to. Here, the
+first two frames are progressive, the 3rd and 4th are combed, and so on.
+
+
When fieldmatch
is configured to run a matching from bottom
+(field =bottom ) this is how this input stream get transformed:
+
+
+
Input stream:
+ T 1 2 2 3 4
+ B 1 2 3 4 4 <-- matching reference
+
+Matches: c c n n c
+
+Output stream:
+ T 1 2 3 4 4
+ B 1 2 3 4 4
+
+
+
As a result of the field matching, we can see that some frames get duplicated.
+To perform a complete inverse telecine, you need to rely on a decimation filter
+after this operation. See for instance the decimate filter.
+
+
The same operation now matching from top fields (field =top )
+looks like this:
+
+
+
Input stream:
+ T 1 2 2 3 4 <-- matching reference
+ B 1 2 3 4 4
+
+Matches: c c p p c
+
+Output stream:
+ T 1 2 2 3 4
+ B 1 2 2 3 4
+
+
+
In these examples, we can see what p , c and n mean;
+basically, they refer to the frame and field of the opposite parity:
+
+
+ p matches the field of the opposite parity in the previous frame
+ c matches the field of the opposite parity in the current frame
+ n matches the field of the opposite parity in the next frame
+
+
+
+
9.31.1.2 u/b# TOC
+
+
The u and b matching are a bit special in the sense that they match
+from the opposite parity flag. In the following examples, we assume that we are
+currently matching the 2nd frame (Top:2, bottom:2). According to the match, a
+’x’ is placed above and below each matched fields.
+
+
With bottom matching (field =bottom ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 1 2 2 2
+ 2 2 2 1 3
+
+
+
With top matching (field =top ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 2 2 1 2
+ 2 1 3 2 2
+
+
+
+
9.31.2 Examples# TOC
+
+
Simple IVTC of a top field first telecined stream:
+
+
fieldmatch=order=tff:combmatch=none, decimate
+
+
+
Advanced IVTC, with fallback on yadif for still combed frames:
+
+
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+
+
+
9.32 fieldorder# TOC
+
+
Transform the field order of the input video.
+
+
It accepts the following parameters:
+
+
+order
+The output field order. Valid values are tff for top field first or bff
+for bottom field first.
+
+
+
+
The default value is ‘tff ’.
+
+
The transformation is done by shifting the picture content up or down
+by one line, and filling the remaining line with appropriate picture content.
+This method is consistent with most broadcast field order converters.
+
+
If the input video is not flagged as being interlaced, or it is already
+flagged as being of the required output field order, then this filter does
+not alter the incoming video.
+
+
It is very useful when converting to or from PAL DV material,
+which is bottom field first.
+
+
For example:
+
+
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+
+
+
9.33 fifo# TOC
+
+
Buffer input images and send them when they are requested.
+
+
It is mainly useful when auto-inserted by the libavfilter
+framework.
+
+
It does not take parameters.
+
+
+
9.34 format# TOC
+
+
Convert the input video to one of the specified pixel formats.
+Libavfilter will try to pick one that is suitable as input to
+the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
9.34.1 Examples# TOC
+
+
+
+
+
9.35 fps# TOC
+
+
Convert the video to specified constant frame rate by duplicating or dropping
+frames as necessary.
+
+
It accepts the following parameters:
+
+fps
+The desired output frame rate. The default is 25
.
+
+
+round
+Rounding method.
+
+Possible values are:
+
+zero
+zero round towards 0
+
+inf
+round away from 0
+
+down
+round towards -infinity
+
+up
+round towards +infinity
+
+near
+round to nearest
+
+
+The default is near
.
+
+
+start_time
+Assume the first PTS should be the given value, in seconds. This allows for
+padding/trimming at the start of stream. By default, no assumption is made
+about the first frame’s expected PTS, so no padding or trimming is done.
+For example, this could be set to 0 to pad the beginning with duplicates of
+the first frame if a video stream starts after the audio stream or to trim any
+frames with a negative PTS.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+fps [:round ].
+
+
See also the setpts filter.
+
+
+
9.35.1 Examples# TOC
+
+
+ A typical usage in order to set the fps to 25:
+
+
+ Sets the fps to 24, using abbreviation and rounding method to round to nearest:
+
+
fps=fps=film:round=near
+
+
+
+
+
9.36 framepack# TOC
+
+
Pack two different video streams into a stereoscopic video, setting proper
+metadata on supported codecs. The two views should have the same size and
+framerate and processing will stop when the shorter video ends. Please note
+that you may conveniently adjust view properties with the scale and
+fps filters.
+
+
It accepts the following parameters:
+
+format
+The desired packing format. Supported values are:
+
+
+sbs
+The views are next to each other (default).
+
+
+tab
+The views are on top of each other.
+
+
+lines
+The views are packed by line.
+
+
+columns
+The views are packed by column.
+
+
+frameseq
+The views are temporally interleaved.
+
+
+
+
+
+
+
+
Some examples:
+
+
+
# Convert left and right views into a frame-sequential video
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+
+
+
9.37 framestep# TOC
+
+
Select one frame every N-th frame.
+
+
This filter accepts the following option:
+
+step
+Select frame after every step
frames.
+Allowed values are positive integers higher than 0. Default value is 1
.
+
+
+
+
+
9.38 frei0r# TOC
+
+
Apply a frei0r effect to the input video.
+
+
To enable the compilation of this filter, you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the frei0r effect to load. If the environment variable
+FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
+directories specified by the colon-separated list in FREI0R_PATH
.
+Otherwise, the standard frei0r paths are searched, in this order:
+HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
+/usr/lib/frei0r-1/ .
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r effect.
+
+
+
+
+
A frei0r effect parameter can be a boolean (its value is either
+"y" or "n"), a double, a color (specified as
+R /G /B , where R , G , and B are floating point
+numbers between 0.0 and 1.0, inclusive) or by a color description specified in the "Color"
+section in the ffmpeg-utils manual), a position (specified as X /Y , where
+X and Y are floating point numbers) and/or a string.
+
+
The number and types of parameters depend on the loaded effect. If an
+effect parameter is not specified, the default value is set.
+
+
+
9.38.1 Examples# TOC
+
+
+ Apply the distort0r effect, setting the first two double parameters:
+
+
frei0r=filter_name=distort0r:filter_params=0.5|0.01
+
+
+ Apply the colordistance effect, taking a color as the first parameter:
+
+
frei0r=colordistance:0.2/0.3/0.4
+frei0r=colordistance:violet
+frei0r=colordistance:0x112233
+
+
+ Apply the perspective effect, specifying the top left and top right image
+positions:
+
+
frei0r=perspective:0.2/0.2|0.8/0.2
+
+
+
+
For more information, see
+http://frei0r.dyne.org
+
+
+
9.39 fspp# TOC
+
+
Apply fast and simple postprocessing. It is a faster version of spp .
+
+
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
+processing filter, one of them is performed once per block, not per pixel.
+This allows for much higher speed.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 4-5. Default value is 4
.
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range 0-63.
+If not set, the filter will use the QP from the video stream (if available).
+
+
+strength
+Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
+more details but also more artifacts, while higher values make the image smoother
+but also blurrier. Default value is 0
− PSNR optimal.
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
+
9.40 geq# TOC
+
+
The filter accepts the following options:
+
+
+lum_expr, lum
+Set the luminance expression.
+
+cb_expr, cb
+Set the chrominance blue expression.
+
+cr_expr, cr
+Set the chrominance red expression.
+
+alpha_expr, a
+Set the alpha expression.
+
+red_expr, r
+Set the red expression.
+
+green_expr, g
+Set the green expression.
+
+blue_expr, b
+Set the blue expression.
+
+
+
+
The colorspace is selected according to the specified options. If one
+of the lum_expr , cb_expr , or cr_expr
+options is specified, the filter will automatically select a YCbCr
+colorspace. If one of the red_expr , green_expr , or
+blue_expr options is specified, it will select an RGB
+colorspace.
+
+
If one of the chrominance expression is not defined, it falls back on the other
+one. If no alpha expression is specified it will evaluate to opaque value.
+If none of chrominance expressions are specified, they will evaluate
+to the luminance expression.
+
+
The expressions can use the following variables and functions:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+The coordinates of the current sample.
+
+
+W
+H
+The width and height of the image.
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+p(x, y)
+Return the value of the pixel at location (x ,y ) of the current
+plane.
+
+
+lum(x, y)
+Return the value of the pixel at location (x ,y ) of the luminance
+plane.
+
+
+cb(x, y)
+Return the value of the pixel at location (x ,y ) of the
+blue-difference chroma plane. Return 0 if there is no such plane.
+
+
+cr(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red-difference chroma plane. Return 0 if there is no such plane.
+
+
+r(x, y)
+g(x, y)
+b(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red/green/blue component. Return 0 if there is no such component.
+
+
+alpha(x, y)
+Return the value of the pixel at location (x ,y ) of the alpha
+plane. Return 0 if there is no such plane.
+
+
+
+
For functions, if x and y are outside the area, the value will be
+automatically clipped to the closer edge.
+
+
+
9.40.1 Examples# TOC
+
+
+ Flip the image horizontally:
+
+
+ Generate a bidimensional sine wave, with angle PI/3
and a
+wavelength of 100 pixels:
+
+
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
+
+
+ Generate a fancy enigmatic moving light:
+
+
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
+
+
+ Generate a quick emboss effect:
+
+
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
+
+
+ Modify RGB components depending on pixel position:
+
+
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
+
+
+ Create a radial gradient that is the same size as the input (also see
+the vignette filter):
+
+
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
+
+
+ Create a linear gradient to use as a mask for another filter, then
+compose with overlay . In this example the video will gradually
+become more blurry from the top to the bottom of the y-axis as defined
+by the linear gradient:
+
+
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
+
+
+
+
+
9.41 gradfun# TOC
+
+
Fix the banding artifacts that are sometimes introduced into nearly flat
+regions by truncation to 8bit color depth.
+Interpolate the gradients that should go where the bands are, and
+dither them.
+
+
It is designed for playback only. Do not use it prior to
+lossy compression, because compression tends to lose the dither and
+bring back the bands.
+
+
It accepts the following parameters:
+
+
+strength
+The maximum amount by which the filter will change any one pixel. This is also
+the threshold for detecting nearly flat regions. Acceptable values range from
+.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
+valid range.
+
+
+radius
+The neighborhood to fit the gradient to. A larger radius makes for smoother
+gradients, but also prevents the filter from modifying the pixels near detailed
+regions. Acceptable values are 8-32; the default value is 16. Out-of-range
+values will be clipped to the valid range.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+strength [:radius ]
+
+
+
9.41.1 Examples# TOC
+
+
+ Apply the filter with a 3.5
strength and radius of 8
:
+
+
+ Specify radius, omitting the strength (which will fall-back to the default
+value):
+
+
+
+
+
+
9.42 haldclut# TOC
+
+
Apply a Hald CLUT to a video stream.
+
+
First input is the video stream to process, and second one is the Hald CLUT.
+The Hald CLUT input can be a simple picture or a complete video stream.
+
+
The filter accepts the following options:
+
+
+shortest
+Force termination when the shortest input terminates. Default is 0
.
+
+repeatlast
+Continue applying the last CLUT after the end of the stream. A value of
+0
disable the filter after the last frame of the CLUT is reached.
+Default is 1
.
+
+
+
+
haldclut
also has the same interpolation options as lut3d (both
+filters share the same internals).
+
+
More information about the Hald CLUT can be found on Eskil Steenberg’s website
+(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
+
+
+
9.42.1 Workflow examples# TOC
+
+
+
9.42.1.1 Hald CLUT video stream# TOC
+
+
Generate an identity Hald CLUT stream altered with various effects:
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+
+
Note: make sure you use a lossless codec.
+
+
Then use it with haldclut
to apply it on some random stream:
+
+
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+
+
The Hald CLUT will be applied to the 10 first seconds (duration of
+clut.nut ), then the latest picture of that CLUT stream will be applied
+to the remaining frames of the mandelbrot
stream.
+
+
+
9.42.1.2 Hald CLUT with preview# TOC
+
+
A Hald CLUT is supposed to be a squared image of Level*Level*Level
by
+Level*Level*Level
pixels. For a given Hald CLUT, FFmpeg will select the
+biggest possible square starting at the top left of the picture. The remaining
+padding pixels (bottom or right) will be ignored. This area can be used to add
+a preview of the Hald CLUT.
+
+
Typically, the following generated Hald CLUT will be supported by the
+haldclut
filter:
+
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
+ pad=iw+320 [padded_clut];
+ smptebars=s=320x256, split [a][b];
+ [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+ [main][b] overlay=W-320" -frames:v 1 clut.png
+
+
+
It contains the original and a preview of the effect of the CLUT: SMPTE color
+bars are displayed on the right-top, and below the same color bars processed by
+the color changes.
+
+
Then, the effect of this Hald CLUT can be visualized with:
+
+
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+
+
+
9.43 hflip# TOC
+
+
Flip the input video horizontally.
+
+
For example, to horizontally flip the input video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "hflip" out.avi
+
+
+
+
9.44 histeq# TOC
+
This filter applies a global color histogram equalization on a
+per-frame basis.
+
+
It can be used to correct video that has a compressed range of pixel
+intensities. The filter redistributes the pixel intensities to
+equalize their distribution across the intensity range. It may be
+viewed as an "automatically adjusting contrast filter". This filter is
+useful only for correcting degraded or poorly captured source
+video.
+
+
The filter accepts the following options:
+
+
+strength
+Determine the amount of equalization to be applied. As the strength
+is reduced, the distribution of pixel intensities more-and-more
+approaches that of the input frame. The value must be a float number
+in the range [0,1] and defaults to 0.200.
+
+
+intensity
+Set the maximum intensity that can be generated and scale the output
+values appropriately. The strength should be set as desired and then
+the intensity can be limited if needed to avoid washing-out. The value
+must be a float number in the range [0,1] and defaults to 0.210.
+
+
+antibanding
+Set the antibanding level. If enabled the filter will randomly vary
+the luminance of output pixels by a small amount to avoid banding of
+the histogram. Possible values are none
, weak
or
+strong
. It defaults to none
.
+
+
+
+
+
9.45 histogram# TOC
+
+
Compute and draw a color distribution histogram for the input video.
+
+
The computed histogram is a representation of the color component
+distribution in an image.
+
+
The filter accepts the following options:
+
+
+mode
+Set histogram mode.
+
+It accepts the following values:
+
+‘levels ’
+Standard histogram that displays the color components distribution in an
+image. Displays color graph for each color component. Shows distribution of
+the Y, U, V, A or R, G, B components, depending on input format, in the
+current frame. Below each graph a color component scale meter is shown.
+
+
+‘color ’
+Displays chroma values (U/V color placement) in a two dimensional
+graph (which is called a vectorscope). The brighter a pixel in the
+vectorscope, the more pixels of the input frame correspond to that pixel
+(i.e., more pixels have this chroma value). The V component is displayed on
+the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
+side being V = 255. The U component is displayed on the vertical (Y) axis,
+with the top representing U = 0 and the bottom representing U = 255.
+
+The position of a white pixel in the graph corresponds to the chroma value of
+a pixel of the input clip. The graph can therefore be used to read the hue
+(color flavor) and the saturation (the dominance of the hue in the color). As
+the hue of a color changes, it moves around the square. At the center of the
+square the saturation is zero, which means that the corresponding pixel has no
+color. If the amount of a specific color is increased (while leaving the other
+colors unchanged) the saturation increases, and the indicator moves towards
+the edge of the square.
+
+
+‘color2 ’
+Chroma values in vectorscope, similar as color
but actual chroma values
+are displayed.
+
+
+‘waveform ’
+Per row/column color component graph. In row mode, the graph on the left side
+represents color component value 0 and the right side represents value = 255.
+In column mode, the top side represents color component value = 0 and bottom
+side represents value = 255.
+
+
+Default value is levels
.
+
+
+level_height
+Set height of level in levels
. Default value is 200
.
+Allowed range is [50, 2048].
+
+
+scale_height
+Set height of color scale in levels
. Default value is 12
.
+Allowed range is [0, 40].
+
+
+step
+Set step for waveform
mode. Smaller values are useful to find out how
+many values of the same luminance are distributed across input rows/columns.
+Default value is 10
. Allowed range is [1, 255].
+
+
+waveform_mode
+Set mode for waveform
. Can be either row
, or column
.
+Default is row
.
+
+
+waveform_mirror
+Set mirroring mode for waveform
. 0
means unmirrored, 1
+means mirrored. In mirrored mode, higher values will be represented on the left
+side for row
mode and at the top for column
mode. Default is
+0
(unmirrored).
+
+
+display_mode
+Set display mode for waveform
and levels
.
+It accepts the following values:
+
+‘parade ’
+Display separate graph for the color components side by side in
+row
waveform mode or one below the other in column
waveform mode
+for waveform
histogram mode. For levels
histogram mode,
+per color component graphs are placed below each other.
+
+Using this display mode in waveform
histogram mode makes it easy to
+spot color casts in the highlights and shadows of an image, by comparing the
+contours of the top and the bottom graphs of each waveform. Since whites,
+grays, and blacks are characterized by exactly equal amounts of red, green,
+and blue, neutral areas of the picture should display three waveforms of
+roughly equal width/height. If not, the correction is easy to perform by
+making level adjustments in the three waveforms.
+
+
+‘overlay ’
+Presents information identical to that in the parade
, except
+that the graphs representing color components are superimposed directly
+over one another.
+
+This display mode in waveform
histogram mode makes it easier to spot
+relative differences or similarities in overlapping areas of the color
+components that are supposed to be identical, such as neutral whites, grays,
+or blacks.
+
+
+Default is parade
.
+
+
+levels_mode
+Set mode for levels
. Can be either linear
, or logarithmic
.
+Default is linear
.
+
+
+
+
+
9.45.1 Examples# TOC
+
+
+ Calculate and draw histogram:
+
+
ffplay -i input -vf histogram
+
+
+
+
+
+
9.46 hqdn3d# TOC
+
+
This is a high precision/quality 3d denoise filter. It aims to reduce
+image noise, producing smooth images and making still images really
+still. It should enhance compressibility.
+
+
It accepts the following optional parameters:
+
+
+luma_spatial
+A non-negative floating point number which specifies spatial luma strength.
+It defaults to 4.0.
+
+
+chroma_spatial
+A non-negative floating point number which specifies spatial chroma strength.
+It defaults to 3.0*luma_spatial /4.0.
+
+
+luma_tmp
+A floating point number which specifies luma temporal strength. It defaults to
+6.0*luma_spatial /4.0.
+
+
+chroma_tmp
+A floating point number which specifies chroma temporal strength. It defaults to
+luma_tmp *chroma_spatial /luma_spatial .
+
+
+
+
+
9.47 hqx# TOC
+
+
Apply a high-quality magnification filter designed for pixel art. This filter
+was originally created by Maxim Stepin.
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for hq2x
, 3
for
+hq3x
and 4
for hq4x
.
+Default is 3
.
+
+
+
+
+
9.48 hue# TOC
+
+
Modify the hue and/or the saturation of the input.
+
+
It accepts the following parameters:
+
+
+h
+Specify the hue angle as a number of degrees. It accepts an expression,
+and defaults to "0".
+
+
+s
+Specify the saturation in the [-10,10] range. It accepts an expression and
+defaults to "1".
+
+
+H
+Specify the hue angle as a number of radians. It accepts an
+expression, and defaults to "0".
+
+
+b
+Specify the brightness in the [-10,10] range. It accepts an expression and
+defaults to "0".
+
+
+
+
h and H are mutually exclusive, and can’t be
+specified at the same time.
+
+
The b , h , H and s option values are
+expressions containing the following constants:
+
+
+n
+frame count of the input frame starting from 0
+
+
+pts
+presentation timestamp of the input frame expressed in time base units
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+tb
+time base of the input video
+
+
+
+
+
9.48.1 Examples# TOC
+
+
+
+
+
9.48.2 Commands# TOC
+
+
This filter supports the following commands:
+
+b
+s
+h
+H
+Modify the hue and/or the saturation and/or brightness of the input video.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
9.49 idet# TOC
+
+
Detect video interlacing type.
+
+
This filter tries to detect if the input frames are interlaced, progressive,
+top or bottom field first. It will also try and detect fields that are
+repeated between adjacent frames (a sign of telecine).
+
+
Single frame detection considers only immediately adjacent frames when classifying each frame.
+Multiple frame detection incorporates the classification history of previous frames.
+
+
The filter will log these metadata values:
+
+
+single.current_frame
+Detected type of current frame using single-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+single.tff
+Cumulative number of frames detected as top field first using single-frame detection.
+
+
+multiple.tff
+Cumulative number of frames detected as top field first using multiple-frame detection.
+
+
+single.bff
+Cumulative number of frames detected as bottom field first using single-frame detection.
+
+
+multiple.current_frame
+Detected type of current frame using multiple-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+multiple.bff
+Cumulative number of frames detected as bottom field first using multiple-frame detection.
+
+
+single.progressive
+Cumulative number of frames detected as progressive using single-frame detection.
+
+
+multiple.progressive
+Cumulative number of frames detected as progressive using multiple-frame detection.
+
+
+single.undetermined
+Cumulative number of frames that could not be classified using single-frame detection.
+
+
+multiple.undetermined
+Cumulative number of frames that could not be classified using multiple-frame detection.
+
+
+repeated.current_frame
+Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
+
+
+repeated.neither
+Cumulative number of frames with no repeated field.
+
+
+repeated.top
+Cumulative number of frames with the top field repeated from the previous frame’s top field.
+
+
+repeated.bottom
+Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
+
+
+
+
The filter accepts the following options:
+
+
+intl_thres
+Set interlacing threshold.
+
+prog_thres
+Set progressive threshold.
+
+repeat_thres
+Threshold for repeated field detection.
+
+half_life
+Number of frames after which a given frame’s contribution to the
+statistics is halved (i.e., it contributes only 0.5 to its
+classification). The default of 0 means that all frames seen are given
+full weight of 1.0 forever.
+
+analyze_interlaced_flag
+When this is not 0 then idet will use the specified number of frames to determine
+if the interlaced flag is accurate, it will not count undetermined frames.
+If the flag is found to be accurate it will be used without any further
+computations, if it is found to be inaccurate it will be cleared without any
+further computations. This allows inserting the idet filter as a low computational
+method to clean up the interlaced flag.
+
+
+
+
+
+
+
Deinterleave or interleave fields.
+
+
This filter allows one to process interlaced image fields without
+deinterlacing them. Deinterleaving splits the input frame into 2
+fields (so called half pictures). Odd lines are moved to the top
+half of the output image, even lines to the bottom half.
+You can process (filter) them independently and then re-interleave them.
+
+
The filter accepts the following options:
+
+
+luma_mode, l
+chroma_mode, c
+alpha_mode, a
+Available values for luma_mode , chroma_mode and
+alpha_mode are:
+
+
+‘none ’
+Do nothing.
+
+
+‘deinterleave, d ’
+Deinterleave fields, placing one above the other.
+
+
+‘interleave, i ’
+Interleave fields. Reverse the effect of deinterleaving.
+
+
+Default value is none
.
+
+
+luma_swap, ls
+chroma_swap, cs
+alpha_swap, as
+Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
+
+
+
+
+
9.51 interlace# TOC
+
+
Simple interlacing filter from progressive contents. This interleaves upper (or
+lower) lines from odd frames with lower (or upper) lines from even frames,
+halving the frame rate and preserving image height.
+
+
+
Original Original New Frame
+ Frame 'j' Frame 'j+1' (tff)
+ ========== =========== ==================
+ Line 0 --------------------> Frame 'j' Line 0
+ Line 1 Line 1 ----> Frame 'j+1' Line 1
+ Line 2 ---------------------> Frame 'j' Line 2
+ Line 3 Line 3 ----> Frame 'j+1' Line 3
+ ... ... ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+
+
It accepts the following optional parameters:
+
+
+scan
+This determines whether the interlaced frame is taken from the even
+(tff - default) or odd (bff) lines of the progressive frame.
+
+
+lowpass
+Enable (default) or disable the vertical lowpass filter to avoid twitter
+interlacing and reduce moire patterns.
+
+
+
+
+
9.52 kerndeint# TOC
+
+
Deinterlace input video by applying Donald Graft’s adaptive kernel
+deinterlacing. Work on interlaced parts of a video to produce
+progressive frames.
+
+
The description of the accepted parameters follows.
+
+
+thresh
+Set the threshold which affects the filter’s tolerance when
+determining if a pixel line must be processed. It must be an integer
+in the range [0,255] and defaults to 10. A value of 0 will result in
+applying the process to every pixel.
+
+
+map
+Paint pixels exceeding the threshold value to white if set to 1.
+Default is 0.
+
+
+order
+Set the fields order. Swap fields if set to 1, leave fields alone if
+0. Default is 0.
+
+
+sharp
+Enable additional sharpening if set to 1. Default is 0.
+
+
+twoway
+Enable twoway sharpening if set to 1. Default is 0.
+
+
+
+
+
9.52.1 Examples# TOC
+
+
+ Apply default values:
+
+
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
+
+
+ Enable additional sharpening:
+
+
+ Paint processed pixels in white:
+
+
+
+
+
9.53 lenscorrection# TOC
+
+
Correct radial lens distortion.
+
+
This filter can be used to correct for radial distortion as can result from the use
+of wide angle lenses, and thereby re-rectify the image. To find the right parameters
+one can use tools available for example as part of opencv or simply trial-and-error.
+To use opencv use the calibration sample (under samples/cpp) from the opencv sources
+and extract the k1 and k2 coefficients from the resulting matrix.
+
+
Note that effectively the same filter is available in the open-source tools Krita and
+Digikam from the KDE project.
+
+
In contrast to the vignette filter, which can also be used to compensate lens errors,
+this filter corrects the distortion of the image, whereas vignette corrects the
+brightness distribution, so you may want to use both filters together in certain
+cases, though you will have to take care of ordering, i.e. whether vignetting should
+be applied before or after lens correction.
+
+
+
9.53.1 Options# TOC
+
+
The filter accepts the following options:
+
+
+cx
+Relative x-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+width.
+
+cy
+Relative y-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+height.
+
+k1
+Coefficient of the quadratic correction term. 0.5 means no correction.
+
+k2
+Coefficient of the double quadratic correction term. 0.5 means no correction.
+
+
+
+
The formula that generates the correction is:
+
+
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
+
+
+where r_0 is half of the image diagonal and r_src and r_tgt are the
+distances from the focal point in the source and target images, respectively.
+
+
+
9.54 lut3d# TOC
+
+
Apply a 3D LUT to an input video.
+
+
The filter accepts the following options:
+
+
+file
+Set the 3D LUT file name.
+
+Currently supported formats:
+
+‘3dl ’
+AfterEffects
+
+‘cube ’
+Iridas
+
+‘dat ’
+DaVinci
+
+‘m3d ’
+Pandora
+
+
+
+interp
+Select interpolation mode.
+
+Available values are:
+
+
+‘nearest ’
+Use values from the nearest defined point.
+
+‘trilinear ’
+Interpolate values using the 8 points defining a cube.
+
+‘tetrahedral ’
+Interpolate values using a tetrahedron.
+
+
+
+
+
+
+
9.55 lut, lutrgb, lutyuv# TOC
+
+
Compute a look-up table for binding each pixel component input value
+to an output value, and apply it to the input video.
+
+
lutyuv applies a lookup table to a YUV input video, lutrgb
+to an RGB input video.
+
+
These filters accept the following parameters:
+
+c0
+set first pixel component expression
+
+c1
+set second pixel component expression
+
+c2
+set third pixel component expression
+
+c3
+set fourth pixel component expression, corresponds to the alpha component
+
+
+r
+set red component expression
+
+g
+set green component expression
+
+b
+set blue component expression
+
+a
+alpha component expression
+
+
+y
+set Y/luminance component expression
+
+u
+set U/Cb component expression
+
+v
+set V/Cr component expression
+
+
+
+
Each of them specifies the expression to use for computing the lookup table for
+the corresponding pixel component values.
+
+
The exact component associated to each of the c* options depends on the
+format in input.
+
+
The lut filter requires either YUV or RGB pixel formats in input,
+lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
+
+
The expressions can contain the following constants and functions:
+
+
+w
+h
+The input width and height.
+
+
+val
+The input value for the pixel component.
+
+
+clipval
+The input value, clipped to the minval -maxval range.
+
+
+maxval
+The maximum value for the pixel component.
+
+
+minval
+The minimum value for the pixel component.
+
+
+negval
+The negated value for the pixel component value, clipped to the
+minval -maxval range; it corresponds to the expression
+"maxval-clipval+minval".
+
+
+clip(val)
+The computed value in val , clipped to the
+minval -maxval range.
+
+
+gammaval(gamma)
+The computed gamma correction value of the pixel component value,
+clipped to the minval -maxval range. It corresponds to the
+expression
+"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
+
+
+
+
+
All expressions default to "val".
+
+
+
9.55.1 Examples# TOC
+
+
+
+
+
9.56 mergeplanes# TOC
+
+
Merge color channel components from several video streams.
+
+
The filter accepts up to 4 input streams, and merges selected input
+planes to the output video.
+
+
This filter accepts the following options:
+
+mapping
+Set input to output plane mapping. Default is 0
.
+
+The mapping is specified as a bitmap. It should be specified as a
+hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
+mapping for the first plane of the output stream. ’A’ sets the number of
+the input stream to use (from 0 to 3), and ’a’ the plane number of the
+corresponding input to use (from 0 to 3). The rest of the mapping is
+similar, ’Bb’ describes the mapping for the output stream second
+plane, ’Cc’ describes the mapping for the output stream third plane and
+’Dd’ describes the mapping for the output stream fourth plane.
+
+
+format
+Set output pixel format. Default is yuva444p
.
+
+
+
+
+
9.56.1 Examples# TOC
+
+
+ Merge three gray video streams of same width and height into single video stream:
+
+
[a0][a1][a2]mergeplanes=0x001020:yuv444p
+
+
+ Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
+
+
[a0][a1]mergeplanes=0x00010210:yuva444p
+
+
+ Swap Y and A plane in yuva444p stream:
+
+
format=yuva444p,mergeplanes=0x03010200:yuva444p
+
+
+ Swap U and V plane in yuv420p stream:
+
+
format=yuv420p,mergeplanes=0x000201:yuv420p
+
+
+ Cast a rgb24 clip to yuv444p:
+
+
format=rgb24,mergeplanes=0x000102:yuv444p
+
+
+
+
+
9.57 mcdeint# TOC
+
+
Apply motion-compensation deinterlacing.
+
+
It needs one field per frame as input and must thus be used together
+with yadif=1/3 or equivalent.
+
+
This filter accepts the following options:
+
+mode
+Set the deinterlacing mode.
+
+It accepts one of the following values:
+
+‘fast ’
+‘medium ’
+‘slow ’
+use iterative motion estimation
+
+‘extra_slow ’
+like ‘slow ’, but use multiple reference frames.
+
+
+Default value is ‘fast ’.
+
+
+parity
+Set the picture field parity assumed for the input video. It must be
+one of the following values:
+
+
+‘0, tff ’
+assume top field first
+
+‘1, bff ’
+assume bottom field first
+
+
+
+Default value is ‘bff ’.
+
+
+qp
+Set per-block quantization parameter (QP) used by the internal
+encoder.
+
+Higher values should result in a smoother motion vector field but less
+optimal individual vectors. Default value is 1.
+
+
+
+
+
+
+
Apply an MPlayer filter to the input video.
+
+
This filter provides a wrapper around some of the filters of
+MPlayer/MEncoder.
+
+
This wrapper is considered experimental. Some of the wrapped filters
+may not work properly and we may drop support for them, as they will
+be implemented natively into FFmpeg. Thus you should avoid
+depending on them when writing portable scripts.
+
+
The filter accepts the parameters:
+filter_name [:=]filter_params
+
+
filter_name is the name of a supported MPlayer filter,
+filter_params is a string containing the parameters accepted by
+the named filter.
+
+
The list of the currently supported filters follows:
+
+eq2
+eq
+ilpack
+softpulldown
+
+
+
The parameter syntax and behavior for the listed filters are the same
+as those of the corresponding MPlayer filters. For detailed instructions check
+the "VIDEO FILTERS" section in the MPlayer manual.
+
+
+
9.58.1 Examples# TOC
+
+
+ Adjust gamma, brightness, contrast:
+
+
+
+
See also mplayer(1), http://www.mplayerhq.hu/ .
+
+
+
9.59 mpdecimate# TOC
+
+
Drop frames that do not differ greatly from the previous frame in
+order to reduce frame rate.
+
+
The main use of this filter is for very-low-bitrate encoding
+(e.g. streaming over dialup modem), but it could in theory be used for
+fixing movies that were inverse-telecined incorrectly.
+
+
A description of the accepted options follows.
+
+
+max
+Set the maximum number of consecutive frames which can be dropped (if
+positive), or the minimum interval between dropped frames (if
+negative). If the value is 0, the frame is dropped regardless of the
+number of previous sequentially dropped frames.
+
+Default value is 0.
+
+
+hi
+lo
+frac
+Set the dropping threshold values.
+
+Values for hi and lo are for 8x8 pixel blocks and
+represent actual pixel value differences, so a threshold of 64
+corresponds to 1 unit of difference for each pixel, or the same spread
+out differently over the block.
+
+A frame is a candidate for dropping if no 8x8 blocks differ by more
+than a threshold of hi , and if no more than frac blocks (1
+meaning the whole image) differ by more than a threshold of lo .
+
+Default value for hi is 64*12, default value for lo is
+64*5, and default value for frac is 0.33.
+
+
+
+
+
+
9.60 negate# TOC
+
+
Negate input video.
+
+
It accepts an integer in input; if non-zero it negates the
+alpha component (if available). The default value in input is 0.
+
+
+
9.61 noformat# TOC
+
+
Force libavfilter not to use any of the specified pixel formats for the
+input to the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
9.61.1 Examples# TOC
+
+
+ Force libavfilter to use a format different from yuv420p for the
+input to the vflip filter:
+
+
noformat=pix_fmts=yuv420p,vflip
+
+
+ Convert the input video to any of the formats not contained in the list:
+
+
noformat=yuv420p|yuv444p|yuv410p
+
+
+
+
+
9.62 noise# TOC
+
+
Add noise on video input frame.
+
+
The filter accepts the following options:
+
+
+all_seed
+c0_seed
+c1_seed
+c2_seed
+c3_seed
+Set noise seed for specific pixel component or all pixel components in case
+of all_seed . Default value is 123457
.
+
+
+all_strength, alls
+c0_strength, c0s
+c1_strength, c1s
+c2_strength, c2s
+c3_strength, c3s
+Set noise strength for specific pixel component or all pixel components in case
+of all_strength . Default value is 0
. Allowed range is [0, 100].
+
+
+all_flags, allf
+c0_flags, c0f
+c1_flags, c1f
+c2_flags, c2f
+c3_flags, c3f
+Set pixel component flags or set flags for all components if all_flags .
+Available values for component flags are:
+
+‘a ’
+averaged temporal noise (smoother)
+
+‘p ’
+mix random noise with a (semi)regular pattern
+
+‘t ’
+temporal noise (noise pattern changes between frames)
+
+‘u ’
+uniform noise (gaussian otherwise)
+
+
+
+
+
+
+
9.62.1 Examples# TOC
+
+
Add temporal and uniform noise to input video:
+
+
noise=alls=20:allf=t+u
+
+
+
+
9.63 null# TOC
+
+
Pass the video source unchanged to the output.
+
+
+
9.64 ocv# TOC
+
+
Apply a video transform using libopencv.
+
+
To enable this filter, install the libopencv library and headers and
+configure FFmpeg with --enable-libopencv
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the libopencv filter to apply.
+
+
+filter_params
+The parameters to pass to the libopencv filter. If not specified, the default
+values are assumed.
+
+
+
+
+
Refer to the official libopencv documentation for more precise
+information:
+http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
+
+
Several libopencv filters are supported; see the following subsections.
+
+
+
9.64.1 dilate# TOC
+
+
Dilate an image by using a specific structuring element.
+It corresponds to the libopencv function cvDilate
.
+
+
It accepts the parameters: struct_el |nb_iterations .
+
+
struct_el represents a structuring element, and has the syntax:
+cols xrows +anchor_x xanchor_y /shape
+
+
cols and rows represent the number of columns and rows of
+the structuring element, anchor_x and anchor_y the anchor
+point, and shape the shape for the structuring element. shape
+must be "rect", "cross", "ellipse", or "custom".
+
+
If the value for shape is "custom", it must be followed by a
+string of the form "=filename ". The file with name
+filename is assumed to represent a binary image, with each
+printable character corresponding to a bright pixel. When a custom
+shape is used, cols and rows are ignored, the number
+of columns and rows of the read file are assumed instead.
+
+
The default value for struct_el is "3x3+0x0/rect".
+
+
nb_iterations specifies the number of times the transform is
+applied to the image, and defaults to 1.
+
+
Some examples:
+
+
# Use the default values
+ocv=dilate
+
+# Dilate using a structuring element with a 5x5 cross, iterating two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# Read the shape from the file diamond.shape, iterating two times.
+# The file diamond.shape may contain a pattern of characters like this
+# *
+# ***
+# *****
+# ***
+# *
+# The specified columns and rows are ignored
+# but the anchor point coordinates are not
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+
+
+
9.64.2 erode# TOC
+
+
Erode an image by using a specific structuring element.
+It corresponds to the libopencv function cvErode
.
+
+
It accepts the parameters: struct_el :nb_iterations ,
+with the same syntax and semantics as the dilate filter.
+
+
+
9.64.3 smooth# TOC
+
+
Smooth the input video.
+
+
The filter takes the following parameters:
+type |param1 |param2 |param3 |param4 .
+
+
type is the type of smooth filter to apply, and must be one of
+the following values: "blur", "blur_no_scale", "median", "gaussian",
+or "bilateral". The default value is "gaussian".
+
+
The meaning of param1 , param2 , param3 , and param4
+depend on the smooth type. param1 and
+param2 accept integer positive values or 0. param3 and
+param4 accept floating point values.
+
+
The default value for param1 is 3. The default value for the
+other parameters is 0.
+
+
These parameters correspond to the parameters assigned to the
+libopencv function cvSmooth
.
+
+
+
9.65 overlay# TOC
+
+
Overlay one video on top of another.
+
+
It takes two inputs and has one output. The first input is the "main"
+video on which the second input is overlaid.
+
+
It accepts the following parameters:
+
+
A description of the accepted options follows.
+
+
+x
+y
+Set the expression for the x and y coordinates of the overlaid video
+on the main video. Default value is "0" for both expressions. In case
+the expression is invalid, it is set to a huge value (meaning that the
+overlay will not be displayed within the output visible area).
+
+
+eof_action
+The action to take when EOF is encountered on the secondary input; it accepts
+one of the following values:
+
+
+repeat
+Repeat the last frame (the default).
+
+endall
+End both streams.
+
+pass
+Pass the main input through.
+
+
+
+
+eval
+Set when the expressions for x , and y are evaluated.
+
+It accepts the following values:
+
+‘init ’
+only evaluate expressions once during the filter initialization or
+when a command is processed
+
+
+‘frame ’
+evaluate expressions for each incoming frame
+
+
+
+Default value is ‘frame ’.
+
+
+shortest
+If set to 1, force the output to terminate when the shortest input
+terminates. Default value is 0.
+
+
+format
+Set the format for the output video.
+
+It accepts the following values:
+
+‘yuv420 ’
+force YUV420 output
+
+
+‘yuv422 ’
+force YUV422 output
+
+
+‘yuv444 ’
+force YUV444 output
+
+
+‘rgb ’
+force RGB output
+
+
+
+Default value is ‘yuv420 ’.
+
+
+rgb (deprecated)
+If set to 1, force the filter to accept inputs in the RGB
+color space. Default value is 0. This option is deprecated, use
+format instead.
+
+
+repeatlast
+If set to 1, force the filter to draw the last overlay frame over the
+main input until the end of the stream. A value of 0 disables this
+behavior. Default value is 1.
+
+
+
+
The x , and y expressions can contain the following
+parameters.
+
+
+main_w, W
+main_h, H
+The main input width and height.
+
+
+overlay_w, w
+overlay_h, h
+The overlay input width and height.
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values of the output
+format. For example for the pixel format "yuv422p" hsub is 2 and
+vsub is 1.
+
+
+n
+the number of input frame, starting from 0
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
Note that the n , pos , t variables are available only
+when evaluation is done per frame , and will evaluate to NAN
+when eval is set to ‘init ’.
+
+
Be aware that frames are taken from each input video in timestamp
+order, hence, if their initial timestamps differ, it is a good idea
+to pass the two inputs through a setpts=PTS-STARTPTS filter to
+have them begin in the same zero timestamp, as the example for
+the movie filter does.
+
+
You can chain together more overlays but you should test the
+efficiency of such approach.
+
+
+
9.65.1 Commands# TOC
+
+
This filter supports the following commands:
+
+x
+y
+Modify the x and y of the overlay input.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
9.65.2 Examples# TOC
+
+
+
+
+
9.66 owdenoise# TOC
+
+
Apply Overcomplete Wavelet denoiser.
+
+
The filter accepts the following options:
+
+
+depth
+Set depth.
+
+Larger depth values will denoise lower frequency components more, but
+slow down filtering.
+
+Must be an int in the range 8-16, default is 8
.
+
+
+luma_strength, ls
+Set luma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+chroma_strength, cs
+Set chroma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+
+
+
9.67 pad# TOC
+
+
Add paddings to the input image, and place the original input at the
+provided x , y coordinates.
+
+
It accepts the following parameters:
+
+
+width, w
+height, h
+Specify an expression for the size of the output image with the
+paddings added. If the value for width or height is 0, the
+corresponding input size is used for the output.
+
+The width expression can reference the value set by the
+height expression, and vice versa.
+
+The default value of width and height is 0.
+
+
+x
+y
+Specify the offsets to place the input image at within the padded area,
+with respect to the top/left border of the output image.
+
+The x expression can reference the value set by the y
+expression, and vice versa.
+
+The default value of x and y is 0.
+
+
+color
+Specify the color of the padded area. For the syntax of this option,
+check the "Color" section in the ffmpeg-utils manual.
+
+The default value of color is "black".
+
+
+
+
The value for the width , height , x , and y
+options are expressions containing the following constants:
+
+
+in_w
+in_h
+The input video width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output width and height (the size of the padded area), as
+specified by the width and height expressions.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+x
+y
+The x and y offsets as specified by the x and y
+expressions, or NAN if not yet specified.
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
9.67.1 Examples# TOC
+
+
+
+
+
9.68 perspective# TOC
+
+
Correct perspective of video not recorded perpendicular to the screen.
+
+
A description of the accepted parameters follows.
+
+
+x0
+y0
+x1
+y1
+x2
+y2
+x3
+y3
+Set coordinates expression for top left, top right, bottom left and bottom right corners.
+Default values are 0:0:W:0:0:H:W:H
with which perspective will remain unchanged.
+If the sense
option is set to source
, then the specified points will be sent
+to the corners of the destination. If the sense
option is set to destination
,
+then the corners of the source will be sent to the specified coordinates.
+
+The expressions can use the following variables:
+
+
+W
+H
+the width and height of video frame.
+
+
+
+
+interpolation
+Set interpolation for perspective correction.
+
+It accepts the following values:
+
+‘linear ’
+‘cubic ’
+
+
+Default value is ‘linear ’.
+
+
+sense
+Set interpretation of coordinate options.
+
+It accepts the following values:
+
+‘0, source ’
+
+Send point in the source specified by the given coordinates to
+the corners of the destination.
+
+
+‘1, destination ’
+
+Send the corners of the source to the point in the destination specified
+by the given coordinates.
+
+Default value is ‘source ’.
+
+
+
+
+
+
+
9.69 phase# TOC
+
+
Delay interlaced video by one field time so that the field order changes.
+
+
The intended use is to fix PAL movies that have been captured with the
+opposite field order to the film-to-video transfer.
+
+
A description of the accepted parameters follows.
+
+
+mode
+Set phase mode.
+
+It accepts the following values:
+
+‘t ’
+Capture field order top-first, transfer bottom-first.
+Filter will delay the bottom field.
+
+
+‘b ’
+Capture field order bottom-first, transfer top-first.
+Filter will delay the top field.
+
+
+‘p ’
+Capture and transfer with the same field order. This mode only exists
+for the documentation of the other options to refer to, but if you
+actually select it, the filter will faithfully do nothing.
+
+
+‘a ’
+Capture field order determined automatically by field flags, transfer
+opposite.
+Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
+basis using field flags. If no field information is available,
+then this works just like ‘u ’.
+
+
+‘u ’
+Capture unknown or varying, transfer opposite.
+Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
+analyzing the images and selecting the alternative that produces best
+match between the fields.
+
+
+‘T ’
+Capture top-first, transfer unknown or varying.
+Filter selects among ‘t ’ and ‘p ’ using image analysis.
+
+
+‘B ’
+Capture bottom-first, transfer unknown or varying.
+Filter selects among ‘b ’ and ‘p ’ using image analysis.
+
+
+‘A ’
+Capture determined by field flags, transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
+image analysis. If no field information is available, then this works just
+like ‘U ’. This is the default mode.
+
+
+‘U ’
+Both capture and transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
+
+
+
+
+
+
+
9.70 pixdesctest# TOC
+
+
Pixel format descriptor test filter, mainly useful for internal
+testing. The output video should be equal to the input video.
+
+
For example:
+
+
format=monow, pixdesctest
+
+
+
can be used to test the monowhite pixel format descriptor definition.
+
+
+
+
+
Enable the specified chain of postprocessing subfilters using libpostproc. This
+library should be automatically selected with a GPL build (--enable-gpl
).
+Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
+Each subfilter and some options have a short and a long name that can be used
+interchangeably, i.e. dr/dering are the same.
+
+
The filters accept the following options:
+
+
+subfilters
+Set postprocessing subfilters string.
+
+
+
+
All subfilters share common options to determine their scope:
+
+
+a/autoq
+Honor the quality commands for this subfilter.
+
+
+c/chrom
+Do chrominance filtering, too (default).
+
+
+y/nochrom
+Do luminance filtering only (no chrominance).
+
+
+n/noluma
+Do chrominance filtering only (no luminance).
+
+
+
+
These options can be appended after the subfilter name, separated by a ’|’.
+
+
Available subfilters are:
+
+
+hb/hdeblock[|difference[|flatness]]
+Horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+vb/vdeblock[|difference[|flatness]]
+Vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+ha/hadeblock[|difference[|flatness]]
+Accurate horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+va/vadeblock[|difference[|flatness]]
+Accurate vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+
+
The horizontal and vertical deblocking filters share the difference and
+flatness values so you cannot set different horizontal and vertical
+thresholds.
+
+
+h1/x1hdeblock
+Experimental horizontal deblocking filter
+
+
+v1/x1vdeblock
+Experimental vertical deblocking filter
+
+
+dr/dering
+Deringing filter
+
+
+tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+threshold1
+larger -> stronger filtering
+
+threshold2
+larger -> stronger filtering
+
+threshold3
+larger -> stronger filtering
+
+
+
+
+al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+f/fullyrange
+Stretch luminance to 0-255
.
+
+
+
+
+lb/linblenddeint
+Linear blend deinterlacing filter that deinterlaces the given block by
+filtering all lines with a (1 2 1)
filter.
+
+
+li/linipoldeint
+Linear interpolating deinterlacing filter that deinterlaces the given block by
+linearly interpolating every second line.
+
+
+ci/cubicipoldeint
+Cubic interpolating deinterlacing filter deinterlaces the given block by
+cubically interpolating every second line.
+
+
+md/mediandeint
+Median deinterlacing filter that deinterlaces the given block by applying a
+median filter to every second line.
+
+
+fd/ffmpegdeint
+FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
+second line with a (-1 4 2 4 -1)
filter.
+
+
+l5/lowpass5
+Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
+block by filtering all lines with a (-1 2 6 2 -1)
filter.
+
+
+fq/forceQuant[|quantizer]
+Overrides the quantizer table from the input with the constant quantizer you
+specify.
+
+quantizer
+Quantizer to use
+
+
+
+
+de/default
+Default pp filter combination (hb|a,vb|a,dr|a
)
+
+
+fa/fast
+Fast pp filter combination (h1|a,v1|a,dr|a
)
+
+
+ac
+High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
+
+
+
+
+
9.71.1 Examples# TOC
+
+
+ Apply horizontal and vertical deblocking, deringing and automatic
+brightness/contrast:
+
+
+ Apply default filters without brightness/contrast correction:
+
+
+ Apply default filters and temporal denoiser:
+
+
pp=default/tmpnoise|1|2|3
+
+
+ Apply deblocking on luminance only, and switch vertical deblocking on or off
+automatically depending on available CPU time:
+
+
+
+
+
9.72 pp7# TOC
+
Apply Postprocessing filter 7. It is a variant of the spp filter,
+similar to spp = 6 with 7 point DCT, where only the center sample is
+used after IDCT.
+
+
The filter accepts the following options:
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range
+0 to 63. If not set, the filter will use the QP from the video stream
+(if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding.
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+‘medium ’
+Set medium thresholding (good results, default).
+
+
+
+
+
+
+
9.73 psnr# TOC
+
+
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
+Ratio) between two input videos.
+
+
This filter takes in input two input videos, the first input is
+considered the "main" source and is passed unchanged to the
+output. The second input is used as a "reference" video for computing
+the PSNR.
+
+
Both video inputs must have the same resolution and pixel format for
+this filter to work correctly. Also it assumes that both inputs
+have the same number of frames, which are compared one by one.
+
+
The obtained average PSNR is printed through the logging system.
+
+
The filter stores the accumulated MSE (mean squared error) of each
+frame, and at the end of the processing it is averaged across all frames
+equally, and the following formula is applied to obtain the PSNR:
+
+
+
PSNR = 10*log10(MAX^2/MSE)
+
+
+
Where MAX is the average of the maximum values of each component of the
+image.
+
+
The description of the accepted parameters follows.
+
+
+stats_file, f
+If specified the filter will use the named file to save the PSNR of
+each individual frame.
+
+
+
+
The file printed if stats_file is selected, contains a sequence of
+key/value pairs of the form key :value for each compared
+couple of frames.
+
+
A description of each shown parameter follows:
+
+
+n
+sequential number of the input frame, starting from 1
+
+
+mse_avg
+Mean Square Error pixel-by-pixel average difference of the compared
+frames, averaged over all the image components.
+
+
+mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+Mean Square Error pixel-by-pixel average difference of the compared
+frames for the component specified by the suffix.
+
+
+psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+Peak Signal to Noise ratio of the compared frames for the component
+specified by the suffix.
+
+
+
+
For example:
+
+
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+
+
On this example the input file being processed is compared with the
+reference file ref_movie.mpg . The PSNR of each individual frame
+is stored in stats.log .
+
+
+
9.74 pullup# TOC
+
+
Pulldown reversal (inverse telecine) filter, capable of handling mixed
+hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
+content.
+
+
The pullup filter is designed to take advantage of future context in making
+its decisions. This filter is stateless in the sense that it does not lock
+onto a pattern to follow, but it instead looks forward to the following
+fields in order to identify matches and rebuild progressive frames.
+
+
To produce content with an even framerate, insert the fps filter after
+pullup, use fps=24000/1001
if the input frame rate is 29.97fps,
+fps=24
for 30fps and the (rare) telecined 25fps input.
+
+
The filter accepts the following options:
+
+
+jl
+jr
+jt
+jb
+These options set the amount of "junk" to ignore at the left, right, top, and
+bottom of the image, respectively. Left and right are in units of 8 pixels,
+while top and bottom are in units of 2 lines.
+The default is 8 pixels on each side.
+
+
+sb
+Set the strict breaks. Setting this option to 1 will reduce the chances of
+filter generating an occasional mismatched frame, but it may also cause an
+excessive number of frames to be dropped during high motion sequences.
+Conversely, setting it to -1 will make filter match fields more easily.
+This may help processing of video where there is slight blurring between
+the fields, but may also cause there to be interlaced frames in the output.
+Default value is 0
.
+
+
+mp
+Set the metric plane to use. It accepts the following values:
+
+‘l ’
+Use luma plane.
+
+
+‘u ’
+Use chroma blue plane.
+
+
+‘v ’
+Use chroma red plane.
+
+
+
+This option may be set to use chroma plane instead of the default luma plane
+for doing filter’s computations. This may improve accuracy on very clean
+source material, but more likely will decrease accuracy, especially if there
+is chroma noise (rainbow effect) or any grayscale video.
+The main purpose of setting mp to a chroma plane is to reduce CPU
+load and make pullup usable in realtime on slow machines.
+
+
+
+
For best results (without duplicated frames in the output file) it is
+necessary to change the output frame rate. For example, to inverse
+telecine NTSC input:
+
+
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+
+
+
+
+
Change video quantization parameters (QP).
+
+
The filter accepts the following option:
+
+
+qp
+Set expression for quantization parameter.
+
+
+
+
The expression is evaluated through the eval API and can contain, among others,
+the following constants:
+
+
+known
+1 if index is not 129, 0 otherwise.
+
+
+qp
+Sequential index starting from -129 to 128.
+
+
+
+
+
9.75.1 Examples# TOC
+
+
+ Some equation like:
+
+
+
+
+
9.76 removelogo# TOC
+
+
Suppress a TV station logo, using an image file to determine which
+pixels comprise the logo. It works by filling in the pixels that
+comprise the logo with neighboring pixels.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filter bitmap file, which can be any image format supported by
+libavformat. The width and height of the image file must match those of the
+video stream being processed.
+
+
+
+
Pixels in the provided bitmap image with a value of zero are not
+considered part of the logo, non-zero pixels are considered part of
+the logo. If you use white (255) for the logo and black (0) for the
+rest, you will be safe. For making the filter bitmap, it is
+recommended to take a screen capture of a black frame with the logo
+visible, and then using a threshold filter followed by the erode
+filter once or twice.
+
+
If needed, little splotches can be fixed manually. Remember that if
+logo pixels are not covered, the filter quality will be much
+reduced. Marking too many pixels as part of the logo does not hurt as
+much, but it will increase the amount of blurring needed to cover over
+the image and will destroy more information than necessary, and extra
+pixels will slow things down on a large logo.
+
+
+
9.77 rotate# TOC
+
+
Rotate video by an arbitrary angle expressed in radians.
+
+
The filter accepts the following options:
+
+
A description of the optional parameters follows.
+
+angle, a
+Set an expression for the angle by which to rotate the input video
+clockwise, expressed as a number of radians. A negative value will
+result in a counter-clockwise rotation. By default it is set to "0".
+
+This expression is evaluated for each frame.
+
+
+out_w, ow
+Set the output width expression, default value is "iw".
+This expression is evaluated just once during configuration.
+
+
+out_h, oh
+Set the output height expression, default value is "ih".
+This expression is evaluated just once during configuration.
+
+
+bilinear
+Enable bilinear interpolation if set to 1, a value of 0 disables
+it. Default value is 1.
+
+
+fillcolor, c
+Set the color used to fill the output area not covered by the rotated
+image. For the general syntax of this option, check the "Color" section in the
+ffmpeg-utils manual. If the special value "none" is selected then no
+background is printed (useful for example if the background is never shown).
+
+Default value is "black".
+
+
+
+
The expressions for the angle and the output size can contain the
+following constants and functions:
+
+
+n
+sequential number of the input frame, starting from 0. It is always NAN
+before the first frame is filtered.
+
+
+t
+time in seconds of the input frame, it is set to 0 when the filter is
+configured. It is always NAN before the first frame is filtered.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_w, iw
+in_h, ih
+the input video width and height
+
+
+out_w, ow
+out_h, oh
+the output width and height, that is the size of the padded area as
+specified by the width and height expressions
+
+
+rotw(a)
+roth(a)
+the minimal width/height required for completely containing the input
+video rotated by a radians.
+
+These are only available when computing the out_w and
+out_h expressions.
+
+
+
+
+
9.77.1 Examples# TOC
+
+
+ Rotate the input by PI/6 radians clockwise:
+
+
+ Rotate the input by PI/6 radians counter-clockwise:
+
+
+ Rotate the input by 45 degrees clockwise:
+
+
+ Apply a constant rotation with period T, starting from an angle of PI/3:
+
+
+ Make the input video rotation oscillating with a period of T
+seconds and an amplitude of A radians:
+
+
rotate=A*sin(2*PI/T*t)
+
+
+ Rotate the video, output size is chosen so that the whole rotating
+input video is always completely contained in the output:
+
+
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
+
+
+ Rotate the video, reduce the output size so that no background is ever
+shown:
+
+
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
+
+
+
+
+
9.77.2 Commands# TOC
+
+
The filter supports the following commands:
+
+
+a, angle
+Set the angle expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
9.78 sab# TOC
+
+
Apply Shape Adaptive Blur.
+
+
The filter accepts the following options:
+
+
+luma_radius, lr
+Set luma blur filter strength, must be a value in range 0.1-4.0, default
+value is 1.0. A greater value will result in a more blurred image, and
+in slower processing.
+
+
+luma_pre_filter_radius, lpfr
+Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
+value is 1.0.
+
+
+luma_strength, ls
+Set luma maximum difference between pixels to still be considered, must
+be a value in the 0.1-100.0 range, default value is 1.0.
+
+
+chroma_radius, cr
+Set chroma blur filter strength, must be a value in range 0.1-4.0. A
+greater value will result in a more blurred image, and in slower
+processing.
+
+
+chroma_pre_filter_radius, cpfr
+Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
+
+
+chroma_strength, cs
+Set chroma maximum difference between pixels to still be considered,
+must be a value in the 0.1-100.0 range.
+
+
+
+
Each chroma option value, if not explicitly specified, is set to the
+corresponding luma option value.
+
+
+
9.79 scale# TOC
+
+
Scale (resize) the input video, using the libswscale library.
+
+
The scale filter forces the output display aspect ratio to be the same
+of the input, by changing the output sample aspect ratio.
+
+
If the input image format is different from the format requested by
+the next filter, the scale filter will convert the input to the
+requested format.
+
+
+
9.79.1 Options# TOC
+
The filter accepts the following options, or any of the options
+supported by the libswscale scaler.
+
+
See (ffmpeg-scaler)the ffmpeg-scaler manual for
+the complete list of scaler options.
+
+
+width, w
+height, h
+Set the output video dimension expression. Default value is the input
+dimension.
+
+If the value is 0, the input width is used for the output.
+
+If one of the values is -1, the scale filter will use a value that
+maintains the aspect ratio of the input image, calculated from the
+other specified dimension. If both of them are -1, the input size is
+used.
+
+If one of the values is -n with n > 1, the scale filter will also use a value
+that maintains the aspect ratio of the input image, calculated from the other
+specified dimension. After that it will, however, make sure that the calculated
+dimension is divisible by n and adjust the value if necessary.
+
+See below for the list of accepted constants for use in the dimension
+expression.
+
+
+interl
+Set the interlacing mode. It accepts the following values:
+
+
+‘1 ’
+Force interlaced aware scaling.
+
+
+‘0 ’
+Do not apply interlaced scaling.
+
+
+‘-1 ’
+Select interlaced aware scaling depending on whether the source frames
+are flagged as interlaced or not.
+
+
+
+Default value is ‘0 ’.
+
+
+flags
+Set libswscale scaling flags. See
+(ffmpeg-scaler)the ffmpeg-scaler manual for the
+complete list of values. If not explicitly specified the filter applies
+the default flags.
+
+
+size, s
+Set the video size. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+in_color_matrix
+out_color_matrix
+Set in/output YCbCr color space type.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder.
+
+If not specified, the color space type depends on the pixel format.
+
+Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘bt709 ’
+Format conforming to International Telecommunication Union (ITU)
+Recommendation BT.709.
+
+
+‘fcc ’
+Set color space conforming to the United States Federal Communications
+Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
+
+
+‘bt601 ’
+Set color space conforming to:
+
+
+ ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
+
+ ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
+
+ Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
+
+
+
+
+‘smpte240m ’
+Set color space conforming to SMPTE ST 240:1999.
+
+
+
+
+in_range
+out_range
+Set in/output YCbCr sample range.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder. If not specified, the
+range depends on the pixel format. Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘jpeg/full/pc ’
+Set full range (0-255 in case of 8-bit luma).
+
+
+‘mpeg/tv ’
+Set "MPEG" range (16-235 in case of 8-bit luma).
+
+
+
+
+force_original_aspect_ratio
+Enable decreasing or increasing output video width or height if necessary to
+keep the original aspect ratio. Possible values:
+
+
+‘disable ’
+Scale the video as specified and disable this feature.
+
+
+‘decrease ’
+The output video dimensions will automatically be decreased if needed.
+
+
+‘increase ’
+The output video dimensions will automatically be increased if needed.
+
+
+
+
+One useful instance of this option is that when you know a specific device’s
+maximum allowed resolution, you can use this to limit the output video to
+that, while retaining the aspect ratio. For example, device A allows
+1280x720 playback, and your video is 1920x800. Using this option (set it to
+decrease) and specifying 1280x720 to the command line makes the output
+1280x533.
+
+Please note that this is a different thing than specifying -1 for w
+or h , you still need to specify the output resolution for this option
+to work.
+
+
+
+
+
The values of the w and h options are expressions
+containing the following constants:
+
+
+in_w
+in_h
+The input width and height
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (scaled) width and height
+
+
+ow
+oh
+These are the same as out_w and out_h
+
+
+a
+The same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+The input display aspect ratio. Calculated from (iw / ih) * sar
.
+
+
+hsub
+vsub
+horizontal and vertical input chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+ohsub
+ovsub
+horizontal and vertical output chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
9.79.2 Examples# TOC
+
+
+
+
+
9.80 separatefields# TOC
+
+
The separatefields
takes a frame-based video input and splits
+each frame into its components fields, producing a new half height clip
+with twice the frame rate and twice the frame count.
+
+
This filter uses field-dominance information in the frame to decide which
+of each pair of fields to place first in the output.
+If it gets it wrong use setfield filter before separatefields
filter.
+
+
+
9.81 setdar, setsar# TOC
+
+
The setdar
filter sets the Display Aspect Ratio for the filter
+output video.
+
+
This is done by changing the specified Sample (aka Pixel) Aspect
+Ratio, according to the following equation:
+
+
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+
+
Keep in mind that the setdar
filter does not modify the pixel
+dimensions of the video frame. Also, the display aspect ratio set by
+this filter may be changed by later filters in the filterchain,
+e.g. in case of scaling or if another "setdar" or a "setsar" filter is
+applied.
+
+
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
+the filter output video.
+
+
Note that as a consequence of the application of this filter, the
+output display aspect ratio will change according to the equation
+above.
+
+
Keep in mind that the sample aspect ratio set by the setsar
+filter may be changed by later filters in the filterchain, e.g. if
+another "setsar" or a "setdar" filter is applied.
+
+
It accepts the following parameters:
+
+
+r, ratio, dar (setdar
only), sar (setsar
only)
+Set the aspect ratio used by the filter.
+
+The parameter can be a floating point number string, an expression, or
+a string of the form num :den , where num and
+den are the numerator and denominator of the aspect ratio. If
+the parameter is not specified, it is assumed the value "0".
+In case the form "num :den " is used, the :
character
+should be escaped.
+
+
+max
+Set the maximum integer value to use for expressing numerator and
+denominator when reducing the expressed aspect ratio to a rational.
+Default value is 100
.
+
+
+
+
+
The parameter sar is an expression containing
+the following constants:
+
+
+E, PI, PHI
+These are approximated values for the mathematical constants e
+(Euler’s number), pi (Greek pi), and phi (the golden ratio).
+
+
+w, h
+The input width and height.
+
+
+a
+These are the same as w / h .
+
+
+sar
+The input sample aspect ratio.
+
+
+dar
+The input display aspect ratio. It is the same as
+(w / h ) * sar .
+
+
+hsub, vsub
+Horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
9.81.1 Examples# TOC
+
+
+ To change the display aspect ratio to 16:9, specify one of the following:
+
+
setdar=dar=1.77777
+setdar=dar=16/9
+setdar=dar=1.77777
+
+
+ To change the sample aspect ratio to 10:11, specify:
+
+
+ To set a display aspect ratio of 16:9, and specify a maximum integer value of
+1000 in the aspect ratio reduction, use the command:
+
+
setdar=ratio=16/9:max=1000
+
+
+
+
+
+
9.82 setfield# TOC
+
+
Force field for the output video frame.
+
+
The setfield
filter marks the interlace type field for the
+output frames. It does not change the input frame, but only sets the
+corresponding property, which affects how the frame is treated by
+following filters (e.g. fieldorder
or yadif
).
+
+
The filter accepts the following options:
+
+
+mode
+Available values are:
+
+
+‘auto ’
+Keep the same field property.
+
+
+‘bff ’
+Mark the frame as bottom-field-first.
+
+
+‘tff ’
+Mark the frame as top-field-first.
+
+
+‘prog ’
+Mark the frame as progressive.
+
+
+
+
+
+
+
9.83 showinfo# TOC
+
+
Show a line containing various information for each input video frame.
+The input video is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The Presentation TimeStamp of the input frame, expressed as a number of
+time base units. The time base unit depends on the filter input pad.
+
+
+pts_time
+The Presentation TimeStamp of the input frame, expressed as a number of
+seconds.
+
+
+pos
+The position of the frame in the input stream, or -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic video).
+
+
+fmt
+The pixel format name.
+
+
+sar
+The sample aspect ratio of the input frame, expressed in the form
+num /den .
+
+
+s
+The size of the input frame. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+i
+The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
+for bottom field first).
+
+
+iskey
+This is 1 if the frame is a key frame, 0 otherwise.
+
+
+type
+The picture type of the input frame ("I" for an I-frame, "P" for a
+P-frame, "B" for a B-frame, or "?" for an unknown type).
+Also refer to the documentation of the AVPictureType
enum and of
+the av_get_picture_type_char
function defined in
+libavutil/avutil.h .
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
+
+
+plane_checksum
+The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
+expressed in the form "[c0 c1 c2 c3 ]".
+
+
+
+
+
9.84 shuffleplanes# TOC
+
+
Reorder and/or duplicate video planes.
+
+
It accepts the following parameters:
+
+
+map0
+The index of the input plane to be used as the first output plane.
+
+
+map1
+The index of the input plane to be used as the second output plane.
+
+
+map2
+The index of the input plane to be used as the third output plane.
+
+
+map3
+The index of the input plane to be used as the fourth output plane.
+
+
+
+
+
The first plane has the index 0. The default is to keep the input unchanged.
+
+
Swap the second and third planes of the input:
+
+
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
+
+
+
+
9.85 signalstats# TOC
+
Evaluate various visual metrics that assist in determining issues associated
+with the digitization of analog video media.
+
+
By default the filter will log these metadata values:
+
+
+YMIN
+Display the minimal Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+YLOW
+Display the Y value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YAVG
+Display the average Y value within the input frame. Expressed in range of
+[0-255].
+
+
+YHIGH
+Display the Y value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YMAX
+Display the maximum Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+UMIN
+Display the minimal U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+ULOW
+Display the U value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UAVG
+Display the average U value within the input frame. Expressed in range of
+[0-255].
+
+
+UHIGH
+Display the U value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UMAX
+Display the maximum U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VMIN
+Display the minimal V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VLOW
+Display the V value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VAVG
+Display the average V value within the input frame. Expressed in range of
+[0-255].
+
+
+VHIGH
+Display the V value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VMAX
+Display the maximum V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+SATMIN
+Display the minimal saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATLOW
+Display the saturation value at the 10% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATAVG
+Display the average saturation value within the input frame. Expressed in range
+of [0-~181.02].
+
+
+SATHIGH
+Display the saturation value at the 90% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATMAX
+Display the maximum saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+HUEMED
+Display the median value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+HUEAVG
+Display the average value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+YDIF
+Display the average of sample value difference between all values of the Y
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+UDIF
+Display the average of sample value difference between all values of the U
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+VDIF
+Display the average of sample value difference between all values of the V
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+
+
The filter accepts the following options:
+
+
+stat
+out
+
+stat specify an additional form of image analysis.
+out output video with the specified type of pixel highlighted.
+
+Both options accept the following values:
+
+
+‘tout ’
+Identify temporal outliers pixels. A temporal outlier is a pixel
+unlike the neighboring pixels of the same field. Examples of temporal outliers
+include the results of video dropouts, head clogs, or tape tracking issues.
+
+
+‘vrep ’
+Identify vertical line repetition. Vertical line repetition includes
+similar rows of pixels within a frame. In born-digital video vertical line
+repetition is common, but this pattern is uncommon in video digitized from an
+analog source. When it occurs in video that results from the digitization of an
+analog source it can indicate concealment from a dropout compensator.
+
+
+‘brng ’
+Identify pixels that fall outside of legal broadcast range.
+
+
+
+
+color, c
+Set the highlight color for the out option. The default color is
+yellow.
+
+
+
+
+
9.85.1 Examples# TOC
+
+
+
+
+
9.86 smartblur# TOC
+
+
Blur the input video without impacting the outlines.
+
+
It accepts the following options:
+
+
+luma_radius, lr
+Set the luma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+luma_strength, ls
+Set the luma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+luma_threshold, lt
+Set the luma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+chroma_radius, cr
+Set the chroma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+chroma_strength, cs
+Set the chroma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+chroma_threshold, ct
+Set the chroma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+
+
If a chroma option is not explicitly set, the corresponding luma value
+is set.
+
+
+
9.87 stereo3d# TOC
+
+
Convert between different stereoscopic image formats.
+
+
The filters accept the following options:
+
+
+in
+Set stereoscopic image format of input.
+
+Available values for input image formats are:
+
+‘sbsl ’
+side by side parallel (left eye left, right eye right)
+
+
+‘sbsr ’
+side by side crosseye (right eye left, left eye right)
+
+
+‘sbs2l ’
+side by side parallel with half width resolution
+(left eye left, right eye right)
+
+
+‘sbs2r ’
+side by side crosseye with half width resolution
+(right eye left, left eye right)
+
+
+‘abl ’
+above-below (left eye above, right eye below)
+
+
+‘abr ’
+above-below (right eye above, left eye below)
+
+
+‘ab2l ’
+above-below with half height resolution
+(left eye above, right eye below)
+
+
+‘ab2r ’
+above-below with half height resolution
+(right eye above, left eye below)
+
+
+‘al ’
+alternating frames (left eye first, right eye second)
+
+
+‘ar ’
+alternating frames (right eye first, left eye second)
+
+Default value is ‘sbsl ’.
+
+
+
+
+out
+Set stereoscopic image format of output.
+
+Available values for output image formats are all the input formats as well as:
+
+‘arbg ’
+anaglyph red/blue gray
+(red filter on left eye, blue filter on right eye)
+
+
+‘argg ’
+anaglyph red/green gray
+(red filter on left eye, green filter on right eye)
+
+
+‘arcg ’
+anaglyph red/cyan gray
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arch ’
+anaglyph red/cyan half colored
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcc ’
+anaglyph red/cyan color
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcd ’
+anaglyph red/cyan color optimized with the least squares projection of dubois
+(red filter on left eye, cyan filter on right eye)
+
+
+‘agmg ’
+anaglyph green/magenta gray
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmh ’
+anaglyph green/magenta half colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmc ’
+anaglyph green/magenta colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmd ’
+anaglyph green/magenta color optimized with the least squares projection of dubois
+(green filter on left eye, magenta filter on right eye)
+
+
+‘aybg ’
+anaglyph yellow/blue gray
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybh ’
+anaglyph yellow/blue half colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybc ’
+anaglyph yellow/blue colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybd ’
+anaglyph yellow/blue color optimized with the least squares projection of dubois
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘irl ’
+interleaved rows (left eye has top row, right eye starts on next row)
+
+
+‘irr ’
+interleaved rows (right eye has top row, left eye starts on next row)
+
+
+‘ml ’
+mono output (left eye only)
+
+
+‘mr ’
+mono output (right eye only)
+
+
+
+Default value is ‘arcd ’.
+
+
+
+
+
9.87.1 Examples# TOC
+
+
+ Convert input video from side by side parallel to anaglyph yellow/blue dubois:
+
+
+ Convert input video from above below (left eye above, right eye below) to side by side crosseye.
+
+
+
+
+
9.88 spp# TOC
+
+
Apply a simple postprocessing filter that compresses and decompresses the image
+at several (or - in the case of quality level 6
- all) shifts
+and average the results.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-6. If set to 0
, the filter will have no
+effect. A value of 6
means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding (default).
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
9.89 subtitles# TOC
+
+
Draw subtitles on top of input video using the libass library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libass
. This filter also requires a build with libavcodec and
+libavformat to convert the passed subtitles file to ASS (Advanced Substation
+Alpha) subtitles format.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filename of the subtitle file to read. It must be specified.
+
+
+original_size
+Specify the size of the original video, the video for which the ASS file
+was composed. For the syntax of this option, check the "Video size" section in
+the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
+this is necessary to correctly scale the fonts if the aspect ratio has been
+changed.
+
+
+charenc
+Set subtitles input character encoding. subtitles
filter only. Only
+useful if not UTF-8.
+
+
+stream_index, si
+Set subtitles stream index. subtitles
filter only.
+
+
+
+
If the first key is not specified, it is assumed that the first value
+specifies the filename .
+
+
For example, to render the file sub.srt on top of the input
+video, use the command:
+
+
+
which is equivalent to:
+
+
subtitles=filename=sub.srt
+
+
+
To render the default subtitles stream from file video.mkv , use:
+
+
+
To render the second subtitles stream from that file, use:
+
+
subtitles=video.mkv:si=1
+
+
+
+
9.90 super2xsai# TOC
+
+
Scale the input by 2x and smooth using the Super2xSaI (Scale and
+Interpolate) pixel art scaling algorithm.
+
+
Useful for enlarging pixel art images without reducing sharpness.
+
+
+
9.91 swapuv# TOC
+
Swap U & V plane.
+
+
+
9.92 telecine# TOC
+
+
Apply telecine process to the video.
+
+
This filter accepts the following options:
+
+
+first_field
+
+‘top, t ’
+top field first
+
+‘bottom, b ’
+bottom field first
+The default value is top
.
+
+
+
+
+pattern
+A string of numbers representing the pulldown pattern you wish to apply.
+The default value is 23
.
+
+
+
+
+
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+
+
+
9.93 thumbnail# TOC
+
Select the most representative frame in a given sequence of consecutive frames.
+
+
The filter accepts the following options:
+
+
+n
+Set the frames batch size to analyze; in a set of n frames, the filter
+will pick one of them, and then handle the next batch of n frames until
+the end. Default is 100
.
+
+
+
+
Since the filter keeps track of the whole frames sequence, a bigger n
+value will result in a higher memory usage, so a high value is not recommended.
+
+
+
9.93.1 Examples# TOC
+
+
+ Extract one picture each 50 frames:
+
+
+ Complete example of a thumbnail creation with ffmpeg
:
+
+
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
+
+
+
+
+
9.94 tile# TOC
+
+
Tile several successive frames together.
+
+
The filter accepts the following options:
+
+
+layout
+Set the grid size (i.e. the number of lines and columns). For the syntax of
+this option, check the "Video size" section in the ffmpeg-utils manual.
+
+
+nb_frames
+Set the maximum number of frames to render in the given area. It must be less
+than or equal to w x h. The default value is 0
, meaning all
+the area will be used.
+
+
+margin
+Set the outer border margin in pixels.
+
+
+padding
+Set the inner border thickness (i.e. the number of pixels between frames). For
+more advanced padding options (such as having different values for the edges),
+refer to the pad video filter.
+
+
+color
+Specify the color of the unused area. For the syntax of this option, check the
+"Color" section in the ffmpeg-utils manual. The default value of color
+is "black".
+
+
+
+
+
9.94.1 Examples# TOC
+
+
+
+
+
9.95 tinterlace# TOC
+
+
Perform various types of temporal field interlacing.
+
+
Frames are counted starting from 1, so the first input frame is
+considered odd.
+
+
The filter accepts the following options:
+
+
+mode
+Specify the mode of the interlacing. This option can also be specified
+as a value alone. See below for a list of values for this option.
+
+Available values are:
+
+
+‘merge, 0 ’
+Move odd frames into the upper field, even into the lower field,
+generating a double height frame at half frame rate.
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+‘drop_odd, 1 ’
+Only output even frames, odd frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+ 22222 44444
+ 22222 44444
+ 22222 44444
+ 22222 44444
+
+
+
+‘drop_even, 2 ’
+Only output odd frames, even frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+11111 33333
+11111 33333
+11111 33333
+
+
+
+‘pad, 3 ’
+Expand each frame to full height, but pad alternate lines with black,
+generating a frame with double height at the same input frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+
+
+
+
+‘interleave_top, 4 ’
+Interleave the upper field from odd frames with the lower field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+
+‘interleave_bottom, 5 ’
+Interleave the lower field from odd frames with the upper field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+
+Output:
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+
+
+
+
+‘interlacex2, 6 ’
+Double frame rate with unchanged height. Frames are inserted each
+containing the second temporal field from the previous input frame and
+the first temporal field from the next input frame. This mode relies on
+the top_field_first flag. Useful for interlaced video displays with no
+field synchronisation.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+
+Output:
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+
+
+
+
+
+
+Numeric values are deprecated but are accepted for backward
+compatibility reasons.
+
+Default mode is merge
.
+
+
+flags
+Specify flags influencing the filter process.
+
+Available value for flags is:
+
+
+low_pass_filter, vlfp
+Enable vertical low-pass filtering in the filter.
+Vertical low-pass filtering is required when creating an interlaced
+destination from a progressive source which contains high-frequency
+vertical detail. Filtering will reduce interlace ’twitter’ and Moire
+patterning.
+
+Vertical low-pass filtering can only be enabled for mode
+interleave_top and interleave_bottom .
+
+
+
+
+
+
+
+
9.96 transpose# TOC
+
+
Transpose rows with columns in the input video and optionally flip it.
+
+
It accepts the following parameters:
+
+
+dir
+Specify the transposition direction.
+
+Can assume the following values:
+
+‘0, 4, cclock_flip ’
+Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
+
+
L.R L.l
+. . -> . .
+l.r R.r
+
+
+
+‘1, 5, clock ’
+Rotate by 90 degrees clockwise, that is:
+
+
L.R l.L
+. . -> . .
+l.r r.R
+
+
+
+‘2, 6, cclock ’
+Rotate by 90 degrees counterclockwise, that is:
+
+
L.R R.r
+. . -> . .
+l.r L.l
+
+
+
+‘3, 7, clock_flip ’
+Rotate by 90 degrees clockwise and vertically flip, that is:
+
+
L.R r.R
+. . -> . .
+l.r l.L
+
+
+
+
+For values between 4-7, the transposition is only done if the input
+video geometry is portrait and not landscape. These values are
+deprecated, the passthrough
option should be used instead.
+
+Numerical values are deprecated, and should be dropped in favor of
+symbolic constants.
+
+
+passthrough
+Do not apply the transposition if the input geometry matches the one
+specified by the specified value. It accepts the following values:
+
+‘none ’
+Always apply transposition.
+
+‘portrait ’
+Preserve portrait geometry (when height >= width ).
+
+‘landscape ’
+Preserve landscape geometry (when width >= height ).
+
+
+
+Default value is none
.
+
+
+
+
For example to rotate by 90 degrees clockwise and preserve portrait
+layout:
+
+
transpose=dir=1:passthrough=portrait
+
+
+
The command above can also be specified as:
+
+
+
+
9.97 trim# TOC
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Specify the time of the start of the kept section, i.e. the frame with the
+timestamp start will be the first frame in the output.
+
+
+end
+Specify the time of the first frame that will be dropped, i.e. the frame
+immediately preceding the one with the timestamp end will be the last
+frame in the output.
+
+
+start_pts
+This is the same as start , except this option sets the start timestamp
+in timebase units instead of seconds.
+
+
+end_pts
+This is the same as end , except this option sets the end timestamp
+in timebase units instead of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_frame
+The number of the first frame that should be passed to the output.
+
+
+end_frame
+The number of the first frame that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _frame variants simply count the
+frames that pass through the filter. Also note that this filter does not modify
+the timestamps. If you wish for the output timestamps to start at zero, insert a
+setpts filter after the trim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all the frames that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple trim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -vf trim=60:120
+
+
+ Keep only the first second:
+
+
ffmpeg -i INPUT -vf trim=duration=1
+
+
+
+
+
+
+
9.98 unsharp# TOC
+
+
Sharpen or blur the input video.
+
+
It accepts the following parameters:
+
+
+luma_msize_x, lx
+Set the luma matrix horizontal size. It must be an odd integer between
+3 and 63. The default value is 5.
+
+
+luma_msize_y, ly
+Set the luma matrix vertical size. It must be an odd integer between 3
+and 63. The default value is 5.
+
+
+luma_amount, la
+Set the luma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 1.0.
+
+
+chroma_msize_x, cx
+Set the chroma matrix horizontal size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_msize_y, cy
+Set the chroma matrix vertical size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_amount, ca
+Set the chroma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 0.0.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
All parameters are optional and default to the equivalent of the
+string ’5:5:1.0:5:5:0.0’.
+
+
+
9.98.1 Examples# TOC
+
+
+ Apply strong luma sharpen effect:
+
+
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
+
+
+ Apply a strong blur of both luma and chroma parameters:
+
+
+
+
+
9.99 uspp# TOC
+
+
Apply ultra slow/simple postprocessing filter that compresses and decompresses
+the image at several (or - in the case of quality level 8
- all)
+shifts and average the results.
+
+
The way this differs from the behavior of spp is that uspp actually encodes &
+decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
+DCT similar to MJPEG.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-8. If set to 0
, the filter will have no
+effect. A value of 8
means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+
+
+
9.100 vidstabdetect# TOC
+
+
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
+vidstabtransform for pass 2.
+
+
This filter generates a file with relative translation and rotation
+transform information about subsequent frames, which is then used by
+the vidstabtransform filter.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
This filter accepts the following options:
+
+
+result
+Set the path to the file used to write the transforms information.
+Default value is transforms.trf .
+
+
+shakiness
+Set how shaky the video is and how quick the camera is. It accepts an
+integer in the range 1-10, a value of 1 means little shakiness, a
+value of 10 means strong shakiness. Default value is 5.
+
+
+accuracy
+Set the accuracy of the detection process. It must be a value in the
+range 1-15. A value of 1 means low accuracy, a value of 15 means high
+accuracy. Default value is 15.
+
+
+stepsize
+Set stepsize of the search process. The region around minimum is
+scanned with 1 pixel resolution. Default value is 6.
+
+
+mincontrast
+Set minimum contrast. Below this value a local measurement field is
+discarded. Must be a floating point value in the range 0-1. Default
+value is 0.3.
+
+
+tripod
+Set reference frame number for tripod mode.
+
+If enabled, the motion of the frames is compared to a reference frame
+in the filtered stream, identified by the specified number. The idea
+is to compensate all movements in a more-or-less static scene and keep
+the camera view absolutely still.
+
+If set to 0, it is disabled. The frames are counted starting from 1.
+
+
+show
+Show fields and transforms in the resulting frames. It accepts an
+integer in the range 0-2. Default value is 0, which disables any
+visualization.
+
+
+
+
+
9.100.1 Examples# TOC
+
+
+ Use default values:
+
+
+ Analyze strongly shaky movie and put the results in file
+mytransforms.trf :
+
+
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
+
+
+ Visualize the result of internal transformations in the resulting
+video:
+
+
+ Analyze a video with medium shakiness using ffmpeg
:
+
+
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
+
+
+
+
+
9.101 vidstabtransform# TOC
+
+
Video stabilization/deshaking: pass 2 of 2,
+see vidstabdetect for pass 1.
+
+
Read a file with transform information for each frame and
+apply/compensate them. Together with the vidstabdetect
+filter this can be used to deshake videos. See also
+http://public.hronopik.de/vid.stab . It is important to also use
+the unsharp filter, see below.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
+
9.101.1 Options# TOC
+
+
+input
+Set path to the file used to read the transforms. Default value is
+transforms.trf .
+
+
+smoothing
+Set the number of frames (value*2 + 1) used for lowpass filtering the
+camera movements. Default value is 10.
+
+For example a number of 10 means that 21 frames are used (10 in the
+past and 10 in the future) to smooth the motion in the video. A
+larger value leads to a smoother video, but limits the acceleration of
+the camera (pan/tilt movements). 0 is a special case where a static
+camera is simulated.
+
+
+optalgo
+Set the camera path optimization algorithm.
+
+Accepted values are:
+
+‘gauss ’
+gaussian kernel low-pass filter on camera motion (default)
+
+‘avg ’
+averaging on transformations
+
+
+
+
+maxshift
+Set maximal number of pixels to translate frames. Default value is -1,
+meaning no limit.
+
+
+maxangle
+Set maximal angle in radians (degree*PI/180) to rotate frames. Default
+value is -1, meaning no limit.
+
+
+crop
+Specify how to deal with borders that may be visible due to movement
+compensation.
+
+Available values are:
+
+‘keep ’
+keep image information from previous frame (default)
+
+‘black ’
+fill the border black
+
+
+
+
+invert
+Invert transforms if set to 1. Default value is 0.
+
+
+relative
+Consider transforms as relative to previous frame if set to 1,
+absolute if set to 0. Default value is 0.
+
+
+zoom
+Set percentage to zoom. A positive value will result in a zoom-in
+effect, a negative value in a zoom-out effect. Default value is 0 (no
+zoom).
+
+
+optzoom
+Set optimal zooming to avoid borders.
+
+Accepted values are:
+
+‘0 ’
+disabled
+
+‘1 ’
+optimal static zoom value is determined (only very strong movements
+will lead to visible borders) (default)
+
+‘2 ’
+optimal adaptive zoom value is determined (no borders will be
+visible), see zoomspeed
+
+
+
+Note that the value given at zoom is added to the one calculated here.
+
+
+zoomspeed
+Set percent to zoom maximally each frame (enabled when
+optzoom is set to 2). Range is from 0 to 5, default value is
+0.25.
+
+
+interpol
+Specify type of interpolation.
+
+Available values are:
+
+‘no ’
+no interpolation
+
+‘linear ’
+linear only horizontal
+
+‘bilinear ’
+linear in both directions (default)
+
+‘bicubic ’
+cubic in both directions (slow)
+
+
+
+
+tripod
+Enable virtual tripod mode if set to 1, which is equivalent to
+relative=0:smoothing=0
. Default value is 0.
+
+Use also tripod
option of vidstabdetect .
+
+
+debug
+Increase log verbosity if set to 1. Also the detected global motions
+are written to the temporary file global_motions.trf . Default
+value is 0.
+
+
+
+
+
9.101.2 Examples# TOC
+
+
+
+
+
9.102 vflip# TOC
+
+
Flip the input video vertically.
+
+
For example, to vertically flip a video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "vflip" out.avi
+
+
+
+
9.103 vignette# TOC
+
+
Make or reverse a natural vignetting effect.
+
+
The filter accepts the following options:
+
+
+angle, a
+Set lens angle expression as a number of radians.
+
+The value is clipped in the [0,PI/2]
range.
+
+Default value: "PI/5"
+
+
+x0
+y0
+Set center coordinates expressions. Respectively "w/2"
and "h/2"
+by default.
+
+
+mode
+Set forward/backward mode.
+
+Available modes are:
+
+‘forward ’
+The larger the distance from the central point, the darker the image becomes.
+
+
+‘backward ’
+The larger the distance from the central point, the brighter the image becomes.
+This can be used to reverse a vignette effect, though there is no automatic
+detection to extract the lens angle and other settings (yet). It can
+also be used to create a burning effect.
+
+
+
+Default value is ‘forward ’.
+
+
+eval
+Set evaluation mode for the expressions (angle , x0 , y0 ).
+
+It accepts the following values:
+
+‘init ’
+Evaluate expressions only once during the filter initialization.
+
+
+‘frame ’
+Evaluate expressions for each incoming frame. This is way slower than the
+‘init ’ mode since it requires all the scalers to be re-computed, but it
+allows advanced dynamic expressions.
+
+
+
+Default value is ‘init ’.
+
+
+dither
+Set dithering to reduce the circular banding effects. Default is 1
+(enabled).
+
+
+aspect
+Set vignette aspect. This setting allows one to adjust the shape of the vignette.
+Setting this value to the SAR of the input will make a rectangular vignetting
+following the dimensions of the video.
+
+Default is 1/1
.
+
+
+
+
+
9.103.1 Expressions# TOC
+
+
The angle , x0 and y0 expressions can contain the
+following parameters.
+
+
+w
+h
+input width and height
+
+
+n
+the number of input frame, starting from 0
+
+
+pts
+the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
+TB units, NAN if undefined
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+the PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in seconds, NAN if undefined
+
+
+tb
+time base of the input video
+
+
+
+
+
+
9.103.2 Examples# TOC
+
+
+ Apply simple strong vignetting effect:
+
+
+ Make a flickering vignetting:
+
+
vignette='PI/4+random(1)*PI/50':eval=frame
+
+
+
+
+
+
9.104 w3fdif# TOC
+
+
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
+Deinterlacing Filter").
+
+
Based on the process described by Martin Weston for BBC R&D, and
+implemented based on the de-interlace algorithm written by Jim
+Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
+uses filter coefficients calculated by BBC R&D.
+
+
There are two sets of filter coefficients, so called "simple":
+and "complex". Which set of filter coefficients is used can
+be set by passing an optional parameter:
+
+
+filter
+Set the interlacing filter coefficients. Accepts one of the following values:
+
+
+‘simple ’
+Simple filter coefficient set.
+
+‘complex ’
+More-complex filter coefficient set.
+
+
+Default value is ‘complex ’.
+
+
+deint
+Specify which frames to deinterlace. Accept one of the following values:
+
+
+‘all ’
+Deinterlace all frames,
+
+‘interlaced ’
+Only deinterlace frames marked as interlaced.
+
+
+
+Default value is ‘all ’.
+
+
+
+
+
9.105 xbr# TOC
+
Apply the xBR high-quality magnification filter which is designed for pixel
+art. It follows a set of edge-detection rules, see
+http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for 2xBR
, 3
for
+3xBR
and 4
for 4xBR
.
+Default is 3
.
+
+
+
+
+
9.106 yadif# TOC
+
+
Deinterlace the input video ("yadif" means "yet another deinterlacing
+filter").
+
+
It accepts the following parameters:
+
+
+
+mode
+The interlacing mode to adopt. It accepts one of the following values:
+
+
+0, send_frame
+Output one frame for each frame.
+
+1, send_field
+Output one frame for each field.
+
+2, send_frame_nospatial
+Like send_frame
, but it skips the spatial interlacing check.
+
+3, send_field_nospatial
+Like send_field
, but it skips the spatial interlacing check.
+
+
+
+The default value is send_frame
.
+
+
+parity
+The picture field parity assumed for the input interlaced video. It accepts one
+of the following values:
+
+
+0, tff
+Assume the top field is first.
+
+1, bff
+Assume the bottom field is first.
+
+-1, auto
+Enable automatic detection of field parity.
+
+
+
+The default value is auto
.
+If the interlacing is unknown or the decoder does not export this information,
+top field first will be assumed.
+
+
+deint
+Specify which frames to deinterlace. Accept one of the following
+values:
+
+
+0, all
+Deinterlace all frames.
+
+1, interlaced
+Only deinterlace frames marked as interlaced.
+
+
+
+The default value is all
.
+
+
+
+
+
9.107 zoompan# TOC
+
+
Apply Zoom & Pan effect.
+
+
This filter accepts the following options:
+
+
+zoom, z
+Set the zoom expression. Default is 1.
+
+
+x
+y
+Set the x and y expression. Default is 0.
+
+
+d
+Set the duration expression in number of frames.
+This sets for how many number of frames effect will last for
+single input image.
+
+
+s
+Set the output image size, default is ’hd720’.
+
+
+
+
Each expression can contain the following constants:
+
+
+in_w, iw
+Input width.
+
+
+in_h, ih
+Input height.
+
+
+out_w, ow
+Output width.
+
+
+out_h, oh
+Output height.
+
+
+in
+Input frame count.
+
+
+on
+Output frame count.
+
+
+x
+y
+Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
+for current input frame.
+
+
+px
+py
+’x’ and ’y’ of last output frame of previous input frame or 0 when there was
+not yet such frame (first input frame).
+
+
+zoom
+Last calculated zoom from ’z’ expression for current input frame.
+
+
+pzoom
+Last calculated zoom of last output frame of previous input frame.
+
+
+duration
+Number of output frames for current input frame. Calculated from ’d’ expression
+for each input frame.
+
+
+pduration
+number of output frames created for previous input frame
+
+
+a
+Rational number: input width / input height
+
+
+sar
+sample aspect ratio
+
+
+dar
+display aspect ratio
+
+
+
+
+
+
9.107.1 Examples# TOC
+
+
+ Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
+
+
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
+
+
+
+
+
+
10 Video Sources# TOC
+
+
Below is a description of the currently available video sources.
+
+
+
10.1 buffer# TOC
+
+
Buffer video frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/vsrc_buffer.h .
+
+
It accepts the following parameters:
+
+
+video_size
+Specify the size (width and height) of the buffered video frames. For the
+syntax of this option, check the "Video size" section in the ffmpeg-utils
+manual.
+
+
+width
+The input video width.
+
+
+height
+The input video height.
+
+
+pix_fmt
+A string representing the pixel format of the buffered video frames.
+It may be a number corresponding to a pixel format, or a pixel format
+name.
+
+
+time_base
+Specify the timebase assumed by the timestamps of the buffered frames.
+
+
+frame_rate
+Specify the frame rate expected for the video stream.
+
+
+pixel_aspect, sar
+The sample (pixel) aspect ratio of the input video.
+
+
+sws_param
+Specify the optional parameters to be used for the scale filter which
+is automatically inserted when an input change is detected in the
+input size or format.
+
+
+
+
For example:
+
+
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+
+
will instruct the source to accept video frames with size 320x240 and
+with format "yuv410p", assuming 1/24 as the timestamps timebase and
+square pixels (1:1 sample aspect ratio).
+Since the pixel format with name "yuv410p" corresponds to the number 6
+(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
+this example corresponds to:
+
+
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+
+
Alternatively, the options can be specified as a flat string, but this
+syntax is deprecated:
+
+
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
+
+
+
10.2 cellauto# TOC
+
+
Create a pattern generated by an elementary cellular automaton.
+
+
The initial state of the cellular automaton can be defined through the
+filename , and pattern options. If such options are
+not specified an initial state is created randomly.
+
+
At each new frame a new row in the video is filled with the result of
+the cellular automaton next generation. The behavior when the whole
+frame is filled is defined by the scroll option.
+
+
This source accepts the following options:
+
+
+filename, f
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified file.
+In the file, each non-whitespace character is considered an alive
+cell, a newline will terminate the row, and further characters in the
+file will be ignored.
+
+
+pattern, p
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified string.
+
+Each non-whitespace character in the string is considered an alive
+cell, a newline will terminate the row, and further characters in the
+string will be ignored.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial cellular automaton row. It
+is a floating point number value ranging from 0 to 1, defaults to
+1/PHI.
+
+This option is ignored when a file or a pattern is specified.
+
+
+random_seed, seed
+Set the seed for filling randomly the initial row, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the cellular automaton rule, it is a number ranging from 0 to 255.
+Default value is 110.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual.
+
+If filename or pattern is specified, the size is set
+by default to the width of the specified initial state row, and the
+height is set to width * PHI.
+
+If size is set, it must contain the width of the specified
+pattern string, and the specified pattern will be centered in the
+larger row.
+
+If a filename or a pattern string is not specified, the size value
+defaults to "320x518" (used for a randomly generated initial state).
+
+
+scroll
+If set to 1, scroll the output upward when all the rows in the output
+have been already filled. If set to 0, the new generated row will be
+written over the top row just after the bottom row is filled.
+Defaults to 1.
+
+
+start_full, full
+If set to 1, completely fill the output with generated rows before
+outputting the first frame.
+This is the default behavior, for disabling set the value to 0.
+
+
+stitch
+If set to 1, stitch the left and right row edges together.
+This is the default behavior, for disabling set the value to 0.
+
+
+
+
+
10.2.1 Examples# TOC
+
+
+ Read the initial state from pattern , and specify an output of
+size 200x400.
+
+
cellauto=f=pattern:s=200x400
+
+
+ Generate a random initial row with a width of 200 cells, with a fill
+ratio of 2/3:
+
+
cellauto=ratio=2/3:s=200x200
+
+
+ Create a pattern generated by rule 18 starting by a single alive cell
+centered on an initial row with width 100:
+
+
cellauto=p=@:s=100x400:full=0:rule=18
+
+
+ Specify a more elaborated initial pattern:
+
+
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
+
+
+
+
+
+
10.3 mandelbrot# TOC
+
+
Generate a Mandelbrot set fractal, and progressively zoom towards the
+point specified with start_x and start_y .
+
+
This source accepts the following options:
+
+
+end_pts
+Set the terminal pts value. Default value is 400.
+
+
+end_scale
+Set the terminal scale value.
+Must be a floating point value. Default value is 0.3.
+
+
+inner
+Set the inner coloring mode, that is the algorithm used to draw the
+Mandelbrot fractal internal region.
+
+It shall assume one of the following values:
+
+black
+Set black mode.
+
+convergence
+Show time until convergence.
+
+mincol
+Set color based on point closest to the origin of the iterations.
+
+period
+Set period mode.
+
+
+
+Default value is mincol .
+
+
+bailout
+Set the bailout value. Default value is 10.0.
+
+
+maxiter
+Set the maximum of iterations performed by the rendering
+algorithm. Default value is 7189.
+
+
+outer
+Set outer coloring mode.
+It shall assume one of following values:
+
+iteration_count
+Set iteration count mode.
+
+normalized_iteration_count
+set normalized iteration count mode.
+
+
+Default value is normalized_iteration_count .
+
+
+rate, r
+Set frame rate, expressed as number of frames per second. Default
+value is "25".
+
+
+size, s
+Set frame size. For the syntax of this option, check the "Video
+size" section in the ffmpeg-utils manual. Default value is "640x480".
+
+
+start_scale
+Set the initial scale value. Default value is 3.0.
+
+
+start_x
+Set the initial x position. Must be a floating point value between
+-100 and 100. Default value is -0.743643887037158704752191506114774.
+
+
+start_y
+Set the initial y position. Must be a floating point value between
+-100 and 100. Default value is -0.131825904205311970493132056385139.
+
+
+
+
+
10.4 mptestsrc# TOC
+
+
Generate various test patterns, as generated by the MPlayer test filter.
+
+
The size of the generated video is fixed, and is 256x256.
+This source is useful in particular for testing encoding features.
+
+
This source accepts the following options:
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+test, t
+
+Set the number or the name of the test to perform. Supported tests are:
+
+dc_luma
+dc_chroma
+freq_luma
+freq_chroma
+amp_luma
+amp_chroma
+cbp
+mv
+ring1
+ring2
+all
+
+
+Default value is "all", which will cycle through the list of all tests.
+
+
+
+
Some examples:
+
+
+
will generate a "dc_luma" test pattern.
+
+
+
10.5 frei0r_src# TOC
+
+
Provide a frei0r source.
+
+
To enable compilation of this filter you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
This source accepts the following parameters:
+
+
+size
+The size of the video to generate. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+
+framerate
+The framerate of the generated video. It may be a string of the form
+num /den or a frame rate abbreviation.
+
+
+filter_name
+The name to the frei0r source to load. For more information regarding frei0r and
+how to set the parameters, read the frei0r section in the video filters
+documentation.
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r source.
+
+
+
+
+
For example, to generate a frei0r partik0l source with size 200x200
+and frame rate 10 which is overlaid on the overlay filter main input:
+
+
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+
+
+
10.6 life# TOC
+
+
Generate a life pattern.
+
+
This source is based on a generalization of John Conway’s life game.
+
+
The sourced input represents a life grid, each pixel represents a cell
+which can be in one of two possible states, alive or dead. Every cell
+interacts with its eight neighbours, which are the cells that are
+horizontally, vertically, or diagonally adjacent.
+
+
At each interaction the grid evolves according to the adopted rule,
+which specifies the number of neighbor alive cells which will make a
+cell stay alive or born. The rule option allows one to specify
+the rule to adopt.
+
+
This source accepts the following options:
+
+
+filename, f
+Set the file from which to read the initial grid state. In the file,
+each non-whitespace character is considered an alive cell, and newline
+is used to delimit the end of each row.
+
+If this option is not specified, the initial grid is generated
+randomly.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial random grid. It is a
+floating point number value ranging from 0 to 1, defaults to 1/PHI.
+It is ignored when a file is specified.
+
+
+random_seed, seed
+Set the seed for filling the initial random grid, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the life rule.
+
+A rule can be specified with a code of the kind "SNS /BNB ",
+where NS and NB are sequences of numbers in the range 0-8,
+NS specifies the number of alive neighbor cells which make a
+live cell stay alive, and NB the number of alive neighbor cells
+which make a dead cell become alive (i.e. to "born").
+"s" and "b" can be used in place of "S" and "B", respectively.
+
+Alternatively a rule can be specified by an 18-bits integer. The 9
+high order bits are used to encode the next cell state if it is alive
+for each number of neighbor alive cells, the low order bits specify
+the rule for "borning" new cells. Higher order bits encode for a
+higher number of neighbor cells.
+For example the number 6153 = (12<<9)+9
specifies a stay alive
+rule of 12 and a born rule of 9, which corresponds to "S23/B03".
+
+Default value is "S23/B3", which is the original Conway’s game of life
+rule, and will keep a cell alive if it has 2 or 3 neighbor alive
+cells, and will born a new cell if there are three alive cells around
+a dead cell.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+If filename is specified, the size is set by default to the
+same size of the input file. If size is set, it must contain
+the size specified in the input file, and the initial grid defined in
+that file is centered in the larger resulting area.
+
+If a filename is not specified, the size value defaults to "320x240"
+(used for a randomly generated initial grid).
+
+
+stitch
+If set to 1, stitch the left and right grid edges together, and the
+top and bottom edges also. Defaults to 1.
+
+
+mold
+Set cell mold speed. If set, a dead cell will go from death_color to
+mold_color with a step of mold . mold can have a
+value from 0 to 255.
+
+
+life_color
+Set the color of living (or new born) cells.
+
+
+death_color
+Set the color of dead cells. If mold is set, this is the first color
+used to represent a dead cell.
+
+
+mold_color
+Set mold color, for definitely dead and moldy cells.
+
+For the syntax of these 3 color options, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+
+
+
10.6.1 Examples# TOC
+
+
+ Read a grid from pattern , and center it on a grid of size
+300x300 pixels:
+
+
life=f=pattern:s=300x300
+
+
+ Generate a random grid of size 200x200, with a fill ratio of 2/3:
+
+
life=ratio=2/3:s=200x200
+
+
+ Specify a custom rule for evolving a randomly generated grid:
+
+
+ Full example with slow death effect (mold) using ffplay
:
+
+
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
+
+
+
+
+
10.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
+
+
The color
source provides a uniformly colored input.
+
+
The haldclutsrc
source provides an identity Hald CLUT. See also
+haldclut filter.
+
+
The nullsrc
source returns unprocessed video frames. It is
+mainly useful to be employed in analysis / debugging tools, or as the
+source for filters which ignore the input data.
+
+
The rgbtestsrc
source generates an RGB test pattern useful for
+detecting RGB vs BGR issues. You should see a red, green and blue
+stripe from top to bottom.
+
+
The smptebars
source generates a color bars pattern, based on
+the SMPTE Engineering Guideline EG 1-1990.
+
+
The smptehdbars
source generates a color bars pattern, based on
+the SMPTE RP 219-2002.
+
+
The testsrc
source generates a test video pattern, showing a
+color pattern, a scrolling gradient and a timestamp. This is mainly
+intended for testing purposes.
+
+
The sources accept the following parameters:
+
+
+color, c
+Specify the color of the source, only available in the color
+source. For the syntax of this option, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+level
+Specify the level of the Hald CLUT, only available in the haldclutsrc
+source. A level of N
generates a picture of N*N*N
by N*N*N
+pixels to be used as identity matrix for 3D lookup tables. Each component is
+coded on a 1/(N*N)
scale.
+
+
+size, s
+Specify the size of the sourced video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual. The default value is
+"320x240".
+
+This option is not available with the haldclutsrc
filter.
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+sar
+Set the sample aspect ratio of the sourced video.
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+decimals, n
+Set the number of decimals to show in the timestamp, only available in the
+testsrc
source.
+
+The displayed timestamp value will correspond to the original
+timestamp value multiplied by the power of 10 of the specified
+value. Default value is 0.
+
+
+
+
For example the following:
+
+
testsrc=duration=5.3:size=qcif:rate=10
+
+
+
will generate a video with a duration of 5.3 seconds, with size
+176x144 and a frame rate of 10 frames per second.
+
+
The following graph description will generate a red source
+with an opacity of 0.2, with size "qcif" and a frame rate of 10
+frames per second.
+
+
color=c=red@0.2:s=qcif:r=10
+
+
+
If the input content is to be ignored, nullsrc
can be used. The
+following command generates noise in the luminance plane by employing
+the geq
filter:
+
+
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+
+
+
10.7.1 Commands# TOC
+
+
The color
source supports the following commands:
+
+
+c, color
+Set the color of the created image. Accepts the same syntax of the
+corresponding color option.
+
+
+
+
+
+
11 Video Sinks# TOC
+
+
Below is a description of the currently available video sinks.
+
+
+
11.1 buffersink# TOC
+
+
Buffer video frames, and make them available to the end of the filter
+graph.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVBufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
+
11.2 nullsink# TOC
+
+
Null video sink: do absolutely nothing with the input video. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
12 Multimedia Filters# TOC
+
+
Below is a description of the currently available multimedia filters.
+
+
+
12.1 avectorscope# TOC
+
+
Convert input audio to a video output, representing the audio vector
+scope.
+
+
The filter is used to measure the difference between channels of a stereo
+audio stream. A monoaural signal, consisting of identical left and right
+signals, results in a straight vertical line.
+as a deviation from this line, creating a Lissajous figure.
+If the straight (or deviation from it) but horizontal line appears this
+indicates that the left and right channels are out of phase.
+
+
The filter accepts the following options:
+
+
+mode, m
+Set the vectorscope mode.
+
+Available values are:
+
+‘lissajous ’
+Lissajous rotated by 45 degrees.
+
+
+‘lissajous_xy ’
+Same as above but not rotated.
+
+
+
+Default value is ‘lissajous ’.
+
+
+size, s
+Set the video size for the output. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual. Default value is 400x400
.
+
+
+rate, r
+Set the output frame rate. Default value is 25
.
+
+
+rc
+gc
+bc
+Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
+Allowed range is [0, 255]
.
+
+
+rf
+gf
+bf
+Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
+Allowed range is [0, 255]
.
+
+
+zoom
+Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
+
+
+
+
+
12.1.1 Examples# TOC
+
+
+ Complete example using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
+
+
+
+
+
12.2 concat# TOC
+
+
Concatenate audio and video streams, joining them together one after the
+other.
+
+
The filter works on segments of synchronized video and audio streams. All
+segments must have the same number of streams of each type, and that will
+also be the number of streams at output.
+
+
The filter accepts the following options:
+
+
+n
+Set the number of segments. Default is 2.
+
+
+v
+Set the number of output video streams, that is also the number of video
+streams in each segment. Default is 1.
+
+
+a
+Set the number of output audio streams, that is also the number of audio
+streams in each segment. Default is 0.
+
+
+unsafe
+Activate unsafe mode: do not fail if segments have a different format.
+
+
+
+
+
The filter has v +a outputs: first v video outputs, then
+a audio outputs.
+
+
There are n x(v +a ) inputs: first the inputs for the first
+segment, in the same order as the outputs, then the inputs for the second
+segment, etc.
+
+
Related streams do not always have exactly the same duration, for various
+reasons including codec frame size or sloppy authoring. For that reason,
+related synchronized streams (e.g. a video and its audio track) should be
+concatenated at once. The concat filter will use the duration of the longest
+stream in each segment (except the last one), and if necessary pad shorter
+audio streams with silence.
+
+
For this filter to work correctly, all segments must start at timestamp 0.
+
+
All corresponding streams must have the same parameters in all segments; the
+filtering system will automatically select a common pixel format for video
+streams, and a common sample format, sample rate and channel layout for
+audio streams, but other settings, such as resolution, must be converted
+explicitly by the user.
+
+
Different frame rates are acceptable but will result in variable frame rate
+at output; be sure to configure the output file to handle it.
+
+
+
12.2.1 Examples# TOC
+
+
+
+
+
12.3 ebur128# TOC
+
+
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
+it unchanged. By default, it logs a message at a frequency of 10Hz with the
+Momentary loudness (identified by M
), Short-term loudness (S
),
+Integrated loudness (I
) and Loudness Range (LRA
).
+
+
The filter also has a video output (see the video option) with a real
+time graph to observe the loudness evolution. The graphic contains the logged
+message mentioned above, so it is not printed anymore when this option is set,
+unless the verbose logging is set. The main graphing area contains the
+short-term loudness (3 seconds of analysis), and the gauge on the right is for
+the momentary loudness (400 milliseconds).
+
+
More information about the Loudness Recommendation EBU R128 on
+http://tech.ebu.ch/loudness .
+
+
The filter accepts the following options:
+
+
+video
+Activate the video output. The audio stream is passed unchanged whether this
+option is set or not. The video stream will be the first output stream if
+activated. Default is 0
.
+
+
+size
+Set the video size. This option is for video only. For the syntax of this
+option, check the "Video size" section in the ffmpeg-utils manual. Default
+and minimum resolution is 640x480
.
+
+
+meter
+Set the EBU scale meter. Default is 9
. Common values are 9
and
+18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
+other integer value between this range is allowed.
+
+
+metadata
+Set metadata injection. If set to 1
, the audio input will be segmented
+into 100ms output frames, each of them containing various loudness information
+in metadata. All the metadata keys are prefixed with lavfi.r128.
.
+
+Default is 0
.
+
+
+framelog
+Force the frame logging level.
+
+Available values are:
+
+‘info ’
+information logging level
+
+‘verbose ’
+verbose logging level
+
+
+
+By default, the logging level is set to info . If the video or
+the metadata options are set, it switches to verbose .
+
+
+peak
+Set peak mode(s).
+
+Available modes can be cumulated (the option is a flag
type). Possible
+values are:
+
+‘none ’
+Disable any peak mode (default).
+
+‘sample ’
+Enable sample-peak mode.
+
+Simple peak mode looking for the higher sample value. It logs a message
+for sample-peak (identified by SPK
).
+
+‘true ’
+Enable true-peak mode.
+
+If enabled, the peak lookup is done on an over-sampled version of the input
+stream for better peak accuracy. It logs a message for true-peak
+(identified by TPK
) and true-peak per frame (identified by FTPK
).
+This mode requires a build with libswresample
.
+
+
+
+
+
+
+
+
12.3.1 Examples# TOC
+
+
+ Real-time graph using ffplay
, with a EBU scale meter +18:
+
+
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
+
+
+ Run an analysis with ffmpeg
:
+
+
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
+
+
+
+
+
12.4 interleave, ainterleave# TOC
+
+
Temporally interleave frames from several inputs.
+
+
interleave
works with video inputs, ainterleave
with audio.
+
+
These filters read frames from several inputs and send the oldest
+queued frame to the output.
+
+
Input streams must have well defined, monotonically increasing frame
+timestamp values.
+
+
In order to submit one frame to output, these filters need to enqueue
+at least one frame for each input, so they cannot work in case one
+input is not yet terminated and will not receive incoming frames.
+
+
For example consider the case when one input is a select
filter
+which always drops input frames. The interleave
filter will keep
+reading from that input, but it will never be able to send new frames
+to output until the input sends an end-of-stream signal.
+
+
Also, depending on inputs synchronization, the filters will drop
+frames in case one input receives more frames than the other ones, and
+the queue is already filled.
+
+
These filters accept the following options:
+
+
+nb_inputs, n
+Set the number of different inputs, it is 2 by default.
+
+
+
+
+
12.4.1 Examples# TOC
+
+
+ Interleave frames belonging to different streams using ffmpeg
:
+
+
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
+
+
+ Add flickering blur effect:
+
+
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
+
+
+
+
+
12.5 perms, aperms# TOC
+
+
Set read/write permissions for the output frames.
+
+
These filters are mainly aimed at developers to test direct path in the
+following filter in the filtergraph.
+
+
The filters accept the following options:
+
+
+mode
+Select the permissions mode.
+
+It accepts the following values:
+
+‘none ’
+Do nothing. This is the default.
+
+‘ro ’
+Set all the output frames read-only.
+
+‘rw ’
+Set all the output frames directly writable.
+
+‘toggle ’
+Make the frame read-only if writable, and writable if read-only.
+
+‘random ’
+Set each output frame read-only or writable randomly.
+
+
+
+
+seed
+Set the seed for the random mode, must be an integer included between
+0
and UINT32_MAX
. If not specified, or if explicitly set to
+-1
, the filter will try to use a good random seed on a best effort
+basis.
+
+
+
+
Note: in case of auto-inserted filter between the permission filter and the
+following one, the permission might not be received as expected in that
+following filter. Inserting a format or aformat filter before the
+perms/aperms filter can avoid this problem.
+
+
+
12.6 select, aselect# TOC
+
+
Select frames to pass in output.
+
+
This filter accepts the following options:
+
+
+expr, e
+Set expression, which is evaluated for each input frame.
+
+If the expression is evaluated to zero, the frame is discarded.
+
+If the evaluation result is negative or NaN, the frame is sent to the
+first output; otherwise it is sent to the output with index
+ceil(val)-1
, assuming that the input index starts from 0.
+
+For example a value of 1.2
corresponds to the output with index
+ceil(1.2)-1 = 2-1 = 1
, that is the second output.
+
+
+outputs, n
+Set the number of outputs. The output to which to send the selected
+frame is based on the result of the evaluation. Default value is 1.
+
+
+
+
The expression can contain the following constants:
+
+
+n
+The (sequential) number of the filtered frame, starting from 0.
+
+
+selected_n
+The (sequential) number of the selected frame, starting from 0.
+
+
+prev_selected_n
+The sequential number of the last selected frame. It’s NAN if undefined.
+
+
+TB
+The timebase of the input timestamps.
+
+
+pts
+The PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in TB units. It’s NAN if undefined.
+
+
+t
+The PTS of the filtered video frame,
+expressed in seconds. It’s NAN if undefined.
+
+
+prev_pts
+The PTS of the previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_pts
+The PTS of the last previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_t
+The PTS of the last previously selected video frame. It’s NAN if undefined.
+
+
+start_pts
+The PTS of the first video frame in the video. It’s NAN if undefined.
+
+
+start_t
+The time of the first video frame in the video. It’s NAN if undefined.
+
+
+pict_type (video only)
+The type of the filtered frame. It can assume one of the following
+values:
+
+I
+P
+B
+S
+SI
+SP
+BI
+
+
+
+interlace_type (video only)
+The frame interlace type. It can assume one of the following values:
+
+PROGRESSIVE
+The frame is progressive (not interlaced).
+
+TOPFIRST
+The frame is top-field-first.
+
+BOTTOMFIRST
+The frame is bottom-field-first.
+
+
+
+
+consumed_sample_n (audio only)
+the number of selected samples before the current frame
+
+
+samples_n (audio only)
+the number of samples in the current frame
+
+
+sample_rate (audio only)
+the input sample rate
+
+
+key
+This is 1 if the filtered frame is a key-frame, 0 otherwise.
+
+
+pos
+the position in the file of the filtered frame, -1 if the information
+is not available (e.g. for synthetic video)
+
+
+scene (video only)
+value between 0 and 1 to indicate a new scene; a low value reflects a low
+probability for the current frame to introduce a new scene, while a higher
+value means the current frame is more likely to be one (see the example below)
+
+
+
+
+
The default value of the select expression is "1".
+
+
+
12.6.1 Examples# TOC
+
+
+
+
+
12.7 sendcmd, asendcmd# TOC
+
+
Send commands to filters in the filtergraph.
+
+
These filters read commands to be sent to other filters in the
+filtergraph.
+
+
sendcmd
must be inserted between two video filters,
+asendcmd
must be inserted between two audio filters, but apart
+from that they act the same way.
+
+
The specification of commands can be provided in the filter arguments
+with the commands option, or in a file specified by the
+filename option.
+
+
These filters accept the following options:
+
+commands, c
+Set the commands to be read and sent to the other filters.
+
+filename, f
+Set the filename of the commands to be read and sent to the other
+filters.
+
+
+
+
+
12.7.1 Commands syntax# TOC
+
+
A commands description consists of a sequence of interval
+specifications, comprising a list of commands to be executed when a
+particular event related to that interval occurs. The occurring event
+is typically the current frame time entering or leaving a given time
+interval.
+
+
An interval is specified by the following syntax:
+
+
+
The time interval is specified by the START and END times.
+END is optional and defaults to the maximum time.
+
+
The current frame time is considered within the specified interval if
+it is included in the interval [START , END ), that is when
+the time is greater or equal to START and is lesser than
+END .
+
+
COMMANDS consists of a sequence of one or more command
+specifications, separated by ",", relating to that interval. The
+syntax of a command specification is given by:
+
+
[FLAGS ] TARGET COMMAND ARG
+
+
+
FLAGS is optional and specifies the type of events relating to
+the time interval which enable sending the specified command, and must
+be a non-null sequence of identifier flags separated by "+" or "|" and
+enclosed between "[" and "]".
+
+
The following flags are recognized:
+
+enter
+The command is sent when the current frame timestamp enters the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was not in the given interval, and the
+current is.
+
+
+leave
+The command is sent when the current frame timestamp leaves the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was in the given interval, and the
+current is not.
+
+
+
+
If FLAGS is not specified, a default value of [enter]
is
+assumed.
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional list of argument for
+the given COMMAND .
+
+
Between one interval specification and another, whitespaces, or
+sequences of characters starting with #
until the end of line,
+are ignored and can be used to annotate comments.
+
+
A simplified BNF description of the commands specification syntax
+follows:
+
+
COMMAND_FLAG ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
+COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
+COMMANDS ::= COMMAND [,COMMANDS ]
+INTERVAL ::= START [-END ] COMMANDS
+INTERVALS ::= INTERVAL [;INTERVALS ]
+
+
+
+
12.7.2 Examples# TOC
+
+
+
+
+
12.8 setpts, asetpts# TOC
+
+
Change the PTS (presentation timestamp) of the input frames.
+
+
setpts
works on video frames, asetpts
on audio frames.
+
+
This filter accepts the following options:
+
+
+expr
+The expression which is evaluated for each frame to construct its timestamp.
+
+
+
+
+
The expression is evaluated through the eval API and can contain the following
+constants:
+
+
+FRAME_RATE
+frame rate, only defined for constant frame-rate video
+
+
+PTS
+The presentation timestamp in input
+
+
+N
+The count of the input frame for video or the number of consumed samples,
+not including the current frame for audio, starting from 0.
+
+
+NB_CONSUMED_SAMPLES
+The number of consumed samples, not including the current frame (only
+audio)
+
+
+NB_SAMPLES, S
+The number of samples in the current frame (only audio)
+
+
+SAMPLE_RATE, SR
+The audio sample rate.
+
+
+STARTPTS
+The PTS of the first frame.
+
+
+STARTT
+the time in seconds of the first frame
+
+
+INTERLACED
+State whether the current frame is interlaced.
+
+
+T
+the time in seconds of the current frame
+
+
+POS
+original position in the file of the frame, or undefined if undefined
+for the current frame
+
+
+PREV_INPTS
+The previous input PTS.
+
+
+PREV_INT
+previous input time in seconds
+
+
+PREV_OUTPTS
+The previous output PTS.
+
+
+PREV_OUTT
+previous output time in seconds
+
+
+RTCTIME
+The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
+instead.
+
+
+RTCSTART
+The wallclock (RTC) time at the start of the movie in microseconds.
+
+
+TB
+The timebase of the input timestamps.
+
+
+
+
+
+
12.8.1 Examples# TOC
+
+
+ Start counting PTS from zero
+
+
+ Apply fast motion effect:
+
+
+ Apply slow motion effect:
+
+
+ Set fixed rate of 25 frames per second:
+
+
+ Set fixed rate 25 fps with some jitter:
+
+
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
+
+
+ Apply an offset of 10 seconds to the input PTS:
+
+
+ Generate timestamps from a "live source" and rebase onto the current timebase:
+
+
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
+
+
+ Generate timestamps by counting samples:
+
+
+
+
+
+
12.9 settb, asettb# TOC
+
+
Set the timebase to use for the output frames timestamps.
+It is mainly useful for testing timebase configuration.
+
+
It accepts the following parameters:
+
+
+expr, tb
+The expression which is evaluated into the output timebase.
+
+
+
+
+
The value for tb is an arithmetic expression representing a
+rational. The expression can contain the constants "AVTB" (the default
+timebase), "intb" (the input timebase) and "sr" (the sample rate,
+audio only). Default value is "intb".
+
+
+
12.9.1 Examples# TOC
+
+
+ Set the timebase to 1/25:
+
+
+ Set the timebase to 1/10:
+
+
+ Set the timebase to 1001/1000:
+
+
+ Set the timebase to 2*intb:
+
+
+ Set the default timebase value:
+
+
+
+
+
12.10 showcqt# TOC
+
Convert input audio to a video output representing
+frequency spectrum logarithmically (using constant Q transform with
+Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
+
+
The filter accepts the following options:
+
+
+volume
+Specify transform volume (multiplier) expression. The expression can contain
+variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+a_weighting(f)
+A-weighting of equal loudness
+
+b_weighting(f)
+B-weighting of equal loudness
+
+c_weighting(f)
+C-weighting of equal loudness
+
+
+Default value is 16
.
+
+
+tlength
+Specify transform length expression. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+Default value is 384/f*tc/(384/f+tc)
.
+
+
+timeclamp
+Specify the transform timeclamp. At low frequency, there is trade-off between
+accuracy in time domain and frequency domain. If timeclamp is lower,
+event in time domain is represented more accurately (such as fast bass drum),
+otherwise event in frequency domain is represented more accurately
+(such as bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
+
+
+coeffclamp
+Specify the transform coeffclamp. If coeffclamp is lower, transform is
+more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
+Default value is 1.0
.
+
+
+gamma
+Specify gamma. Lower gamma makes the spectrum more contrasted; higher gamma
+gives the spectrum more range. Acceptable value is [1.0, 7.0].
+Default value is 3.0
.
+
+
+fontfile
+Specify font file for use with freetype. If not specified, use embedded font.
+
+
+fontcolor
+Specify font color expression. This is arithmetic expression that should return
+integer value 0xRRGGBB. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+midi(f)
+midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
+
+r(x), g(x), b(x)
+red, green, and blue value of intensity x
+
+
+Default value is st(0, (midi(f)-59.5)/12);
+st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
+r(1-ld(1)) + b(ld(1))
+
+
+fullhd
+If set to 1 (the default), the video size is 1920x1080 (full HD),
+if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
+
+
+fps
+Specify video fps. Default value is 25
.
+
+
+count
+Specify number of transform per frame, so there are fps*count transforms
+per second. Note that audio data rate must be divisible by fps*count.
+Default value is 6
.
+
+
+
+
+
+
12.10.1 Examples# TOC
+
+
+ Playing audio while showing the spectrum:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with frame rate 30 fps:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
+
+
+ Playing at 960x540 and lower CPU usage:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
+
+
+ A1 and its harmonics: A1, A2, (near)E3, A3:
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with more accuracy in frequency domain (and slower):
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
+
+
+ B-weighting of equal loudness
+
+
volume=16*b_weighting(f)
+
+
+ Lower Q factor
+
+
tlength=100/f*tc/(100/f+tc)
+
+
+ Custom fontcolor, C-note is colored green, others are colored blue
+
+
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
+
+
+
+
+
+
12.11 showspectrum# TOC
+
+
Convert input audio to a video output, representing the audio frequency
+spectrum.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value is
+640x512
.
+
+
+slide
+Specify how the spectrum should slide along the window.
+
+It accepts the following values:
+
+‘replace ’
+the samples start again on the left when they reach the right
+
+‘scroll ’
+the samples scroll from right to left
+
+‘fullframe ’
+frames are only produced when the samples reach the right
+
+
+
+Default value is replace
.
+
+
+mode
+Specify display mode.
+
+It accepts the following values:
+
+‘combined ’
+all channels are displayed in the same row
+
+‘separate ’
+all channels are displayed in separate rows
+
+
+
+Default value is ‘combined ’.
+
+
+color
+Specify display color mode.
+
+It accepts the following values:
+
+‘channel ’
+each channel is displayed in a separate color
+
+‘intensity ’
+each channel is displayed using the same color scheme
+
+
+
+Default value is ‘channel ’.
+
+
+scale
+Specify scale used for calculating intensity color values.
+
+It accepts the following values:
+
+‘lin ’
+linear
+
+‘sqrt ’
+square root, default
+
+‘cbrt ’
+cubic root
+
+‘log ’
+logarithmic
+
+
+
+Default value is ‘sqrt ’.
+
+
+saturation
+Set saturation modifier for displayed colors. Negative values provide
+alternative color scheme. 0
is no saturation at all.
+Saturation must be in [-10.0, 10.0] range.
+Default value is 1
.
+
+
+win_func
+Set window function.
+
+It accepts the following values:
+
+‘none ’
+No samples pre-processing (do not expect this to be faster)
+
+‘hann ’
+Hann window
+
+‘hamming ’
+Hamming window
+
+‘blackman ’
+Blackman window
+
+
+
+Default value is hann
.
+
+
+
+
The usage is very similar to the showwaves filter; see the examples in that
+section.
+
+
+
12.11.1 Examples# TOC
+
+
+ Large window with logarithmic color scaling:
+
+
showspectrum=s=1280x480:scale=log
+
+
+ Complete example for a colored and sliding spectrum per channel using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
+
+
+
+
+
12.12 showwaves# TOC
+
+
Convert input audio to a video output, representing the samples waves.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value
+is "600x240".
+
+
+mode
+Set display mode.
+
+Available values are:
+
+‘point ’
+Draw a point for each sample.
+
+
+‘line ’
+Draw a vertical line for each sample.
+
+
+‘p2p ’
+Draw a point for each sample and a line between them.
+
+
+‘cline ’
+Draw a centered vertical line for each sample.
+
+
+
+Default value is point
.
+
+
+n
+Set the number of samples which are printed on the same column. A
+larger value will decrease the frame rate. Must be a positive
+integer. This option can be set only if the value for rate
+is not explicitly specified.
+
+
+rate, r
+Set the (approximate) output frame rate. This is done by setting the
+option n . Default value is "25".
+
+
+split_channels
+Set if channels should be drawn separately or overlap. Default value is 0.
+
+
+
+
+
+
12.12.1 Examples# TOC
+
+
+ Output the input file audio and the corresponding video representation
+at the same time:
+
+
amovie=a.mp3,asplit[out0],showwaves[out1]
+
+
+ Create a synthetic signal and show it with showwaves, forcing a
+frame rate of 30 frames per second:
+
+
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
+
+
+
+
+
12.13 split, asplit# TOC
+
+
Split input into several identical outputs.
+
+
asplit
works with audio input, split
with video.
+
+
The filter accepts a single parameter which specifies the number of outputs. If
+unspecified, it defaults to 2.
+
+
+
12.13.1 Examples# TOC
+
+
+ Create two separate outputs from the same input:
+
+
[in] split [out0][out1]
+
+
+ To create 3 or more outputs, you need to specify the number of
+outputs, like in:
+
+
[in] asplit=3 [out0][out1][out2]
+
+
+ Create two separate outputs from the same input, one cropped and
+one padded:
+
+
[in] split [splitout1][splitout2];
+[splitout1] crop=100:100:0:0 [cropout];
+[splitout2] pad=200:200:100:100 [padout];
+
+
+ Create 5 copies of the input audio with ffmpeg
:
+
+
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
+
+
+
+
+
12.14 zmq, azmq# TOC
+
+
Receive commands sent through a libzmq client, and forward them to
+filters in the filtergraph.
+
+
zmq
and azmq
work as a pass-through filters. zmq
+must be inserted between two video filters, azmq
between two
+audio filters.
+
+
To enable these filters you need to install the libzmq library and
+headers and configure FFmpeg with --enable-libzmq
.
+
+
For more information about libzmq see:
+http://www.zeromq.org/
+
+
The zmq
and azmq
filters work as a libzmq server, which
+receives messages sent through a network interface defined by the
+bind_address option.
+
+
The received message must be in the form:
+
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional argument list for the
+given COMMAND .
+
+
Upon reception, the message is processed and the corresponding command
+is injected into the filtergraph. Depending on the result, the filter
+will send a reply to the client, adopting the format:
+
+
ERROR_CODE ERROR_REASON
+MESSAGE
+
+
+
MESSAGE is optional.
+
+
+
12.14.1 Examples# TOC
+
+
Look at tools/zmqsend for an example of a zmq client which can
+be used to send commands processed by these filters.
+
+
Consider the following filtergraph generated by ffplay
+
+
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l] overlay [bg+l];
+[bg+l][r] overlay=x=100 "
+
+
+
To change the color of the left side of the video, the following
+command can be used:
+
+
echo Parsed_color_0 c yellow | tools/zmqsend
+
+
+
To change the right side:
+
+
echo Parsed_color_1 c pink | tools/zmqsend
+
+
+
+
+
13 Multimedia Sources# TOC
+
+
Below is a description of the currently available multimedia sources.
+
+
+
13.1 amovie# TOC
+
+
This is the same as movie source, except it selects an audio
+stream by default.
+
+
+
13.2 movie# TOC
+
+
Read audio and/or video stream(s) from a movie container.
+
+
It accepts the following parameters:
+
+
+filename
+The name of the resource to read (not necessarily a file; it can also be a
+device or a stream accessed through some protocol).
+
+
+format_name, f
+Specifies the format assumed for the movie to read, and can be either
+the name of a container or an input device. If not specified, the
+format is guessed from movie_name or by probing.
+
+
+seek_point, sp
+Specifies the seek point in seconds. The frames will be output
+starting from this seek point. The parameter is evaluated with
+av_strtod
, so the numerical value may be suffixed by an IS
+postfix. The default value is "0".
+
+
+streams, s
+Specifies the streams to read. Several streams can be specified,
+separated by "+". The source will then have as many outputs, in the
+same order. The syntax is explained in the “Stream specifiers”
+section in the ffmpeg manual. Two special names, "dv" and "da" specify
+respectively the default (best suited) video and audio stream. Default
+is "dv", or "da" if the filter is called as "amovie".
+
+
+stream_index, si
+Specifies the index of the video stream to read. If the value is -1,
+the most suitable video stream will be automatically selected. The default
+value is "-1". Deprecated. If the filter is called "amovie", it will select
+audio instead of video.
+
+
+loop
+Specifies how many times to read the stream in sequence.
+If the value is less than 1, the stream will be read again and again.
+Default value is "1".
+
+Note that when the movie is looped the source timestamps are not
+changed, so it will generate non monotonically increasing timestamps.
+
+
+
+
It allows overlaying a second video on top of the main input of
+a filtergraph, as shown in this graph:
+
+
input -----------> deltapts0 --> overlay --> output
+ ^
+ |
+movie --> scale--> deltapts1 -------+
+
+
+
13.2.1 Examples# TOC
+
+
+ Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
+on top of the input labelled "in":
+
+
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read from a video4linux2 device, and overlay it on top of the input
+labelled "in":
+
+
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read the first video stream and the audio stream with id 0x81 from
+dvd.vob; the video is connected to the pad named "video" and the audio is
+connected to the pad named "audio":
+
+
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
+
+
+
+
+
+
14 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavfilter
+
+
+
+
15 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-formats.html b/Externals/ffmpeg/dev/doc/ffmpeg-formats.html
new file mode 100644
index 0000000000..1350b5caba
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-formats.html
@@ -0,0 +1,2311 @@
+
+
+
+
+
+
+ FFmpeg Formats Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Formats Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes the supported formats (muxers and demuxers)
+provided by the libavformat library.
+
+
+
+
2 Format Options# TOC
+
+
The libavformat library provides some generic global options, which
+can be set on all the muxers and demuxers. In addition each muxer or
+demuxer may support so-called private options, which are specific for
+that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
The list of supported options follows:
+
+
+avioflags flags (input/output )
+Possible values:
+
+‘direct ’
+Reduce buffering.
+
+
+
+
+probesize integer (input )
+Set probing size in bytes, i.e. the size of the data to analyze to get
+stream information. A higher value will allow detection of more
+information in case it is dispersed into the stream, but will increase
+latency. Must be an integer not less than 32. It is 5000000 by default.
+
+
+packetsize integer (output )
+Set packet size.
+
+
+fflags flags (input/output )
+Set format flags.
+
+Possible values:
+
+‘ignidx ’
+Ignore index.
+
+‘genpts ’
+Generate PTS.
+
+‘nofillin ’
+Do not fill in missing values that can be exactly calculated.
+
+‘noparse ’
+Disable AVParsers, this needs +nofillin
too.
+
+‘igndts ’
+Ignore DTS.
+
+‘discardcorrupt ’
+Discard corrupted frames.
+
+‘sortdts ’
+Try to interleave output packets by DTS.
+
+‘keepside ’
+Do not merge side data.
+
+‘latm ’
+Enable RTP MP4A-LATM payload.
+
+‘nobuffer ’
+Reduce the latency introduced by optional buffering
+
+‘bitexact ’
+Only write platform-, build- and time-independent data.
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+
+
+
+seek2any integer (input )
+Allow seeking to non-keyframes on demuxer level when supported if set to 1.
+Default is 0.
+
+
+analyzeduration integer (input )
+Specify how many microseconds are analyzed to probe the input. A
+higher value will allow detection of more accurate information, but will
+increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
+
+
+cryptokey hexadecimal string (input )
+Set decryption key.
+
+
+indexmem integer (input )
+Set max memory used for timestamp index (per stream).
+
+
+rtbufsize integer (input )
+Set max memory used for buffering real-time frames.
+
+
+fdebug flags (input/output )
+Print specific debug info.
+
+Possible values:
+
+‘ts ’
+
+
+
+max_delay integer (input/output )
+Set maximum muxing or demuxing delay in microseconds.
+
+
+fpsprobesize integer (input )
+Set number of frames used to probe fps.
+
+
+audio_preload integer (output )
+Set microseconds by which audio packets should be interleaved earlier.
+
+
+chunk_duration integer (output )
+Set microseconds for each chunk.
+
+
+chunk_size integer (output )
+Set size in bytes for each chunk.
+
+
+err_detect, f_err_detect flags (input )
+Set error detection flags. f_err_detect
is deprecated and
+should be used only via the ffmpeg
tool.
+
+Possible values:
+
+‘crccheck ’
+Verify embedded CRCs.
+
+‘bitstream ’
+Detect bitstream specification deviations.
+
+‘buffer ’
+Detect improper bitstream length.
+
+‘explode ’
+Abort decoding on minor error detection.
+
+‘careful ’
+Consider things that violate the spec and have not been seen in the
+wild as errors.
+
+‘compliant ’
+Consider all spec non-compliances as errors.
+
+‘aggressive ’
+Consider things that a sane encoder should not do as an error.
+
+
+
+
+use_wallclock_as_timestamps integer (input )
+Use wallclock as timestamps.
+
+
+avoid_negative_ts integer (output )
+
+Possible values:
+
+‘make_non_negative ’
+Shift timestamps to make them non-negative.
+Also note that this affects only leading negative timestamps, and not
+non-monotonic negative timestamps.
+
+‘make_zero ’
+Shift timestamps so that the first timestamp is 0.
+
+‘auto (default) ’
+Enables shifting when required by the target format.
+
+‘disabled ’
+Disables shifting of timestamp.
+
+
+
+When shifting is enabled, all output timestamps are shifted by the
+same amount. Audio, video, and subtitles desynching and relative
+timestamp differences are preserved compared to how they would have
+been without shifting.
+
+
+skip_initial_bytes integer (input )
+Set number of bytes to skip before reading the header and frames.
+Default is 0.
+
+
+correct_ts_overflow integer (input )
+Correct single timestamp overflows if set to 1. Default is 1.
+
+
+flush_packets integer (output )
+Flush the underlying I/O stream after each packet. Default 1 enables it, and
+has the effect of reducing the latency; 0 disables it and may slightly
+increase performance in some cases.
+
+
+output_ts_offset offset (output )
+Set the output time offset.
+
+offset must be a time duration specification,
+see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+The offset is added by the muxer to the output timestamps.
+
+Specifying a positive offset means that the corresponding streams are
+delayed by the time duration specified in offset . Default value
+is 0
(meaning that no offset is applied).
+
+
+format_whitelist list (input )
+"," separated List of allowed demuxers. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+Stream parameters.
+For example to separate the fields with newlines and indention:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
2.1 Format stream specifiers# TOC
+
+
Format stream specifiers allow selection of one or more streams that
+match specific properties.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index.
+
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio,
+’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
+stream_index is given, then it matches the stream number
+stream_index of this type. Otherwise, it matches all streams of
+this type.
+
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number
+stream_index in the program with the id
+program_id . Otherwise, it matches all streams in the program.
+
+
+#stream_id
+Matches the stream by a format-specific ID.
+
+
+
+
The exact semantics of stream specifiers is defined by the
+avformat_match_stream_specifier()
function declared in the
+libavformat/avformat.h header.
+
+
+
3 Demuxers# TOC
+
+
Demuxers are configured elements in FFmpeg that can read the
+multimedia streams from a particular type of file.
+
+
When you configure your FFmpeg build, all the supported demuxers
+are enabled by default. You can list all available ones using the
+configure option --list-demuxers
.
+
+
You can disable all the demuxers using the configure option
+--disable-demuxers
, and selectively enable a single demuxer with
+the option --enable-demuxer=DEMUXER
, or disable it
+with the option --disable-demuxer=DEMUXER
.
+
+
The option -formats
of the ff* tools will display the list of
+enabled demuxers.
+
+
The description of some of the currently available demuxers follows.
+
+
+
3.1 applehttp# TOC
+
+
Apple HTTP Live Streaming demuxer.
+
+
This demuxer presents all AVStreams from all variant streams.
+The id field is set to the bitrate variant index number. By setting
+the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
+the caller can decide which variant streams to actually receive.
+The total bitrate of the variant that the stream belongs to is
+available in a metadata key named "variant_bitrate".
+
+
+
3.2 apng# TOC
+
+
Animated Portable Network Graphics demuxer.
+
+
This demuxer is used to demux APNG files.
+All headers, but the PNG signature, up to (but not including) the first
+fcTL chunk are transmitted as extradata.
+Frames are then split as being all the chunks between two fcTL ones, or
+between the last fcTL and IEND chunks.
+
+
+-ignore_loop bool
+Ignore the loop variable in the file if set.
+
+-max_fps int
+Maximum framerate in frames per second (0 for no limit).
+
+-default_fps int
+Default framerate in frames per second when none is specified in the file
+(0 meaning as fast as possible).
+
+
+
+
+
+
+
Advanced Systems Format demuxer.
+
+
This demuxer is used to demux ASF files and MMS network streams.
+
+
+-no_resync_search bool
+Do not try to resynchronize by looking for a certain optional start code.
+
+
+
+
+
3.4 concat# TOC
+
+
Virtual concatenation script demuxer.
+
+
This demuxer reads a list of files and other directives from a text file and
+demuxes them one after the other, as if all their packets had been muxed
+together.
+
+
The timestamps in the files are adjusted so that the first file starts at 0
+and each next file starts where the previous one finishes. Note that it is
+done globally and may cause gaps if all streams do not have exactly the same
+length.
+
+
All files must have the same streams (same codecs, same time base, etc.).
+
+
The duration of each file is used to adjust the timestamps of the next file:
+if the duration is incorrect (because it was computed using the bit-rate or
+because the file is truncated, for example), it can cause artifacts. The
+duration
directive can be used to override the duration stored in
+each file.
+
+
+
3.4.1 Syntax# TOC
+
+
The script is a text file in extended-ASCII, with one directive per line.
+Empty lines, leading spaces and lines starting with ’#’ are ignored. The
+following directive is recognized:
+
+
+file path
+Path to a file to read; special characters and spaces must be escaped with
+backslash or single quotes.
+
+All subsequent file-related directives apply to that file.
+
+
+ffconcat version 1.0
+Identify the script type and version. It also sets the safe option
+to 1 if it was set to its default -1.
+
+To make FFmpeg recognize the format automatically, this directive must
+appear exactly as is (no extra space or byte-order-mark) on the very first
+line of the script.
+
+
+duration dur
+Duration of the file. This information can be specified from the file;
+specifying it here may be more efficient or help if the information from the
+file is not available or accurate.
+
+If the duration is set for all files, then it is possible to seek in the
+whole concatenated video.
+
+
+stream
+Introduce a stream in the virtual file.
+All subsequent stream-related directives apply to the last introduced
+stream.
+Some streams properties must be set in order to allow identifying the
+matching streams in the subfiles.
+If no streams are defined in the script, the streams from the first file are
+copied.
+
+
+exact_stream_id id
+Set the id of the stream.
+If this directive is given, the string with the corresponding id in the
+subfiles will be used.
+This is especially useful for MPEG-PS (VOB) files, where the order of the
+streams is not reliable.
+
+
+
+
+
+
3.4.2 Options# TOC
+
+
This demuxer accepts the following option:
+
+
+safe
+If set to 1, reject unsafe file paths. A file path is considered safe if it
+does not contain a protocol specification and is relative and all components
+only contain characters from the portable character set (letters, digits,
+period, underscore and hyphen) and have no period at the beginning of a
+component.
+
+If set to 0, any file name is accepted.
+
+The default is -1, it is equivalent to 1 if the format was automatically
+probed and 0 otherwise.
+
+
+auto_convert
+If set to 1, try to perform automatic conversions on packet data to make the
+streams concatenable.
+
+Currently, the only conversion is adding the h264_mp4toannexb bitstream
+filter to H.264 streams in MP4 format. This is necessary in particular if
+there are resolution changes.
+
+
+
+
+
+
+
+
Adobe Flash Video Format demuxer.
+
+
This demuxer is used to demux FLV files and RTMP network streams.
+
+
+-flv_metadata bool
+Allocate the streams according to the onMetaData array content.
+
+
+
+
+
3.6 libgme# TOC
+
+
The Game Music Emu library is a collection of video game music file emulators.
+
+
See http://code.google.com/p/game-music-emu/ for more information.
+
+
Some files have multiple tracks. The demuxer will pick the first track by
+default. The track_index option can be used to select a different
+track. Track indexes start at 0. The demuxer exports the number of tracks as
+tracks meta data entry.
+
+
For very large files, the max_size option may have to be adjusted.
+
+
+
3.7 libquvi# TOC
+
+
Play media from Internet services using the quvi project.
+
+
The demuxer accepts a format option to request a specific quality. It
+is by default set to best .
+
+
See http://quvi.sourceforge.net/ for more information.
+
+
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
+enabled.
+
+
+
+
+
Animated GIF demuxer.
+
+
It accepts the following options:
+
+
+min_delay
+Set the minimum valid delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 2.
+
+
+default_delay
+Set the default delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 10.
+
+
+ignore_loop
+GIF files can contain information to loop a certain number of times (or
+infinitely). If ignore_loop is set to 1, then the loop setting
+from the input will be ignored and looping will not occur. If set to 0,
+then looping will occur and will cycle the number of times according to
+the GIF. Default value is 1.
+
+
+
+
For example, with the overlay filter, place an infinitely looping GIF
+over another video:
+
+
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
+
+
+
Note that in the above example the shortest option for overlay filter is
+used to end the output video at the length of the shortest input file,
+which in this case is input.mp4 as the GIF in this example loops
+infinitely.
+
+
+
3.9 image2# TOC
+
+
Image file demuxer.
+
+
This demuxer reads from a list of image files specified by a pattern.
+The syntax and meaning of the pattern is specified by the
+option pattern_type .
+
+
The pattern may contain a suffix which is used to automatically
+determine the format of the images contained in the files.
+
+
The size, the pixel format, and the format of each image must be the
+same for all the files in the sequence.
+
+
This demuxer accepts the following options:
+
+framerate
+Set the frame rate for the video stream. It defaults to 25.
+
+loop
+If set to 1, loop over the input. Default value is 0.
+
+pattern_type
+Select the pattern type used to interpret the provided filename.
+
+pattern_type accepts one of the following values.
+
+sequence
+Select a sequence pattern type, used to specify a sequence of files
+indexed by sequential numbers.
+
+A sequence pattern may contain the string "%d" or "%0N d", which
+specifies the position of the characters representing a sequential
+number in each filename matched by the pattern. If the form
+"%d0N d" is used, the string representing the number in each
+filename is 0-padded and N is the total number of 0-padded
+digits representing the number. The literal character ’%’ can be
+specified in the pattern with the string "%%".
+
+If the sequence pattern contains "%d" or "%0N d", the first filename of
+the file list specified by the pattern must contain a number
+inclusively contained between start_number and
+start_number +start_number_range -1, and all the following
+numbers must be sequential.
+
+For example the pattern "img-%03d.bmp" will match a sequence of
+filenames of the form img-001.bmp , img-002.bmp , ...,
+img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
+sequence of filenames of the form i%m%g-1.jpg ,
+i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
+
+Note that the pattern must not necessarily contain "%d" or
+"%0N d", for example to convert a single image file
+img.jpeg you can employ the command:
+
+
ffmpeg -i img.jpeg img.png
+
+
+
+glob
+Select a glob wildcard pattern type.
+
+The pattern is interpreted like a glob()
pattern. This is only
+selectable if libavformat was compiled with globbing support.
+
+
+glob_sequence (deprecated, will be removed)
+Select a mixed glob wildcard/sequence pattern.
+
+If your version of libavformat was compiled with globbing support, and
+the provided pattern contains at least one glob meta character among
+%*?[]{}
that is preceded by an unescaped "%", the pattern is
+interpreted like a glob()
pattern, otherwise it is interpreted
+like a sequence pattern.
+
+All glob special characters %*?[]{}
must be prefixed
+with "%". To escape a literal "%" you shall use "%%".
+
+For example the pattern foo-%*.jpeg
will match all the
+filenames prefixed by "foo-" and terminating with ".jpeg", and
+foo-%?%?%?.jpeg
will match all the filenames prefixed with
+"foo-", followed by a sequence of three characters, and terminating
+with ".jpeg".
+
+This pattern type is deprecated in favor of glob and
+sequence .
+
+
+
+Default value is glob_sequence .
+
+pixel_format
+Set the pixel format of the images to read. If not specified the pixel
+format is guessed from the first image file in the sequence.
+
+start_number
+Set the index of the file matched by the image file pattern to start
+to read from. Default value is 0.
+
+start_number_range
+Set the index interval range to check when looking for the first image
+file in the sequence, starting from start_number . Default value
+is 5.
+
+ts_from_file
+If set to 1, will set frame timestamp to modification time of image file. Note
+that monotonicity of timestamps is not provided: images go in the same order as
+without this option. Default value is 0.
+If set to 2, will set frame timestamp to the modification time of the image file in
+nanosecond precision.
+
+video_size
+Set the video size of the images to read. If not specified the video
+size is guessed from the first image file in the sequence.
+
+
+
+
+
3.9.1 Examples# TOC
+
+
+ Use ffmpeg
for creating a video from the images in the file
+sequence img-001.jpeg , img-002.jpeg , ..., assuming an
+input frame rate of 10 frames per second:
+
+
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
+
+
+ As above, but start by reading from a file with index 100 in the sequence:
+
+
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
+
+
+ Read images matching the "*.png" glob pattern , that is all the files
+terminating with the ".png" suffix:
+
+
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
+
+
+
+
+
3.10 mpegts# TOC
+
+
MPEG-2 transport stream demuxer.
+
+
+fix_teletext_pts
+Overrides teletext packet PTS and DTS values with the timestamps calculated
+from the PCR of the first program which the teletext stream is part of and is
+not discarded. Default value is 1, set this option to 0 if you want your
+teletext packet PTS and DTS values untouched.
+
+
+
+
+
3.11 rawvideo# TOC
+
+
Raw video demuxer.
+
+
This demuxer allows one to read raw video data. Since there is no header
+specifying the assumed video parameters, the user must specify them
+in order to be able to decode the data correctly.
+
+
This demuxer accepts the following options:
+
+framerate
+Set input video frame rate. Default value is 25.
+
+
+pixel_format
+Set the input video pixel format. Default value is yuv420p
.
+
+
+video_size
+Set the input video size. This value must be specified explicitly.
+
+
+
+
For example to read a rawvideo file input.raw with
+ffplay
, assuming a pixel format of rgb24
, a video
+size of 320x240
, and a frame rate of 10 images per second, use
+the command:
+
+
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+
+
+
3.12 sbg# TOC
+
+
SBaGen script demuxer.
+
+
This demuxer reads the script language used by SBaGen
+http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG
+script looks like that:
+
+
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00 off
+
+
+
A SBG script can mix absolute and relative timestamps. If the script uses
+either only absolute timestamps (including the script start time) or only
+relative ones, then its layout is fixed, and the conversion is
+straightforward. On the other hand, if the script mixes both kind of
+timestamps, then the NOW reference for relative timestamps will be
+taken from the current time of day at the time the script is read, and the
+script layout will be frozen according to that reference. That means that if
+the script is directly played, the actual times will match the absolute
+timestamps up to the sound controller’s clock accuracy, but if the user
+somehow pauses the playback or seeks, all times will be shifted accordingly.
+
+
+
3.13 tedcaptions# TOC
+
+
JSON captions used for TED Talks .
+
+
TED does not provide links to the captions, but they can be guessed from the
+page. The file tools/bookmarklets.html from the FFmpeg source tree
+contains a bookmarklet to expose them.
+
+
This demuxer accepts the following option:
+
+start_time
+Set the start time of the TED talk, in milliseconds. The default is 15000
+(15s). It is used to sync the captions with the downloadable videos, because
+they include a 15s intro.
+
+
+
+
Example: convert the captions to a format most players understand:
+
+
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+
+
+
4 Muxers# TOC
+
+
Muxers are configured elements in FFmpeg which allow writing
+multimedia streams to a particular type of file.
+
+
When you configure your FFmpeg build, all the supported muxers
+are enabled by default. You can list all available muxers using the
+configure option --list-muxers
.
+
+
You can disable all the muxers with the configure option
+--disable-muxers
and selectively enable / disable single muxers
+with the options --enable-muxer=MUXER
/
+--disable-muxer=MUXER
.
+
+
The option -formats
of the ff* tools will display the list of
+enabled muxers.
+
+
A description of some of the currently available muxers follows.
+
+
+
4.1 aiff# TOC
+
+
Audio Interchange File Format muxer.
+
+
+
4.1.1 Options# TOC
+
+
It accepts the following options:
+
+
+write_id3v2
+Enable ID3v2 tags writing when set to 1. Default is 0 (disabled).
+
+
+id3v2_version
+Select ID3v2 version to write. Currently only version 3 and 4 (aka.
+ID3v2.3 and ID3v2.4) are supported. The default is version 4.
+
+
+
+
+
+
+
+
CRC (Cyclic Redundancy Check) testing format.
+
+
This muxer computes and prints the Adler-32 CRC of all the input audio
+and video frames. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+CRC.
+
+
The output of the muxer consists of a single line of the form:
+CRC=0xCRC , where CRC is a hexadecimal number 0-padded to
+8 digits containing the CRC for all the decoded input frames.
+
+
See also the framecrc muxer.
+
+
+
4.2.1 Examples# TOC
+
+
For example to compute the CRC of the input, and store it in the file
+out.crc :
+
+
ffmpeg -i INPUT -f crc out.crc
+
+
+
You can print the CRC to stdout with the command:
+
+
ffmpeg -i INPUT -f crc -
+
+
+
You can select the output format of each frame with ffmpeg
by
+specifying the audio and video codec and format. For example to
+compute the CRC of the input audio converted to PCM unsigned 8-bit
+and the input video converted to MPEG-2 video, use the command:
+
+
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f crc -
+
+
+
+
4.3 framecrc# TOC
+
+
Per-packet CRC (Cyclic Redundancy Check) testing format.
+
+
This muxer computes and prints the Adler-32 CRC for each audio
+and video packet. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+CRC.
+
+
The output of the muxer consists of a line for each audio and video
+packet of the form:
+
+
stream_index , packet_dts , packet_pts , packet_duration , packet_size , 0xCRC
+
+
+
CRC is a hexadecimal number 0-padded to 8 digits containing the
+CRC of the packet.
+
+
+
4.3.1 Examples# TOC
+
+
For example to compute the CRC of the audio and video frames in
+INPUT , converted to raw audio and video packets, and store it
+in the file out.crc :
+
+
ffmpeg -i INPUT -f framecrc out.crc
+
+
+
To print the information to stdout, use the command:
+
+
ffmpeg -i INPUT -f framecrc -
+
+
+
With ffmpeg
, you can select the output format to which the
+audio and video frames are encoded before computing the CRC for each
+packet by specifying the audio and video codec. For example, to
+compute the CRC of each decoded input audio frame converted to PCM
+unsigned 8-bit and of each decoded input video frame converted to
+MPEG-2 video, use the command:
+
+
ffmpeg -i INPUT -c:a pcm_u8 -c:v mpeg2video -f framecrc -
+
+
+
See also the crc muxer.
+
+
+
4.4 framemd5# TOC
+
+
Per-packet MD5 testing format.
+
+
This muxer computes and prints the MD5 hash for each audio
+and video packet. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+hash.
+
+
The output of the muxer consists of a line for each audio and video
+packet of the form:
+
+
stream_index , packet_dts , packet_pts , packet_duration , packet_size , MD5
+
+
+
MD5 is a hexadecimal number representing the computed MD5 hash
+for the packet.
+
+
+
4.4.1 Examples# TOC
+
+
For example to compute the MD5 of the audio and video frames in
+INPUT , converted to raw audio and video packets, and store it
+in the file out.md5 :
+
+
ffmpeg -i INPUT -f framemd5 out.md5
+
+
+
To print the information to stdout, use the command:
+
+
ffmpeg -i INPUT -f framemd5 -
+
+
+
See also the md5 muxer.
+
+
+
+
+
Animated GIF muxer.
+
+
It accepts the following options:
+
+
+loop
+Set the number of times to loop the output. Use -1
for no loop, 0
+for looping indefinitely (default).
+
+
+final_delay
+Force the delay (expressed in centiseconds) after the last frame. Each frame
+ends with a delay until the next frame. The default is -1
, which is a
+special value to tell the muxer to re-use the previous delay. In case of a
+loop, you might want to customize this value to mark a pause for instance.
+
+
+
+
For example, to encode a gif looping 10 times, with a 5 seconds delay between
+the loops:
+
+
ffmpeg -i INPUT -loop 10 -final_delay 500 out.gif
+
+
+
Note 1: if you wish to extract the frames in separate GIF files, you need to
+force the image2 muxer:
+
+
ffmpeg -i INPUT -c:v gif -f image2 "out%d.gif"
+
+
+
Note 2: the GIF format has a very small time base: the delay between two frames
+cannot be smaller than one centisecond.
+
+
+
+
+
Apple HTTP Live Streaming muxer that segments MPEG-TS according to
+the HTTP Live Streaming (HLS) specification.
+
+
It creates a playlist file, and one or more segment files. The output filename
+specifies the playlist filename.
+
+
By default, the muxer creates a file for each segment produced. These files
+have the same name as the playlist, followed by a sequential number and a
+.ts extension.
+
+
For example, to convert an input file with ffmpeg
:
+
+
ffmpeg -i in.nut out.m3u8
+
+
This example will produce the playlist, out.m3u8 , and segment files:
+out0.ts , out1.ts , out2.ts , etc.
+
+
See also the segment muxer, which provides a more generic and
+flexible implementation of a segmenter, and can be used to perform HLS
+segmentation.
+
+
+
4.6.1 Options# TOC
+
+
This muxer supports the following options:
+
+
+hls_time seconds
+Set the segment length in seconds. Default value is 2.
+
+
+hls_list_size size
+Set the maximum number of playlist entries. If set to 0 the list file
+will contain all the segments. Default value is 5.
+
+
+hls_ts_options options_list
+Set output format options using a :-separated list of key=value
+parameters. Values containing :
special characters must be
+escaped.
+
+
+hls_wrap wrap
+Set the number after which the segment filename number (the number
+specified in each segment file) wraps. If set to 0 the number will be
+never wrapped. Default value is 0.
+
+This option is useful to avoid to fill the disk with many segment
+files, and limits the maximum number of segment files written to disk
+to wrap .
+
+
+start_number number
+Start the playlist sequence number from number . Default value is
+0.
+
+
+hls_allow_cache allowcache
+Explicitly set whether the client MAY (1) or MUST NOT (0) cache media segments.
+
+
+hls_base_url baseurl
+Append baseurl to every entry in the playlist.
+Useful to generate playlists with absolute paths.
+
+Note that the playlist sequence number must be unique for each segment
+and it is not to be confused with the segment filename sequence number
+which can be cyclic, for example if the wrap option is
+specified.
+
+
+hls_segment_filename filename
+Set the segment filename. Unless hls_flags single_file is set filename
+is used as a string format with the segment number:
+
+
ffmpeg in.nut -hls_segment_filename 'file%03d.ts' out.m3u8
+
+This example will produce the playlist, out.m3u8 , and segment files:
+file000.ts , file001.ts , file002.ts , etc.
+
+
+hls_flags single_file
+If this flag is set, the muxer will store all segments in a single MPEG-TS
+file, and will use byte ranges in the playlist. HLS playlists generated with
+this way will have the version number 4.
+For example:
+
+
ffmpeg -i in.nut -hls_flags single_file out.m3u8
+
+Will produce the playlist, out.m3u8 , and a single segment file,
+out.ts .
+
+
+hls_flags delete_segments
+Segment files removed from the playlist are deleted after a period of time
+equal to the duration of the segment plus the duration of the playlist.
+
+
+
+
+
+
+
ICO file muxer.
+
+
Microsoft’s icon file format (ICO) has some strict limitations that should be noted:
+
+
+ Size cannot exceed 256 pixels in any dimension
+
+ Only BMP and PNG images can be stored
+
+ If a BMP image is used, it must be one of the following pixel formats:
+
+
BMP Bit Depth FFmpeg Pixel Format
+1bit pal8
+4bit pal8
+8bit pal8
+16bit rgb555le
+24bit bgr24
+32bit bgra
+
+
+ If a BMP image is used, it must use the BITMAPINFOHEADER DIB header
+
+ If a PNG image is used, it must use the rgba pixel format
+
+
+
+
4.8 image2# TOC
+
+
Image file muxer.
+
+
The image file muxer writes video frames to image files.
+
+
The output filenames are specified by a pattern, which can be used to
+produce sequentially numbered series of files.
+The pattern may contain the string "%d" or "%0N d", this string
+specifies the position of the characters representing a numbering in
+the filenames. If the form "%0N d" is used, the string
+representing the number in each filename is 0-padded to N
+digits. The literal character ’%’ can be specified in the pattern with
+the string "%%".
+
+
If the pattern contains "%d" or "%0N d", the first filename of
+the file list specified will contain the number 1, all the following
+numbers will be sequential.
+
+
The pattern may contain a suffix which is used to automatically
+determine the format of the image files to write.
+
+
For example the pattern "img-%03d.bmp" will specify a sequence of
+filenames of the form img-001.bmp , img-002.bmp , ...,
+img-010.bmp , etc.
+The pattern "img%%-%d.jpg" will specify a sequence of filenames of the
+form img%-1.jpg , img%-2.jpg , ..., img%-10.jpg ,
+etc.
+
+
+
4.8.1 Examples# TOC
+
+
The following example shows how to use ffmpeg
for creating a
+sequence of files img-001.jpeg , img-002.jpeg , ...,
+taking one image every second from the input video:
+
+
ffmpeg -i in.avi -vsync 1 -r 1 -f image2 'img-%03d.jpeg'
+
+
+
Note that with ffmpeg
, if the format is not specified with the
+-f
option and the output filename specifies an image file
+format, the image2 muxer is automatically selected, so the previous
+command can be written as:
+
+
ffmpeg -i in.avi -vsync 1 -r 1 'img-%03d.jpeg'
+
+
+
Note also that the pattern must not necessarily contain "%d" or
+"%0N d", for example to create a single image file
+img.jpeg from the input video you can employ the command:
+
+
ffmpeg -i in.avi -f image2 -frames:v 1 img.jpeg
+
+
+
The strftime option allows you to expand the filename with
+date and time information. Check the documentation of
+the strftime()
function for the syntax.
+
+
For example to generate image files from the strftime()
+"%Y-%m-%d_%H-%M-%S" pattern, the following ffmpeg
command
+can be used:
+
+
ffmpeg -f v4l2 -r 1 -i /dev/video0 -f image2 -strftime 1 "%Y-%m-%d_%H-%M-%S.jpg"
+
+
+
+
4.8.2 Options# TOC
+
+
+start_number
+Start the sequence from the specified number. Default value is 1. Must
+be a non-negative number.
+
+
+update
+If set to 1, the filename will always be interpreted as just a
+filename, not a pattern, and the corresponding file will be continuously
+overwritten with new images. Default value is 0.
+
+
+strftime
+If set to 1, expand the filename with date and time information from
+strftime()
. Default value is 0.
+
+
+
+
The image muxer supports the .Y.U.V image file format. This format is
+special in that each image frame consists of three files, for
+each of the YUV420P components. To read or write this image file format,
+specify the name of the ’.Y’ file. The muxer will automatically open the
+’.U’ and ’.V’ files as required.
+
+
+
4.9 matroska# TOC
+
+
Matroska container muxer.
+
+
This muxer implements the matroska and webm container specs.
+
+
+
4.9.1 Metadata# TOC
+
+
The recognized metadata settings in this muxer are:
+
+
+title
+Set title name provided to a single track.
+
+
+language
+Specify the language of the track in the Matroska languages form.
+
+The language can be either the 3 letters bibliographic ISO-639-2 (ISO
+639-2/B) form (like "fre" for French), or a language code mixed with a
+country code for specialities in languages (like "fre-ca" for Canadian
+French).
+
+
+stereo_mode
+Set stereo 3D video layout of two views in a single video track.
+
+The following values are recognized:
+
+‘mono ’
+video is not stereo
+
+‘left_right ’
+Both views are arranged side by side, Left-eye view is on the left
+
+‘bottom_top ’
+Both views are arranged in top-bottom orientation, Left-eye view is at bottom
+
+‘top_bottom ’
+Both views are arranged in top-bottom orientation, Left-eye view is on top
+
+‘checkerboard_rl ’
+Each view is arranged in a checkerboard interleaved pattern, Left-eye view being first
+
+‘checkerboard_lr ’
+Each view is arranged in a checkerboard interleaved pattern, Right-eye view being first
+
+‘row_interleaved_rl ’
+Each view is constituted by a row based interleaving, Right-eye view is first row
+
+‘row_interleaved_lr ’
+Each view is constituted by a row based interleaving, Left-eye view is first row
+
+‘col_interleaved_rl ’
+Both views are arranged in a column based interleaving manner, Right-eye view is first column
+
+‘col_interleaved_lr ’
+Both views are arranged in a column based interleaving manner, Left-eye view is first column
+
+‘anaglyph_cyan_red ’
+All frames are in anaglyph format viewable through red-cyan filters
+
+‘right_left ’
+Both views are arranged side by side, Right-eye view is on the left
+
+‘anaglyph_green_magenta ’
+All frames are in anaglyph format viewable through green-magenta filters
+
+‘block_lr ’
+Both eyes laced in one Block, Left-eye view is first
+
+‘block_rl ’
+Both eyes laced in one Block, Right-eye view is first
+
+
+
+
+
+
For example a 3D WebM clip can be created using the following command line:
+
+
ffmpeg -i sample_left_right_clip.mpg -an -c:v libvpx -metadata stereo_mode=left_right -y stereo_clip.webm
+
+
+
+
4.9.2 Options# TOC
+
+
This muxer supports the following options:
+
+
+reserve_index_space
+By default, this muxer writes the index for seeking (called cues in Matroska
+terms) at the end of the file, because it cannot know in advance how much space
+to leave for the index at the beginning of the file. However for some use cases
+– e.g. streaming where seeking is possible but slow – it is useful to put the
+index at the beginning of the file.
+
+If this option is set to a non-zero value, the muxer will reserve a given amount
+of space in the file header and then try to write the cues there when the muxing
+finishes. If the available space does not suffice, muxing will fail. A safe size
+for most use cases should be about 50kB per hour of video.
+
+Note that cues are only written if the output is seekable and this option will
+have no effect if it is not.
+
+
+
+
+
4.10 md5# TOC
+
+
MD5 testing format.
+
+
This muxer computes and prints the MD5 hash of all the input audio
+and video frames. By default audio frames are converted to signed
+16-bit raw audio and video frames to raw video before computing the
+hash.
+
+
The output of the muxer consists of a single line of the form:
+MD5=MD5 , where MD5 is a hexadecimal number representing
+the computed MD5 hash.
+
+
For example to compute the MD5 hash of the input converted to raw
+audio and video, and store it in the file out.md5 :
+
+
ffmpeg -i INPUT -f md5 out.md5
+
+
+
You can print the MD5 to stdout with the command:
+
+
ffmpeg -i INPUT -f md5 -
+
+
+
See also the framemd5 muxer.
+
+
+
4.11 mov, mp4, ismv# TOC
+
+
MOV/MP4/ISMV (Smooth Streaming) muxer.
+
+
The mov/mp4/ismv muxer supports fragmentation. Normally, a MOV/MP4
+file has all the metadata about all packets stored in one location
+(written at the end of the file, it can be moved to the start for
+better playback by adding faststart to the movflags , or
+using the qt-faststart
tool). A fragmented
+file consists of a number of fragments, where packets and metadata
+about these packets are stored together. Writing a fragmented
+file has the advantage that the file is decodable even if the
+writing is interrupted (while a normal MOV/MP4 is undecodable if
+it is not properly finished), and it requires less memory when writing
+very long files (since writing normal MOV/MP4 files stores info about
+every single packet in memory until the file is closed). The downside
+is that it is less compatible with other applications.
+
+
+
4.11.1 Options# TOC
+
+
Fragmentation is enabled by setting one of the AVOptions that define
+how to cut the file into fragments:
+
+
+-moov_size bytes
+Reserves space for the moov atom at the beginning of the file instead of placing the
+moov atom at the end. If the space reserved is insufficient, muxing will fail.
+
+-movflags frag_keyframe
+Start a new fragment at each video keyframe.
+
+-frag_duration duration
+Create fragments that are duration microseconds long.
+
+-frag_size size
+Create fragments that contain up to size bytes of payload data.
+
+-movflags frag_custom
+Allow the caller to manually choose when to cut fragments, by
+calling av_write_frame(ctx, NULL)
to write a fragment with
+the packets written so far. (This is only useful with other
+applications integrating libavformat, not from ffmpeg
.)
+
+-min_frag_duration duration
+Don’t create fragments that are shorter than duration microseconds long.
+
+
+
+
If more than one condition is specified, fragments are cut when
+one of the specified conditions is fulfilled. The exception to this is
+-min_frag_duration
, which has to be fulfilled for any of the other
+conditions to apply.
+
+
Additionally, the way the output file is written can be adjusted
+through a few other options:
+
+
+-movflags empty_moov
+Write an initial moov atom directly at the start of the file, without
+describing any samples in it. Generally, an mdat/moov pair is written
+at the start of the file, as a normal MOV/MP4 file, containing only
+a short portion of the file. With this option set, there is no initial
+mdat atom, and the moov atom only describes the tracks but has
+a zero duration.
+
+This option is implicitly set when writing ismv (Smooth Streaming) files.
+
+-movflags separate_moof
+Write a separate moof (movie fragment) atom for each track. Normally,
+packets for all tracks are written in a moof atom (which is slightly
+more efficient), but with this option set, the muxer writes one moof/mdat
+pair for each track, making it easier to separate tracks.
+
+This option is implicitly set when writing ismv (Smooth Streaming) files.
+
+-movflags faststart
+Run a second pass moving the index (moov atom) to the beginning of the file.
+This operation can take a while, and will not work in various situations such
+as fragmented output, thus it is not enabled by default.
+
+-movflags rtphint
+Add RTP hinting tracks to the output file.
+
+-movflags disable_chpl
+Disable Nero chapter markers (chpl atom). Normally, both Nero chapters
+and a QuickTime chapter track are written to the file. With this option
+set, only the QuickTime chapter track will be written. Nero chapters can
+cause failures when the file is reprocessed with certain tagging programs, like
+mp3Tag 2.61a and iTunes 11.3, most likely other versions are affected as well.
+
+-movflags omit_tfhd_offset
+Do not write any absolute base_data_offset in tfhd atoms. This avoids
+tying fragments to absolute byte positions in the file/streams.
+
+-movflags default_base_moof
+Similarly to the omit_tfhd_offset, this flag avoids writing the
+absolute base_data_offset field in tfhd atoms, but does so by using
+the new default-base-is-moof flag instead. This flag is new from
+14496-12:2012. This may make the fragments easier to parse in certain
+circumstances (avoiding basing track fragment location calculations
+on the implicit end of the previous track fragment).
+
+
+
+
+
4.11.2 Example# TOC
+
+
Smooth Streaming content can be pushed in real time to a publishing
+point on IIS with this muxer. Example:
+
+
ffmpeg -re <normal input/transcoding options> -movflags isml+frag_keyframe -f ismv http://server/publishingpoint.isml/Streams(Encoder1)
+
+
+
+
4.12 mp3# TOC
+
+
The MP3 muxer writes a raw MP3 stream with the following optional features:
+
+ An ID3v2 metadata header at the beginning (enabled by default). Versions 2.3 and
+2.4 are supported, the id3v2_version
private option controls which one is
+used (3 or 4). Setting id3v2_version
to 0 disables the ID3v2 header
+completely.
+
+The muxer supports writing attached pictures (APIC frames) to the ID3v2 header.
+The pictures are supplied to the muxer in form of a video stream with a single
+packet. There can be any number of those streams, each will correspond to a
+single APIC frame. The stream metadata tags title and comment map
+to APIC description and picture type respectively. See
+http://id3.org/id3v2.4.0-frames for allowed picture types.
+
+Note that the APIC frames must be written at the beginning, so the muxer will
+buffer the audio frames until it gets all the pictures. It is therefore advised
+to provide the pictures as soon as possible to avoid excessive buffering.
+
+ A Xing/LAME frame right after the ID3v2 header (if present). It is enabled by
+default, but will be written only if the output is seekable. The
+write_xing
private option can be used to disable it. The frame contains
+various information that may be useful to the decoder, like the audio duration
+or encoder delay.
+
+ A legacy ID3v1 tag at the end of the file (disabled by default). It may be
+enabled with the write_id3v1
private option, but as its capabilities are
+very limited, its usage is not recommended.
+
+
+
Examples:
+
+
Write an mp3 with an ID3v2.3 header and an ID3v1 footer:
+
+
ffmpeg -i INPUT -id3v2_version 3 -write_id3v1 1 out.mp3
+
+
+
To attach a picture to an mp3 file select both the audio and the picture stream
+with map
:
+
+
ffmpeg -i input.mp3 -i cover.png -c copy -map 0 -map 1
+-metadata:s:v title="Album cover" -metadata:s:v comment="Cover (Front)" out.mp3
+
+
+
Write a "clean" MP3 without any extra features:
+
+
ffmpeg -i input.wav -write_xing 0 -id3v2_version 0 out.mp3
+
+
+
+
4.13 mpegts# TOC
+
+
MPEG transport stream muxer.
+
+
This muxer implements ISO 13818-1 and part of ETSI EN 300 468.
+
+
The recognized metadata settings in mpegts muxer are service_provider
+and service_name
. If they are not set the default for
+service_provider
is "FFmpeg" and the default for
+service_name
is "Service01".
+
+
+
4.13.1 Options# TOC
+
+
The muxer options are:
+
+
+-mpegts_original_network_id number
+Set the original_network_id (default 0x0001). This is the unique identifier
+of a network in DVB. Its main use is in the unique identification of a
+service through the path Original_Network_ID, Transport_Stream_ID.
+
+-mpegts_transport_stream_id number
+Set the transport_stream_id (default 0x0001). This identifies a
+transponder in DVB.
+
+-mpegts_service_id number
+Set the service_id (default 0x0001) also known as program in DVB.
+
+-mpegts_pmt_start_pid number
+Set the first PID for PMT (default 0x1000, max 0x1f00).
+
+-mpegts_start_pid number
+Set the first PID for data packets (default 0x0100, max 0x0f00).
+
+-mpegts_m2ts_mode number
+Enable m2ts mode if set to 1. Default value is -1 which disables m2ts mode.
+
+-muxrate number
+Set a constant muxrate (default VBR).
+
+-pcr_period number
+Override the default PCR retransmission time (default 20ms), ignored
+if variable muxrate is selected.
+
+-pes_payload_size number
+Set minimum PES packet payload in bytes.
+
+-mpegts_flags flags
+Set flags (see below).
+
+-mpegts_copyts number
+Preserve original timestamps, if value is set to 1. Default value is -1, which
+results in shifting timestamps so that they start from 0.
+
+-tables_version number
+Set PAT, PMT and SDT version (default 0, valid values are from 0 to 31, inclusively).
+This option allows updating stream structure so that standard consumer may
+detect the change. To do so, reopen output AVFormatContext (in case of API
+usage) or restart ffmpeg instance, cyclically changing tables_version value:
+
+
ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+ffmpeg -i source3.ts -codec copy -f mpegts -tables_version 31 udp://1.1.1.1:1111
+ffmpeg -i source1.ts -codec copy -f mpegts -tables_version 0 udp://1.1.1.1:1111
+ffmpeg -i source2.ts -codec copy -f mpegts -tables_version 1 udp://1.1.1.1:1111
+...
+
+
+
+
+
Option mpegts_flags may take a set of such flags:
+
+
+resend_headers
+Reemit PAT/PMT before writing the next packet.
+
+latm
+Use LATM packetization for AAC.
+
+
+
+
+
4.13.2 Example# TOC
+
+
+
ffmpeg -i file.mpg -c copy \
+ -mpegts_original_network_id 0x1122 \
+ -mpegts_transport_stream_id 0x3344 \
+ -mpegts_service_id 0x5566 \
+ -mpegts_pmt_start_pid 0x1500 \
+ -mpegts_start_pid 0x150 \
+ -metadata service_provider="Some provider" \
+ -metadata service_name="Some Channel" \
+ -y out.ts
+
+
+
+
4.14 null# TOC
+
+
Null muxer.
+
+
This muxer does not generate any output file, it is mainly useful for
+testing or benchmarking purposes.
+
+
For example to benchmark decoding with ffmpeg
you can use the
+command:
+
+
ffmpeg -benchmark -i INPUT -f null out.null
+
+
+
Note that the above command does not read or write the out.null
+file, but specifying the output file is required by the ffmpeg
+syntax.
+
+
Alternatively you can write the command as:
+
+
ffmpeg -benchmark -i INPUT -f null -
+
+
+
+
4.15 nut# TOC
+
+
+-syncpoints flags
+Change the syncpoint usage in nut:
+
+default use the normal low-overhead seeking aids.
+none do not use the syncpoints at all, reducing the overhead but making the stream non-seekable;
+Use of this option is not recommended, as the resulting files are very damage
+ sensitive and seeking is not possible. Also in general the overhead from
+ syncpoints is negligible. Note, -write_index
0 can be used to disable
+ all growing data tables, allowing to mux endless streams with limited memory
+ and without these disadvantages.
+
+timestamped extend the syncpoint with a wallclock field.
+
+The none and timestamped flags are experimental.
+
+-write_index bool
+Write index at the end, the default is to write an index.
+
+
+
+
+
ffmpeg -i INPUT -f_strict experimental -syncpoints none - | processor
+
+
+
+
4.16 ogg# TOC
+
+
Ogg container muxer.
+
+
+-page_duration duration
+Preferred page duration, in microseconds. The muxer will attempt to create
+pages that are approximately duration microseconds long. This allows the
+user to compromise between seek granularity and container overhead. The default
+is 1 second. A value of 0 will fill all segments, making pages as large as
+possible. A value of 1 will effectively use 1 packet-per-page in most
+situations, giving a small seek granularity at the cost of additional container
+overhead.
+
+
+
+
+
4.17 segment, stream_segment, ssegment# TOC
+
+
Basic stream segmenter.
+
+
This muxer outputs streams to a number of separate files of nearly
+fixed duration. Output filename pattern can be set in a fashion similar to
+image2 .
+
+
stream_segment
is a variant of the muxer used to write to
+streaming output formats, i.e. which do not require global headers,
+and is recommended for outputting e.g. to MPEG transport stream segments.
+ssegment
is a shorter alias for stream_segment
.
+
+
Every segment starts with a keyframe of the selected reference stream,
+which is set through the reference_stream option.
+
+
Note that if you want accurate splitting for a video file, you need to
+make the input key frames correspond to the exact splitting times
+expected by the segmenter, or the segment muxer will start the new
+segment with the key frame found next after the specified start
+time.
+
+
The segment muxer works best with a single constant frame rate video.
+
+
Optionally it can generate a list of the created segments, by setting
+the option segment_list . The list type is specified by the
+segment_list_type option. The entry filenames in the segment
+list are set by default to the basename of the corresponding segment
+files.
+
+
See also the hls muxer, which provides a more specific
+implementation for HLS segmentation.
+
+
+
4.17.1 Options# TOC
+
+
The segment muxer supports the following options:
+
+
+reference_stream specifier
+Set the reference stream, as specified by the string specifier .
+If specifier is set to auto
, the reference is chosen
+automatically. Otherwise it must be a stream specifier (see the “Stream
+specifiers” chapter in the ffmpeg manual) which specifies the
+reference stream. The default value is auto
.
+
+
+segment_format format
+Override the inner container format, by default it is guessed by the filename
+extension.
+
+
+segment_format_options options_list
+Set output format options using a :-separated list of key=value
+parameters. Values containing the :
special character must be
+escaped.
+
+
+segment_list name
+Generate also a listfile named name . If not specified no
+listfile is generated.
+
+
+segment_list_flags flags
+Set flags affecting the segment list generation.
+
+It currently supports the following flags:
+
+‘cache ’
+Allow caching (only affects M3U8 list files).
+
+
+‘live ’
+Allow live-friendly file generation.
+
+
+
+
+segment_list_type type
+Select the listing format.
+
+flat use a simple flat list of entries.
+hls use a m3u8-like structure.
+
+
+
+segment_list_size size
+Update the list file so that it contains at most size
+segments. If 0 the list file will contain all the segments. Default
+value is 0.
+
+
+segment_list_entry_prefix prefix
+Prepend prefix to each entry. Useful to generate absolute paths.
+By default no prefix is applied.
+
+The following values are recognized:
+
+‘flat ’
+Generate a flat list for the created segments, one segment per line.
+
+
+‘csv, ext ’
+Generate a list for the created segments, one segment per line,
+each line matching the format (comma-separated values):
+
+
segment_filename ,segment_start_time ,segment_end_time
+
+
+segment_filename is the name of the output file generated by the
+muxer according to the provided pattern. CSV escaping (according to
+RFC4180) is applied if required.
+
+segment_start_time and segment_end_time specify
+the segment start and end time expressed in seconds.
+
+A list file with the suffix ".csv"
or ".ext"
will
+auto-select this format.
+
+‘ext ’ is deprecated in favor of ‘csv ’.
+
+
+‘ffconcat ’
+Generate an ffconcat file for the created segments. The resulting file
+can be read using the FFmpeg concat demuxer.
+
+A list file with the suffix ".ffcat"
or ".ffconcat"
will
+auto-select this format.
+
+
+‘m3u8 ’
+Generate an extended M3U8 file, version 3, compliant with
+http://tools.ietf.org/id/draft-pantos-http-live-streaming .
+
+A list file with the suffix ".m3u8"
will auto-select this format.
+
+
+
+If not specified the type is guessed from the list file name suffix.
+
+
+segment_time time
+Set segment duration to time , the value must be a duration
+specification. Default value is "2". See also the
+segment_times option.
+
+Note that splitting may not be accurate, unless you force the
+reference stream key-frames at the given time. See the introductory
+notice and the examples below.
+
+
+segment_atclocktime 1|0
+If set to "1" split at regular clock time intervals starting from 00:00
+o’clock. The time value specified in segment_time is
+used for setting the length of the splitting interval.
+
+For example with segment_time set to "900" this makes it possible
+to create files at 12:00 o’clock, 12:15, 12:30, etc.
+
+Default value is "0".
+
+
+segment_time_delta delta
+Specify the accuracy time when selecting the start time for a
+segment, expressed as a duration specification. Default value is "0".
+
+When delta is specified a key-frame will start a new segment if its
+PTS satisfies the relation:
+
+
PTS >= start_time - time_delta
+
+
+This option is useful when splitting video content, which is always
+split at GOP boundaries, in case a key frame is found just before the
+specified split time.
+
+In particular may be used in combination with the ffmpeg option
+force_key_frames . The key frame times specified by
+force_key_frames may not be set accurately because of rounding
+issues, with the consequence that a key frame time may result set just
+before the specified time. For constant frame rate videos a value of
+1/(2*frame_rate ) should address the worst case mismatch between
+the specified time and the time set by force_key_frames .
+
+
+segment_times times
+Specify a list of split points. times contains a list of comma
+separated duration specifications, in increasing order. See also
+the segment_time option.
+
+
+segment_frames frames
+Specify a list of split video frame numbers. frames contains a
+list of comma separated integer numbers, in increasing order.
+
+This option specifies to start a new segment whenever a reference
+stream key frame is found and the sequential number (starting from 0)
+of the frame is greater or equal to the next value in the list.
+
+
+segment_wrap limit
+Wrap around segment index once it reaches limit .
+
+
+segment_start_number number
+Set the sequence number of the first segment. Defaults to 0
.
+
+
+reset_timestamps 1|0
+Reset timestamps at the begin of each segment, so that each segment
+will start with near-zero timestamps. It is meant to ease the playback
+of the generated segments. May not work with some combinations of
+muxers/codecs. It is set to 0
by default.
+
+
+initial_offset offset
+Specify timestamp offset to apply to the output packet timestamps. The
+argument must be a time duration specification, and defaults to 0.
+
+
+
+
+
4.17.2 Examples# TOC
+
+
+
+
+
4.18 smoothstreaming# TOC
+
+
Smooth Streaming muxer generates a set of files (Manifest, chunks) suitable for serving with conventional web server.
+
+
+window_size
+Specify the number of fragments kept in the manifest. Default 0 (keep all).
+
+
+extra_window_size
+Specify the number of fragments kept outside of the manifest before removing from disk. Default 5.
+
+
+lookahead_count
+Specify the number of lookahead fragments. Default 2.
+
+
+min_frag_duration
+Specify the minimum fragment duration (in microseconds). Default 5000000.
+
+
+remove_at_exit
+Specify whether to remove all fragments when finished. Default 0 (do not remove).
+
+
+
+
+
+
4.19 tee# TOC
+
+
The tee muxer can be used to write the same data to several files or any
+other kind of muxer. It can be used, for example, to both stream a video to
+the network and save it to disk at the same time.
+
+
It is different from specifying several outputs to the ffmpeg
+command-line tool because the audio and video data will be encoded only once
+with the tee muxer; encoding can be a very expensive process. It is not
+useful when using the libavformat API directly because it is then possible
+to feed the same packets to several muxers directly.
+
+
The slave outputs are specified in the file name given to the muxer,
+separated by ’|’. If any of the slave name contains the ’|’ separator,
+leading or trailing spaces or any special character, it must be
+escaped (see (ffmpeg-utils)the "Quoting and escaping"
+section in the ffmpeg-utils(1) manual ).
+
+
Muxer options can be specified for each slave by prepending them as a list of
+key =value pairs separated by ’:’, between square brackets. If
+the options values contain a special character or the ’:’ separator, they
+must be escaped; note that this is a second level escaping.
+
+
The following special options are also recognized:
+
+f
+Specify the format name. Useful if it cannot be guessed from the
+output name suffix.
+
+
+bsfs[/spec ]
+Specify a list of bitstream filters to apply to the specified
+output.
+
+It is possible to specify to which streams a given bitstream filter
+applies, by appending a stream specifier to the option separated by
+/
. spec must be a stream specifier (see Format stream specifiers ). If the stream specifier is not specified, the
+bitstream filters will be applied to all streams in the output.
+
+Several bitstream filters can be specified, separated by ",".
+
+
+select
+Select the streams that should be mapped to the slave output,
+specified by a stream specifier. If not specified, this defaults to
+all the input streams.
+
+
+
+
+
4.19.1 Examples# TOC
+
+
+ Encode something and both archive it in a WebM file and stream it
+as MPEG-TS over UDP (the streams need to be explicitly mapped):
+
+
ffmpeg -i ... -c:v libx264 -c:a mp2 -f tee -map 0:v -map 0:a
+ "archive-20121107.mkv|[f=mpegts]udp://10.0.1.255:1234/"
+
+
+ Use ffmpeg
to encode the input, and send the output
+to three different destinations. The dump_extra
bitstream
+filter is used to add extradata information to all the output video
+keyframes packets, as requested by the MPEG-TS format. The select
+option is applied to out.aac in order to make it contain only
+audio packets.
+
+
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
+ -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=a]out.aac"
+
+
+ As below, but select only stream a:1
for the audio output. Note
+that a second level escaping must be performed, as ":" is a special
+character used to separate options.
+
+
ffmpeg -i ... -map 0 -flags +global_header -c:v libx264 -c:a aac -strict experimental
+ -f tee "[bsfs/v=dump_extra]out.ts|[movflags=+faststart]out.mp4|[select=\'a:1\']out.aac"
+
+
+
+
Note: some codecs may need different options depending on the output format;
+the auto-detection of this can not work with the tee muxer. The main example
+is the global_header flag.
+
+
+
4.20 webm_dash_manifest# TOC
+
+
WebM DASH Manifest muxer.
+
+
This muxer implements the WebM DASH Manifest specification to generate the DASH manifest XML.
+
+
+
4.20.1 Options# TOC
+
+
This muxer supports the following options:
+
+
+adaptation_sets
+This option has the following syntax: "id=x,streams=a,b,c id=y,streams=d,e" where x and y are the
+unique identifiers of the adaptation sets and a,b,c,d and e are the indices of the corresponding
+audio and video streams. Any number of adaptation sets can be added using this option.
+
+
+
+
+
4.20.2 Example# TOC
+
+
ffmpeg -f webm_dash_manifest -i video1.webm \
+ -f webm_dash_manifest -i video2.webm \
+ -f webm_dash_manifest -i audio1.webm \
+ -f webm_dash_manifest -i audio2.webm \
+ -map 0 -map 1 -map 2 -map 3 \
+ -c copy \
+ -f webm_dash_manifest \
+ -adaptation_sets "id=0,streams=0,1 id=1,streams=2,3" \
+ manifest.xml
+
+
+
+
5 Metadata# TOC
+
+
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
+INI-like text file and then load it back using the metadata muxer/demuxer.
+
+
The file format is as follows:
+
+ A file consists of a header and a number of metadata tags divided into sections,
+each on its own line.
+
+ The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
+
+ Metadata tags are of the form ’key=value’
+
+ Immediately after header follows global metadata
+
+ After global metadata there may be sections with per-stream/per-chapter
+metadata.
+
+ A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
+brackets (’[’, ’]’) and ends with next section or end of file.
+
+ At the beginning of a chapter section there may be an optional timebase to be
+used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
+den are integers. If the timebase is missing then start/end times are assumed to
+be in milliseconds.
+Next a chapter section must contain chapter start and end times in form
+’START=num’, ’END=num’, where num is a positive integer.
+
+ Empty lines and lines starting with ’;’ or ’#’ are ignored.
+
+ Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
+newline) must be escaped with a backslash ’\’.
+
+ Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
+the tag (in the example above key is ’foo ’, value is ’ bar’).
+
+
+
A ffmetadata file might look like this:
+
+
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+
+
By using the ffmetadata muxer and demuxer it is possible to extract
+metadata from an input file to an ffmetadata file, and then transcode
+the file into an output file with the edited ffmetadata file.
+
+
Extracting an ffmetadata file with ffmpeg goes as follows:
+
+
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+
+
Reinserting edited metadata information from the FFMETADATAFILE file can
+be done as:
+
+
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+
+
+
+
6 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavformat
+
+
+
+
7 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-protocols.html b/Externals/ffmpeg/dev/doc/ffmpeg-protocols.html
new file mode 100644
index 0000000000..0fd895cce0
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-protocols.html
@@ -0,0 +1,1545 @@
+
+
+
+
+
+
+ FFmpeg Protocols Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Protocols Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes the input and output protocols provided by the
+libavformat library.
+
+
+
+
2 Protocols# TOC
+
+
Protocols are configured elements in FFmpeg that enable access to
+resources that require specific protocols.
+
+
When you configure your FFmpeg build, all the supported protocols are
+enabled by default. You can list all available ones using the
+configure option "–list-protocols".
+
+
You can disable all the protocols using the configure option
+"–disable-protocols", and selectively enable a protocol using the
+option "–enable-protocol=PROTOCOL ", or you can disable a
+particular protocol using the option
+"–disable-protocol=PROTOCOL ".
+
+
The option "-protocols" of the ff* tools will display the list of
+supported protocols.
+
+
A description of the currently available protocols follows.
+
+
+
2.1 bluray# TOC
+
+
Read BluRay playlist.
+
+
The accepted options are:
+
+angle
+BluRay angle
+
+
+chapter
+Start chapter (1...N)
+
+
+playlist
+Playlist to read (BDMV/PLAYLIST/?????.mpls)
+
+
+
+
+
Examples:
+
+
Read longest playlist from BluRay mounted to /mnt/bluray:
+
+
+
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
+
+
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+
+
+
2.2 cache# TOC
+
+
Caching wrapper for input stream.
+
+
Cache the input stream to temporary file. It brings seeking capability to live streams.
+
+
+
+
+
2.3 concat# TOC
+
+
Physical concatenation protocol.
+
+
Allow to read and seek from many resources in sequence as if they were
+a unique resource.
+
+
A URL accepted by this protocol has the syntax:
+
+
concat:URL1 |URL2 |...|URLN
+
+
+
where URL1 , URL2 , ..., URLN are the urls of the
+resource to be concatenated, each one possibly specifying a distinct
+protocol.
+
+
For example to read a sequence of files split1.mpeg ,
+split2.mpeg , split3.mpeg with ffplay
use the
+command:
+
+
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+
+
Note that you may need to escape the character "|" which is special for
+many shells.
+
+
+
2.4 crypto# TOC
+
+
AES-encrypted stream reading protocol.
+
+
The accepted options are:
+
+key
+Set the AES decryption key binary block from given hexadecimal representation.
+
+
+iv
+Set the AES decryption initialization vector binary block from given hexadecimal representation.
+
+
+
+
Accepted URL formats:
+
+
crypto:URL
+crypto+URL
+
+
+
+
2.5 data# TOC
+
+
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
+
+
For example, to convert a GIF file given inline with ffmpeg
:
+
+
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+
+
+
2.6 file# TOC
+
+
File access protocol.
+
+
Allow to read from or write to a file.
+
+
A file URL can have the form:
+
+
+
where filename is the path of the file to read.
+
+
An URL that does not have a protocol prefix will be assumed to be a
+file URL. Depending on the build, an URL that looks like a Windows
+path with the drive letter at the beginning will also be assumed to be
+a file URL (usually not the case in builds for unix-like systems).
+
+
For example to read from a file input.mpeg with ffmpeg
+use the command:
+
+
ffmpeg -i file:input.mpeg output.mpeg
+
+
+
This protocol accepts the following options:
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable for files on slow medium.
+
+
+
+
+
+
+
FTP (File Transfer Protocol).
+
+
Allow to read from or write to remote resources using FTP protocol.
+
+
Following syntax is required.
+
+
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+ftp-anonymous-password
+Password used when login as anonymous user. Typically an e-mail address
+should be used.
+
+
+ftp-write-seekable
+Control seekability of connection during encoding. If set to 1 the
+resource is supposed to be seekable, if set to 0 it is assumed not
+to be seekable. Default value is 0.
+
+
+
+
NOTE: Protocol can be used as output, but it is recommended to not do
+it, unless special care is taken (tests, customized server configuration
+etc.). Different FTP servers behave in different way during seek
+operation. ff* tools may produce incomplete content due to server limitations.
+
+
+
2.8 gopher# TOC
+
+
Gopher protocol.
+
+
+
+
+
Read Apple HTTP Live Streaming compliant segmented stream as
+a uniform one. The M3U8 playlists describing the segments can be
+remote HTTP resources or local files, accessed using the standard
+file protocol.
+The nested protocol is declared by specifying
+"+proto " after the hls URI scheme name, where proto
+is either "file" or "http".
+
+
+
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+
+
Using this protocol is discouraged - the hls demuxer should work
+just as well (if not, please report the issues) and is more complete.
+To use the hls demuxer instead, simply use the direct URLs to the
+m3u8 files.
+
+
+
2.10 http# TOC
+
+
HTTP (Hyper Text Transfer Protocol).
+
+
This protocol accepts the following options:
+
+
+seekable
+Control seekability of connection. If set to 1 the resource is
+supposed to be seekable, if set to 0 it is assumed not to be seekable,
+if set to -1 it will try to autodetect if it is seekable. Default
+value is -1.
+
+
+chunked_post
+If set to 1 use chunked Transfer-Encoding for posts, default is 1.
+
+
+content_type
+Set a specific content type for the POST messages.
+
+
+headers
+Set custom HTTP headers, can override built in default headers. The
+value must be a string encoding the headers.
+
+
+multiple_requests
+Use persistent connections if set to 1, default is 0.
+
+
+post_data
+Set custom HTTP post data.
+
+
+user-agent
+user_agent
+Override the User-Agent header. If not specified the protocol will use a
+string describing the libavformat build. ("Lavf/<version>")
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+mime_type
+Export the MIME type.
+
+
+icy
+If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
+supports this, the metadata has to be retrieved by the application by reading
+the icy_metadata_headers and icy_metadata_packet options.
+The default is 1.
+
+
+icy_metadata_headers
+If the server supports ICY metadata, this contains the ICY-specific HTTP reply
+headers, separated by newline characters.
+
+
+icy_metadata_packet
+If the server supports ICY metadata, and icy was set to 1, this
+contains the last non-empty metadata packet sent by the server. It should be
+polled in regular intervals by applications interested in mid-stream metadata
+updates.
+
+
+cookies
+Set the cookies to be sent in future requests. The format of each cookie is the
+same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
+delimited by a newline character.
+
+
+offset
+Set initial byte offset.
+
+
+end_offset
+Try to limit the request to bytes preceding this offset.
+
+
+
+
+
2.10.1 HTTP Cookies# TOC
+
+
Some HTTP requests will be denied unless cookie values are passed in with the
+request. The cookies option allows these cookies to be specified. At
+the very least, each cookie must specify a value along with a path and domain.
+HTTP requests that match both the domain and path will automatically include the
+cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
+by a newline.
+
+
The required syntax to play a stream specifying a cookie is:
+
+
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+
+
+
2.11 Icecast# TOC
+
+
Icecast protocol (stream to Icecast servers)
+
+
This protocol accepts the following options:
+
+
+ice_genre
+Set the stream genre.
+
+
+ice_name
+Set the stream name.
+
+
+ice_description
+Set the stream description.
+
+
+ice_url
+Set the stream website URL.
+
+
+ice_public
+Set if the stream should be public.
+The default is 0 (not public).
+
+
+user_agent
+Override the User-Agent header. If not specified a string of the form
+"Lavf/<version>" will be used.
+
+
+password
+Set the Icecast mountpoint password.
+
+
+content_type
+Set the stream content type. This must be set if it is different from
+audio/mpeg.
+
+
+legacy_icecast
+This enables support for Icecast versions < 2.4.0, that do not support the
+HTTP PUT method but the SOURCE method.
+
+
+
+
+
+
icecast://[username [:password ]@]server :port /mountpoint
+
+
+
+
2.12 mmst# TOC
+
+
MMS (Microsoft Media Server) protocol over TCP.
+
+
+
2.13 mmsh# TOC
+
+
MMS (Microsoft Media Server) protocol over HTTP.
+
+
The required syntax is:
+
+
mmsh://server [:port ][/app ][/playpath ]
+
+
+
+
2.14 md5# TOC
+
+
MD5 output protocol.
+
+
Computes the MD5 hash of the data to be written, and on close writes
+this to the designated output or stdout if none is specified. It can
+be used to test muxers without writing an actual file.
+
+
Some examples follow.
+
+
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+
+
Note that some formats (typically MOV) require the output protocol to
+be seekable, so they will fail with the MD5 output protocol.
+
+
+
2.15 pipe# TOC
+
+
UNIX pipe access protocol.
+
+
Allow to read and write from UNIX pipes.
+
+
The accepted syntax is:
+
+
+
number is the number corresponding to the file descriptor of the
+pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
+is not specified, by default the stdout file descriptor will be used
+for writing, stdin for reading.
+
+
For example to read from stdin with ffmpeg
:
+
+
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+
+
For writing to stdout with ffmpeg
:
+
+
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+
+
This protocol accepts the following options:
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable if data transmission is slow.
+
+
+
+
Note that some formats (typically MOV), require the output protocol to
+be seekable, so they will fail with the pipe output protocol.
+
+
+
2.16 rtmp# TOC
+
+
Real-Time Messaging Protocol.
+
+
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
+content across a TCP/IP network.
+
+
The required syntax is:
+
+
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
+
+
+
The accepted parameters are:
+
+username
+An optional username (mostly for publishing).
+
+
+password
+An optional password (mostly for publishing).
+
+
+server
+The address of the RTMP server.
+
+
+port
+The number of the TCP port to use (by default is 1935).
+
+
+app
+It is the name of the application to access. It usually corresponds to
+the path where the application is installed on the RTMP server
+(e.g. /ondemand/ , /flash/live/ , etc.). You can override
+the value parsed from the URI through the rtmp_app
option, too.
+
+
+playpath
+It is the path or name of the resource to play with reference to the
+application specified in app , may be prefixed by "mp4:". You
+can override the value parsed from the URI through the rtmp_playpath
+option, too.
+
+
+listen
+Act as a server, listening for an incoming connection.
+
+
+timeout
+Maximum time to wait for the incoming connection. Implies listen.
+
+
+
+
Additionally, the following parameters can be set via command line options
+(or in code via AVOption
s):
+
+rtmp_app
+Name of application to connect on the RTMP server. This option
+overrides the parameter specified in the URI.
+
+
+rtmp_buffer
+Set the client buffer time in milliseconds. The default is 3000.
+
+
+rtmp_conn
+Extra arbitrary AMF connection parameters, parsed from a string,
+e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
+Each value is prefixed by a single character denoting the type,
+B for Boolean, N for number, S for string, O for object, or Z for null,
+followed by a colon. For Booleans the data must be either 0 or 1 for
+FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
+1 to end or begin an object, respectively. Data items in subobjects may
+be named, by prefixing the type with ’N’ and specifying the name before
+the value (i.e. NB:myFlag:1
). This option may be used multiple
+times to construct arbitrary AMF sequences.
+
+
+rtmp_flashver
+Version of the Flash plugin used to run the SWF player. The default
+is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
+<libavformat version>).)
+
+
+rtmp_flush_interval
+Number of packets flushed in the same request (RTMPT only). The default
+is 10.
+
+
+rtmp_live
+Specify that the media is a live stream. No resuming or seeking in
+live streams is possible. The default value is any
, which means the
+subscriber first tries to play the live stream specified in the
+playpath. If a live stream of that name is not found, it plays the
+recorded stream. The other possible values are live
and
+recorded
.
+
+
+rtmp_pageurl
+URL of the web page in which the media was embedded. By default no
+value will be sent.
+
+
+rtmp_playpath
+Stream identifier to play or to publish. This option overrides the
+parameter specified in the URI.
+
+
+rtmp_subscribe
+Name of live stream to subscribe to. By default no value will be sent.
+It is only sent if the option is specified or if rtmp_live
+is set to live.
+
+
+rtmp_swfhash
+SHA256 hash of the decompressed SWF file (32 bytes).
+
+
+rtmp_swfsize
+Size of the decompressed SWF file, required for SWFVerification.
+
+
+rtmp_swfurl
+URL of the SWF player for the media. By default no value will be sent.
+
+
+rtmp_swfverify
+URL to player swf file, compute hash/size automatically.
+
+
+rtmp_tcurl
+URL of the target stream. Defaults to proto://host[:port]/app.
+
+
+
+
+
For example to read with ffplay
a multimedia resource named
+"sample" from the application "vod" from an RTMP server "myserver":
+
+
ffplay rtmp://myserver/vod/sample
+
+
+
To publish to a password protected server, passing the playpath and
+app names separately:
+
+
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+
+
+
2.17 rtmpe# TOC
+
+
Encrypted Real-Time Messaging Protocol.
+
+
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
+streaming multimedia content within standard cryptographic primitives,
+consisting of Diffie-Hellman key exchange and HMACSHA256, generating
+a pair of RC4 keys.
+
+
+
2.18 rtmps# TOC
+
+
Real-Time Messaging Protocol over a secure SSL connection.
+
+
The Real-Time Messaging Protocol (RTMPS) is used for streaming
+multimedia content across an encrypted connection.
+
+
+
2.19 rtmpt# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
+for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
2.20 rtmpte# TOC
+
+
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
+is used for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
2.21 rtmpts# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTPS.
+
+
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
+for streaming multimedia content within HTTPS requests to traverse
+firewalls.
+
+
+
2.22 libsmbclient# TOC
+
+
libsmbclient permits one to manipulate CIFS/SMB network resources.
+
+
Following syntax is required.
+
+
+
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in milliseconds of socket I/O operations used by the underlying
+low level operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+workgroup
+Set the workgroup used for making connections. By default workgroup is not specified.
+
+
+
+
+
For more information see: http://www.samba.org/ .
+
+
+
2.23 libssh# TOC
+
+
Secure File Transfer Protocol via libssh
+
+
Allow to read from or write to remote resources using SFTP protocol.
+
+
Following syntax is required.
+
+
+
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+private_key
+Specify the path of the file containing private key to use during authorization.
+By default libssh searches for keys in the ~/.ssh/ directory.
+
+
+
+
+
Example: Play a file stored on remote server.
+
+
+
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+
+
+
2.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
+
+
Real-Time Messaging Protocol and its variants supported through
+librtmp.
+
+
Requires the presence of the librtmp headers and library during
+configuration. You need to explicitly configure the build with
+"–enable-librtmp". If enabled this will replace the native RTMP
+protocol.
+
+
This protocol provides most client functions and a few server
+functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
+encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
+variants of these encrypted types (RTMPTE, RTMPTS).
+
+
The required syntax is:
+
+
rtmp_proto ://server [:port ][/app ][/playpath ] options
+
+
+
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
+"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
+server , port , app and playpath have the same
+meaning as specified for the RTMP native protocol.
+options contains a list of space-separated options of the form
+key =val .
+
+
See the librtmp manual page (man 3 librtmp) for more information.
+
+
For example, to stream a file in real-time to an RTMP server using
+ffmpeg
:
+
+
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+
+
To play the same stream using ffplay
:
+
+
ffplay "rtmp://myserver/live/mystream live=1"
+
+
+
+
2.25 rtp# TOC
+
+
Real-time Transport Protocol.
+
+
The required syntax for an RTP URL is:
+rtp://hostname [:port ][?option =val ...]
+
+
port specifies the RTP port to use.
+
+
The following URL options are supported:
+
+
+ttl=n
+Set the TTL (Time-To-Live) value (for multicast only).
+
+
+rtcpport=n
+Set the remote RTCP port to n .
+
+
+localrtpport=n
+Set the local RTP port to n .
+
+
+localrtcpport=n
+Set the local RTCP port to n .
+
+
+pkt_size=n
+Set max packet size (in bytes) to n .
+
+
+connect=0|1
+Do a connect()
on the UDP socket (if set to 1) or not (if set
+to 0).
+
+
+sources=ip [,ip ]
+List allowed source IP addresses.
+
+
+block=ip [,ip ]
+List disallowed (blocked) source IP addresses.
+
+
+write_to_source=0|1
+Send packets to the source address of the latest received packet (if
+set to 1) or to a default remote address (if set to 0).
+
+
+localport=n
+Set the local RTP port to n .
+
+This is a deprecated option. Instead, localrtpport should be
+used.
+
+
+
+
+
Important notes:
+
+
+ If rtcpport is not set the RTCP port will be set to the RTP
+port value plus 1.
+
+ If localrtpport (the local RTP port) is not set any available
+port will be used for the local RTP and RTCP ports.
+
+ If localrtcpport (the local RTCP port) is not set it will be
+set to the local RTP port value plus 1.
+
+
+
+
2.26 rtsp# TOC
+
+
Real-Time Streaming Protocol.
+
+
RTSP is not technically a protocol handler in libavformat, it is a demuxer
+and muxer. The demuxer supports both normal RTSP (with data transferred
+over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
+data transferred over RDT).
+
+
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
+supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
+RTSP server ).
+
+
The required syntax for a RTSP url is:
+
+
rtsp://hostname [:port ]/path
+
+
+
Options can be set on the ffmpeg
/ffplay
command
+line, or set in code via AVOption
s or in
+avformat_open_input
.
+
+
The following options are supported.
+
+
+initial_pause
+Do not start playing the stream immediately if set to 1. Default value
+is 0.
+
+
+rtsp_transport
+Set RTSP transport protocols.
+
+It accepts the following values:
+
+‘udp ’
+Use UDP as lower transport protocol.
+
+
+‘tcp ’
+Use TCP (interleaving within the RTSP control channel) as lower
+transport protocol.
+
+
+‘udp_multicast ’
+Use UDP multicast as lower transport protocol.
+
+
+‘http ’
+Use HTTP tunneling as lower transport protocol, which is useful for
+passing proxies.
+
+
+
+Multiple lower transport protocols may be specified, in that case they are
+tried one at a time (if the setup of one fails, the next one is tried).
+For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
+
+
+rtsp_flags
+Set RTSP flags.
+
+The following values are accepted:
+
+‘filter_src ’
+Accept packets only from negotiated peer address and port.
+
+‘listen ’
+Act as a server, listening for an incoming connection.
+
+‘prefer_tcp ’
+Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
+
+
+
+Default value is ‘none ’.
+
+
+allowed_media_types
+Set media types to accept from the server.
+
+The following flags are accepted:
+
+‘video ’
+‘audio ’
+‘data ’
+
+
+By default it accepts all media types.
+
+
+min_port
+Set minimum local UDP port. Default value is 5000.
+
+
+max_port
+Set maximum local UDP port. Default value is 65000.
+
+
+timeout
+Set maximum timeout (in seconds) to wait for incoming connections.
+
+A value of -1 means infinite (default). This option implies the
+rtsp_flags set to ‘listen ’.
+
+
+reorder_queue_size
+Set number of packets to buffer for handling of reordered packets.
+
+
+stimeout
+Set socket TCP I/O timeout in microseconds.
+
+
+user-agent
+Override User-Agent header. If not specified, it defaults to the
+libavformat identifier string.
+
+
+
+
When receiving data over UDP, the demuxer tries to reorder received packets
+(since they may arrive out of order, or packets may get lost totally). This
+can be disabled by setting the maximum demuxing delay to zero (via
+the max_delay
field of AVFormatContext).
+
+
When watching multi-bitrate Real-RTSP streams with ffplay
, the
+streams to display can be chosen with -vst
n and
+-ast
n for video and audio respectively, and can be switched
+on the fly by pressing v
and a
.
+
+
+
2.26.1 Examples# TOC
+
+
The following examples all make use of the ffplay
and
+ffmpeg
tools.
+
+
+ Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
+
+
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
+
+
+ Watch a stream tunneled over HTTP:
+
+
ffplay -rtsp_transport http rtsp://server/video.mp4
+
+
+ Send a stream in realtime to a RTSP server, for others to watch:
+
+
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
+
+
+ Receive a stream in realtime:
+
+
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
+
+
+
+
+
2.27 sap# TOC
+
+
Session Announcement Protocol (RFC 2974). This is not technically a
+protocol handler in libavformat, it is a muxer and demuxer.
+It is used for signalling of RTP streams, by announcing the SDP for the
+streams regularly on a separate port.
+
+
+
2.27.1 Muxer# TOC
+
+
The syntax for a SAP url given to the muxer is:
+
+
sap://destination [:port ][?options ]
+
+
+
The RTP packets are sent to destination on port port ,
+or to port 5004 if no port is specified.
+options is a &
-separated list. The following options
+are supported:
+
+
+announce_addr=address
+Specify the destination IP address for sending the announcements to.
+If omitted, the announcements are sent to the commonly used SAP
+announcement multicast address 224.2.127.254 (sap.mcast.net), or
+ff0e::2:7ffe if destination is an IPv6 address.
+
+
+announce_port=port
+Specify the port to send the announcements on, defaults to
+9875 if not specified.
+
+
+ttl=ttl
+Specify the time to live value for the announcements and RTP packets,
+defaults to 255.
+
+
+same_port=0|1
+If set to 1, send all RTP streams on the same port pair. If zero (the
+default), all streams are sent on unique ports, with each stream on a
+port 2 numbers higher than the previous.
+VLC/Live555 requires this to be set to 1, to be able to receive the stream.
+The RTP stack in libavformat for receiving requires all streams to be sent
+on unique ports.
+
+
+
+
Example command lines follow.
+
+
To broadcast a stream on the local subnet, for watching in VLC:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+
+
Similarly, for watching in ffplay
:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+
+
And for watching in ffplay
, over IPv6:
+
+
+
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+
+
+
2.27.2 Demuxer# TOC
+
+
The syntax for a SAP url given to the demuxer is:
+
+
sap://[address ][:port ]
+
+
+
address is the multicast address to listen for announcements on,
+if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
+is the port that is listened on, 9875 if omitted.
+
+
+The demuxer listens for announcements on the given address and port.
+Once an announcement is received, it tries to receive that particular stream.
+
+
Example command lines follow.
+
+
To play back the first stream announced on the normal SAP multicast address:
+
+
+
+
+To play back the first stream announced on the default IPv6 SAP multicast address:
+
+
+
ffplay sap://[ff0e::2:7ffe]
+
+
+
+
2.28 sctp# TOC
+
+
Stream Control Transmission Protocol.
+
+
The accepted URL syntax is:
+
+
sctp://host :port [?options ]
+
+
+
The protocol accepts the following options:
+
+listen
+If set to any value, listen for an incoming connection. Outgoing connection is done by default.
+
+
+max_streams
+Set the maximum number of streams. By default no limit is set.
+
+
+
+
+
2.29 srtp# TOC
+
+
Secure Real-time Transport Protocol.
+
+
The accepted options are:
+
+srtp_in_suite
+srtp_out_suite
+Select input and output encoding suites.
+
+Supported values:
+
+‘AES_CM_128_HMAC_SHA1_80 ’
+‘SRTP_AES128_CM_HMAC_SHA1_80 ’
+‘AES_CM_128_HMAC_SHA1_32 ’
+‘SRTP_AES128_CM_HMAC_SHA1_32 ’
+
+
+
+srtp_in_params
+srtp_out_params
+Set input and output encoding parameters, which are expressed by a
+base64-encoded representation of a binary block. The first 16 bytes of
+this binary block are used as master key, the following 14 bytes are
+used as master salt.
+
+
+
+
+
2.30 subfile# TOC
+
+
Virtually extract a segment of a file or another stream.
+The underlying stream must be seekable.
+
+
Accepted options:
+
+start
+Start offset of the extracted segment, in bytes.
+
+end
+End offset of the extracted segment, in bytes.
+
+
+
+
Examples:
+
+
Extract a chapter from a DVD VOB file (start and end sectors obtained
+externally and multiplied by 2048):
+
+
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
+
+
+
Play an AVI file directly from a TAR archive:
+subfile,,start,183241728,end,366490624,,:archive.tar
+
+
+
2.31 tcp# TOC
+
+
Transmission Control Protocol.
+
+
The required syntax for a TCP url is:
+
+
tcp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form
+key =val .
+
+
The list of supported options follows.
+
+
+listen=1|0
+Listen for an incoming connection. Default value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+listen_timeout=microseconds
+Set listen timeout, expressed in microseconds.
+
+
+
+
The following example shows how to setup a listening TCP connection
+with ffmpeg
, which is then accessed with ffplay
:
+
+
ffmpeg -i input -f format tcp://hostname :port ?listen
+ffplay tcp://hostname :port
+
+
+
+
2.32 tls# TOC
+
+
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
+
+
The required syntax for a TLS/SSL url is:
+
+
tls://hostname :port [?options ]
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+ca_file, cafile=filename
+A file containing certificate authority (CA) root certificates to treat
+as trusted. If the linked TLS library contains a default this might not
+need to be specified for verification to work, but not all libraries and
+setups have defaults built in.
+The file must be in OpenSSL PEM format.
+
+
+tls_verify=1|0
+If enabled, try to verify the peer that we are communicating with.
+Note, if using OpenSSL, this currently only makes sure that the
+peer certificate is signed by one of the root certificates in the CA
+database, but it does not validate that the certificate actually
+matches the host name we are trying to connect to. (With GnuTLS,
+the host name is validated as well.)
+
+This is disabled by default since it requires a CA database to be
+provided by the caller in many cases.
+
+
+cert_file, cert=filename
+A file containing a certificate to use in the handshake with the peer.
+(When operating as server, in listen mode, this is more often required
+by the peer, while client certificates only are mandated in certain
+setups.)
+
+
+key_file, key=filename
+A file containing the private key for the certificate.
+
+
+listen=1|0
+If enabled, listen for connections on the provided port, and assume
+the server role in the handshake instead of the client role.
+
+
+
+
+
Example command lines:
+
+
To create a TLS/SSL server that serves an input stream.
+
+
+
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
+
+
+
To play back a stream from the TLS/SSL server using ffplay
:
+
+
+
ffplay tls://hostname :port
+
+
+
+
2.33 udp# TOC
+
+
User Datagram Protocol.
+
+
The required syntax for an UDP URL is:
+
+
udp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form key =val .
+
+
In case threading is enabled on the system, a circular buffer is used
+to store the incoming data, which allows one to reduce loss of data due to
+UDP socket buffer overruns. The fifo_size and
+overrun_nonfatal options are related to this buffer.
+
+
The list of supported options follows.
+
+
+buffer_size=size
+Set the UDP maximum socket buffer size in bytes. This is used to set either
+the receive or send buffer size, depending on what the socket is used for.
+Default is 64KB. See also fifo_size .
+
+
+localport=port
+Override the local UDP port to bind with.
+
+
+localaddr=addr
+Choose the local IP address. This is useful e.g. if sending multicast
+and the host has multiple interfaces, where the user can choose
+which interface to send on by specifying the IP address of that interface.
+
+
+pkt_size=size
+Set the size in bytes of UDP packets.
+
+
+reuse=1|0
+Explicitly allow or disallow reusing UDP sockets.
+
+
+ttl=ttl
+Set the time to live value (for multicast only).
+
+
+connect=1|0
+Initialize the UDP socket with connect()
. In this case, the
+destination address can’t be changed with ff_udp_set_remote_url later.
+If the destination address isn’t known at the start, this option can
+be specified in ff_udp_set_remote_url, too.
+This allows finding out the source address for the packets with getsockname,
+and makes writes return with AVERROR(ECONNREFUSED) if "destination
+unreachable" is received.
+For receiving, this gives the benefit of only receiving packets from
+the specified peer address/port.
+
+
+sources=address [,address ]
+Only receive packets sent to the multicast group from one of the
+specified sender IP addresses.
+
+
+block=address [,address ]
+Ignore packets sent to the multicast group from the specified
+sender IP addresses.
+
+
+fifo_size=units
+Set the UDP receiving circular buffer size, expressed as a number of
+packets with size of 188 bytes. If not specified defaults to 7*4096.
+
+
+overrun_nonfatal=1|0
+Survive in case of UDP receiving circular buffer overrun. Default
+value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+broadcast=1|0
+Explicitly allow or disallow UDP broadcasting.
+
+Note that broadcasting may not work properly on networks having
+a broadcast storm protection.
+
+
+
+
+
2.33.1 Examples# TOC
+
+
+ Use ffmpeg
to stream over UDP to a remote endpoint:
+
+
ffmpeg -i input -f format udp://hostname :port
+
+
+ Use ffmpeg
to stream in mpegts format over UDP using 188
+sized UDP packets, using a large input buffer:
+
+
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
+
+
+ Use ffmpeg
to receive over UDP from a remote endpoint:
+
+
ffmpeg -i udp://[multicast-address ]:port ...
+
+
+
+
+
2.34 unix# TOC
+
+
Unix local socket
+
+
The required syntax for a Unix socket URL is:
+
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+timeout
+Timeout in ms.
+
+listen
+Create the Unix socket in listening mode.
+
+
+
+
+
+
3 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavformat
+
+
+
+
4 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-resampler.html b/Externals/ffmpeg/dev/doc/ffmpeg-resampler.html
new file mode 100644
index 0000000000..2611dfc1f0
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-resampler.html
@@ -0,0 +1,357 @@
+
+
+
+
+
+
+ FFmpeg Resampler Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Resampler Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The FFmpeg resampler provides a high-level interface to the
+libswresample library audio resampling utilities. In particular it
+allows one to perform audio resampling, audio channel layout rematrixing,
+and convert audio format and packing layout.
+
+
+
+
2 Resampler Options# TOC
+
+
The audio resampler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, option =value for the aresample filter,
+by setting the value explicitly in the
+SwrContext
options or using the libavutil/opt.h API for
+programmatic use.
+
+
+ich, in_channel_count
+Set the number of input channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+in_channel_layout is set.
+
+
+och, out_channel_count
+Set the number of output channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+out_channel_layout is set.
+
+
+uch, used_channel_count
+Set the number of used input channels. Default value is 0. This option is
+only used for special remapping.
+
+
+isr, in_sample_rate
+Set the input sample rate. Default value is 0.
+
+
+osr, out_sample_rate
+Set the output sample rate. Default value is 0.
+
+
+isf, in_sample_fmt
+Specify the input sample format. It is set by default to none
.
+
+
+osf, out_sample_fmt
+Specify the output sample format. It is set by default to none
.
+
+
+tsf, internal_sample_fmt
+Set the internal sample format. Default value is none
.
+This will automatically be chosen when it is not explicitly set.
+
+
+icl, in_channel_layout
+ocl, out_channel_layout
+Set the input/output channel layout.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+clev, center_mix_level
+Set the center mix level. It is a value expressed in deciBel, and must be
+in the interval [-32,32].
+
+
+slev, surround_mix_level
+Set the surround mix level. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+lfe_mix_level
+Set LFE mix into non LFE level. It is used when there is a LFE input but no
+LFE output. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+rmvol, rematrix_volume
+Set rematrix volume. Default value is 1.0.
+
+
+rematrix_maxval
+Set maximum output value for rematrixing.
+This can be used to prevent clipping vs. preventing volume reduction.
+A value of 1.0 prevents clipping.
+
+
+flags, swr_flags
+Set flags used by the converter. Default value is 0.
+
+It supports the following individual flags:
+
+res
+force resampling, this flag forces resampling to be used even when the
+input and output sample rates match.
+
+
+
+
+dither_scale
+Set the dither scale. Default value is 1.
+
+
+dither_method
+Set dither method. Default value is 0.
+
+Supported values:
+
+‘rectangular ’
+select rectangular dither
+
+‘triangular ’
+select triangular dither
+
+‘triangular_hp ’
+select triangular dither with high pass
+
+‘lipshitz ’
+select lipshitz noise shaping dither
+
+‘shibata ’
+select shibata noise shaping dither
+
+‘low_shibata ’
+select low shibata noise shaping dither
+
+‘high_shibata ’
+select high shibata noise shaping dither
+
+‘f_weighted ’
+select f-weighted noise shaping dither
+
+‘modified_e_weighted ’
+select modified-e-weighted noise shaping dither
+
+‘improved_e_weighted ’
+select improved-e-weighted noise shaping dither
+
+
+
+
+
+resampler
+Set resampling engine. Default value is swr.
+
+Supported values:
+
+‘swr ’
+select the native SW Resampler; filter options precision and cheby are not
+applicable in this case.
+
+‘soxr ’
+select the SoX Resampler (where available); compensation, and filter options
+filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
+case.
+
+
+
+
+filter_size
+For swr only, set resampling filter size, default value is 32.
+
+
+phase_shift
+For swr only, set resampling phase shift, default value is 10, and must be in
+the interval [0,30].
+
+
+linear_interp
+Use Linear Interpolation if set to 1, default value is 0.
+
+
+cutoff
+Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
+value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
+(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
+
+
+precision
+For soxr only, the precision in bits to which the resampled signal will be
+calculated. The default value of 20 (which, with suitable dithering, is
+appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
+value of 28 gives SoX’s ’Very High Quality’.
+
+
+cheby
+For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
+approximation for ’irrational’ ratios. Default value is 0.
+
+
+async
+For swr only, simple 1 parameter audio sync to timestamps using stretching,
+squeezing, filling and trimming. Setting this to 1 will enable filling and
+trimming, larger values represent the maximum amount in samples that the data
+may be stretched or squeezed for each second.
+Default value is 0, thus no compensation is applied to make the samples match
+the audio timestamps.
+
+
+first_pts
+For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
+This allows for padding/trimming at the start of stream. By default, no
+assumption is made about the first frame’s expected pts, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative pts due to encoder delay.
+
+
+min_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger stretching/squeezing/filling or trimming of the
+data to make it match the timestamps. The default is that
+stretching/squeezing/filling and trimming is disabled
+(min_comp = FLT_MAX
).
+
+
+min_hard_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger adding/dropping samples to make it match the
+timestamps. This option effectively is a threshold to select between
+hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
+all compensation is by default disabled through min_comp .
+The default is 0.1.
+
+
+comp_duration
+For swr only, set duration (in seconds) over which data is stretched/squeezed
+to make it match the timestamps. Must be a non-negative double float value,
+default value is 1.0.
+
+
+max_soft_comp
+For swr only, set maximum factor by which data is stretched/squeezed to make it
+match the timestamps. Must be a non-negative double float value, default value
+is 0.
+
+
+matrix_encoding
+Select matrixed stereo encoding.
+
+It accepts the following values:
+
+‘none ’
+select none
+
+‘dolby ’
+select Dolby
+
+‘dplii ’
+select Dolby Pro Logic II
+
+
+
+Default value is none
.
+
+
+filter_type
+For swr only, select resampling filter type. This only affects resampling
+operations.
+
+It accepts the following values:
+
+‘cubic ’
+select cubic
+
+‘blackman_nuttall ’
+select Blackman Nuttall Windowed Sinc
+
+‘kaiser ’
+select Kaiser Windowed Sinc
+
+
+
+
+kaiser_beta
+For swr only, set Kaiser Window Beta value. Must be an integer in the
+interval [2,16], default value is 9.
+
+
+output_sample_bits
+For swr only, set number of used output sample bits for dithering. Must be an integer in the
+interval [0,64], default value is 0, which means it’s not used.
+
+
+
+
+
+
+
3 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libswresample
+
+
+
+
4 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-scaler.html b/Externals/ffmpeg/dev/doc/ffmpeg-scaler.html
new file mode 100644
index 0000000000..b7e57e3891
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-scaler.html
@@ -0,0 +1,231 @@
+
+
+
+
+
+
+ FFmpeg Scaler Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Scaler Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The FFmpeg rescaler provides a high-level interface to the libswscale
+library image conversion utilities. In particular it allows one to perform
+image rescaling and pixel format conversion.
+
+
+
+
2 Scaler Options# TOC
+
+
The video scaler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools. For programmatic use, they can be set explicitly in the
+SwsContext
options or through the libavutil/opt.h API.
+
+
+
+
+sws_flags
+Set the scaler flags. This is also used to set the scaling
+algorithm. Only a single algorithm should be selected.
+
+It accepts the following values:
+
+‘fast_bilinear ’
+Select fast bilinear scaling algorithm.
+
+
+‘bilinear ’
+Select bilinear scaling algorithm.
+
+
+‘bicubic ’
+Select bicubic scaling algorithm.
+
+
+‘experimental ’
+Select experimental scaling algorithm.
+
+
+‘neighbor ’
+Select nearest neighbor rescaling algorithm.
+
+
+‘area ’
+Select averaging area rescaling algorithm.
+
+
+‘bicublin ’
+Select bicubic scaling algorithm for the luma component, bilinear for
+chroma components.
+
+
+‘gauss ’
+Select Gaussian rescaling algorithm.
+
+
+‘sinc ’
+Select sinc rescaling algorithm.
+
+
+‘lanczos ’
+Select lanczos rescaling algorithm.
+
+
+‘spline ’
+Select natural bicubic spline rescaling algorithm.
+
+
+‘print_info ’
+Enable printing/debug logging.
+
+
+‘accurate_rnd ’
+Enable accurate rounding.
+
+
+‘full_chroma_int ’
+Enable full chroma interpolation.
+
+
+‘full_chroma_inp ’
+Select full chroma input.
+
+
+‘bitexact ’
+Enable bitexact output.
+
+
+
+
+srcw
+Set source width.
+
+
+srch
+Set source height.
+
+
+dstw
+Set destination width.
+
+
+dsth
+Set destination height.
+
+
+src_format
+Set source pixel format (must be expressed as an integer).
+
+
+dst_format
+Set destination pixel format (must be expressed as an integer).
+
+
+src_range
+Select source range.
+
+
+dst_range
+Select destination range.
+
+
+param0, param1
+Set scaling algorithm parameters. The specified values are specific of
+some scaling algorithms and ignored by others. The specified values
+are floating point number values.
+
+
+sws_dither
+Set the dithering algorithm. Accepts one of the following
+values. Default value is ‘auto ’.
+
+
+‘auto ’
+automatic choice
+
+
+‘none ’
+no dithering
+
+
+‘bayer ’
+bayer dither
+
+
+‘ed ’
+error diffusion dither
+
+
+‘a_dither ’
+arithmetic dither, based on addition
+
+
+‘x_dither ’
+arithmetic dither, based on xor (more random/less apparent patterning than
+a_dither).
+
+
+
+
+
+
+
+
+
+
3 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libswscale
+
+
+
+
4 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg-utils.html b/Externals/ffmpeg/dev/doc/ffmpeg-utils.html
new file mode 100644
index 0000000000..127e624d9d
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg-utils.html
@@ -0,0 +1,1468 @@
+
+
+
+
+
+
+ FFmpeg Utilities Documentation
+
+
+
+
+
+
+
+
+ FFmpeg Utilities Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
This document describes some generic features and utilities provided
+by the libavutil library.
+
+
+
+
2 Syntax# TOC
+
+
This section documents the syntax and formats employed by the FFmpeg
+libraries and tools.
+
+
+
2.1 Quoting and escaping# TOC
+
+
FFmpeg adopts the following quoting and escaping mechanism, unless
+explicitly specified. The following rules are applied:
+
+
+ '
and \
are special characters (respectively used for
+quoting and escaping). In addition to them, there might be other
+special characters depending on the specific syntax where the escaping
+and quoting are employed.
+
+ A special character is escaped by prefixing it with a ’\’.
+
+ All characters enclosed between ” are included literally in the
+parsed string. The quote character '
itself cannot be quoted,
+so you may need to close the quote and escape it.
+
+ Leading and trailing whitespaces, unless escaped or quoted, are
+removed from the parsed string.
+
+
+
Note that you may need to add a second level of escaping when using
+the command line or a script, which depends on the syntax of the
+adopted shell language.
+
+
The function av_get_token
defined in
+libavutil/avstring.h can be used to parse a token quoted or
+escaped according to the rules defined above.
+
+
The tool tools/ffescape in the FFmpeg source tree can be used
+to automatically quote or escape a string in a script.
+
+
+
2.1.1 Examples# TOC
+
+
+ Escape the string Crime d'Amour
containing the '
special
+character:
+
+
+ The string above contains a quote, so the '
needs to be escaped
+when quoting it:
+
+
+ Include leading or trailing whitespaces using quoting:
+
+
' this string starts and ends with whitespaces '
+
+
+ Escaping and quoting can be mixed together:
+
+
' The string '\'string\'' is a string '
+
+
+ To include a literal \
you can use either escaping or quoting:
+
+
'c:\foo' can be written as c:\\foo
+
+
+
+
+
2.2 Date# TOC
+
+
The accepted syntax is:
+
+
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+
+
If the value is "now" it takes the current time.
+
+
Time is local time unless Z is appended, in which case it is
+interpreted as UTC.
+If the year-month-day part is not specified it takes the current
+year-month-day.
+
+
+
2.3 Time duration# TOC
+
+
There are two accepted syntaxes for expressing time duration.
+
+
+
+
HH expresses the number of hours, MM the number of minutes
+for a maximum of 2 digits, and SS the number of seconds for a
+maximum of 2 digits. The m at the end expresses decimal value for
+SS .
+
+
or
+
+
+
+
S expresses the number of seconds, with the optional decimal part
+m .
+
+
In both expressions, the optional ‘- ’ indicates negative duration.
+
+
+
2.3.1 Examples# TOC
+
+
The following examples are all valid time duration:
+
+
+‘55 ’
+55 seconds
+
+
+‘12:03:45 ’
+12 hours, 03 minutes and 45 seconds
+
+
+‘23.189 ’
+23.189 seconds
+
+
+
+
+
2.4 Video size# TOC
+
Specify the size of the sourced video, it may be a string of the form
+width xheight , or the name of a size abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+720x480
+
+‘pal ’
+720x576
+
+‘qntsc ’
+352x240
+
+‘qpal ’
+352x288
+
+‘sntsc ’
+640x480
+
+‘spal ’
+768x576
+
+‘film ’
+352x240
+
+‘ntsc-film ’
+352x240
+
+‘sqcif ’
+128x96
+
+‘qcif ’
+176x144
+
+‘cif ’
+352x288
+
+‘4cif ’
+704x576
+
+‘16cif ’
+1408x1152
+
+‘qqvga ’
+160x120
+
+‘qvga ’
+320x240
+
+‘vga ’
+640x480
+
+‘svga ’
+800x600
+
+‘xga ’
+1024x768
+
+‘uxga ’
+1600x1200
+
+‘qxga ’
+2048x1536
+
+‘sxga ’
+1280x1024
+
+‘qsxga ’
+2560x2048
+
+‘hsxga ’
+5120x4096
+
+‘wvga ’
+852x480
+
+‘wxga ’
+1366x768
+
+‘wsxga ’
+1600x1024
+
+‘wuxga ’
+1920x1200
+
+‘woxga ’
+2560x1600
+
+‘wqsxga ’
+3200x2048
+
+‘wquxga ’
+3840x2400
+
+‘whsxga ’
+6400x4096
+
+‘whuxga ’
+7680x4800
+
+‘cga ’
+320x200
+
+‘ega ’
+640x350
+
+‘hd480 ’
+852x480
+
+‘hd720 ’
+1280x720
+
+‘hd1080 ’
+1920x1080
+
+‘2k ’
+2048x1080
+
+‘2kflat ’
+1998x1080
+
+‘2kscope ’
+2048x858
+
+‘4k ’
+4096x2160
+
+‘4kflat ’
+3996x2160
+
+‘4kscope ’
+4096x1716
+
+‘nhd ’
+640x360
+
+‘hqvga ’
+240x160
+
+‘wqvga ’
+400x240
+
+‘fwqvga ’
+432x240
+
+‘hvga ’
+480x320
+
+‘qhd ’
+960x540
+
+
+
+
+
2.5 Video rate# TOC
+
+
Specify the frame rate of a video, expressed as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a float
+number or a valid video frame rate abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+30000/1001
+
+‘pal ’
+25/1
+
+‘qntsc ’
+30000/1001
+
+‘qpal ’
+25/1
+
+‘sntsc ’
+30000/1001
+
+‘spal ’
+25/1
+
+‘film ’
+24/1
+
+‘ntsc-film ’
+24000/1001
+
+
+
+
+
2.6 Ratio# TOC
+
+
A ratio can be expressed as an expression, or in the form
+numerator :denominator .
+
+
Note that a ratio with infinite (1/0) or negative value is
+considered valid, so you should check on the returned value if you
+want to exclude those values.
+
+
The undefined value can be expressed using the "0:0" string.
+
+
+
2.7 Color# TOC
+
+
It can be the name of a color as defined below (case insensitive match) or a
+[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
+representing the alpha component.
+
+
The alpha component may be a string composed by "0x" followed by an
+hexadecimal number or a decimal number between 0.0 and 1.0, which
+represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
+transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
+component is not specified then ‘0xff ’ is assumed.
+
+
The string ‘random ’ will result in a random color.
+
+
The following names of colors are recognized:
+
+‘AliceBlue ’
+0xF0F8FF
+
+‘AntiqueWhite ’
+0xFAEBD7
+
+‘Aqua ’
+0x00FFFF
+
+‘Aquamarine ’
+0x7FFFD4
+
+‘Azure ’
+0xF0FFFF
+
+‘Beige ’
+0xF5F5DC
+
+‘Bisque ’
+0xFFE4C4
+
+‘Black ’
+0x000000
+
+‘BlanchedAlmond ’
+0xFFEBCD
+
+‘Blue ’
+0x0000FF
+
+‘BlueViolet ’
+0x8A2BE2
+
+‘Brown ’
+0xA52A2A
+
+‘BurlyWood ’
+0xDEB887
+
+‘CadetBlue ’
+0x5F9EA0
+
+‘Chartreuse ’
+0x7FFF00
+
+‘Chocolate ’
+0xD2691E
+
+‘Coral ’
+0xFF7F50
+
+‘CornflowerBlue ’
+0x6495ED
+
+‘Cornsilk ’
+0xFFF8DC
+
+‘Crimson ’
+0xDC143C
+
+‘Cyan ’
+0x00FFFF
+
+‘DarkBlue ’
+0x00008B
+
+‘DarkCyan ’
+0x008B8B
+
+‘DarkGoldenRod ’
+0xB8860B
+
+‘DarkGray ’
+0xA9A9A9
+
+‘DarkGreen ’
+0x006400
+
+‘DarkKhaki ’
+0xBDB76B
+
+‘DarkMagenta ’
+0x8B008B
+
+‘DarkOliveGreen ’
+0x556B2F
+
+‘Darkorange ’
+0xFF8C00
+
+‘DarkOrchid ’
+0x9932CC
+
+‘DarkRed ’
+0x8B0000
+
+‘DarkSalmon ’
+0xE9967A
+
+‘DarkSeaGreen ’
+0x8FBC8F
+
+‘DarkSlateBlue ’
+0x483D8B
+
+‘DarkSlateGray ’
+0x2F4F4F
+
+‘DarkTurquoise ’
+0x00CED1
+
+‘DarkViolet ’
+0x9400D3
+
+‘DeepPink ’
+0xFF1493
+
+‘DeepSkyBlue ’
+0x00BFFF
+
+‘DimGray ’
+0x696969
+
+‘DodgerBlue ’
+0x1E90FF
+
+‘FireBrick ’
+0xB22222
+
+‘FloralWhite ’
+0xFFFAF0
+
+‘ForestGreen ’
+0x228B22
+
+‘Fuchsia ’
+0xFF00FF
+
+‘Gainsboro ’
+0xDCDCDC
+
+‘GhostWhite ’
+0xF8F8FF
+
+‘Gold ’
+0xFFD700
+
+‘GoldenRod ’
+0xDAA520
+
+‘Gray ’
+0x808080
+
+‘Green ’
+0x008000
+
+‘GreenYellow ’
+0xADFF2F
+
+‘HoneyDew ’
+0xF0FFF0
+
+‘HotPink ’
+0xFF69B4
+
+‘IndianRed ’
+0xCD5C5C
+
+‘Indigo ’
+0x4B0082
+
+‘Ivory ’
+0xFFFFF0
+
+‘Khaki ’
+0xF0E68C
+
+‘Lavender ’
+0xE6E6FA
+
+‘LavenderBlush ’
+0xFFF0F5
+
+‘LawnGreen ’
+0x7CFC00
+
+‘LemonChiffon ’
+0xFFFACD
+
+‘LightBlue ’
+0xADD8E6
+
+‘LightCoral ’
+0xF08080
+
+‘LightCyan ’
+0xE0FFFF
+
+‘LightGoldenRodYellow ’
+0xFAFAD2
+
+‘LightGreen ’
+0x90EE90
+
+‘LightGrey ’
+0xD3D3D3
+
+‘LightPink ’
+0xFFB6C1
+
+‘LightSalmon ’
+0xFFA07A
+
+‘LightSeaGreen ’
+0x20B2AA
+
+‘LightSkyBlue ’
+0x87CEFA
+
+‘LightSlateGray ’
+0x778899
+
+‘LightSteelBlue ’
+0xB0C4DE
+
+‘LightYellow ’
+0xFFFFE0
+
+‘Lime ’
+0x00FF00
+
+‘LimeGreen ’
+0x32CD32
+
+‘Linen ’
+0xFAF0E6
+
+‘Magenta ’
+0xFF00FF
+
+‘Maroon ’
+0x800000
+
+‘MediumAquaMarine ’
+0x66CDAA
+
+‘MediumBlue ’
+0x0000CD
+
+‘MediumOrchid ’
+0xBA55D3
+
+‘MediumPurple ’
+0x9370D8
+
+‘MediumSeaGreen ’
+0x3CB371
+
+‘MediumSlateBlue ’
+0x7B68EE
+
+‘MediumSpringGreen ’
+0x00FA9A
+
+‘MediumTurquoise ’
+0x48D1CC
+
+‘MediumVioletRed ’
+0xC71585
+
+‘MidnightBlue ’
+0x191970
+
+‘MintCream ’
+0xF5FFFA
+
+‘MistyRose ’
+0xFFE4E1
+
+‘Moccasin ’
+0xFFE4B5
+
+‘NavajoWhite ’
+0xFFDEAD
+
+‘Navy ’
+0x000080
+
+‘OldLace ’
+0xFDF5E6
+
+‘Olive ’
+0x808000
+
+‘OliveDrab ’
+0x6B8E23
+
+‘Orange ’
+0xFFA500
+
+‘OrangeRed ’
+0xFF4500
+
+‘Orchid ’
+0xDA70D6
+
+‘PaleGoldenRod ’
+0xEEE8AA
+
+‘PaleGreen ’
+0x98FB98
+
+‘PaleTurquoise ’
+0xAFEEEE
+
+‘PaleVioletRed ’
+0xD87093
+
+‘PapayaWhip ’
+0xFFEFD5
+
+‘PeachPuff ’
+0xFFDAB9
+
+‘Peru ’
+0xCD853F
+
+‘Pink ’
+0xFFC0CB
+
+‘Plum ’
+0xDDA0DD
+
+‘PowderBlue ’
+0xB0E0E6
+
+‘Purple ’
+0x800080
+
+‘Red ’
+0xFF0000
+
+‘RosyBrown ’
+0xBC8F8F
+
+‘RoyalBlue ’
+0x4169E1
+
+‘SaddleBrown ’
+0x8B4513
+
+‘Salmon ’
+0xFA8072
+
+‘SandyBrown ’
+0xF4A460
+
+‘SeaGreen ’
+0x2E8B57
+
+‘SeaShell ’
+0xFFF5EE
+
+‘Sienna ’
+0xA0522D
+
+‘Silver ’
+0xC0C0C0
+
+‘SkyBlue ’
+0x87CEEB
+
+‘SlateBlue ’
+0x6A5ACD
+
+‘SlateGray ’
+0x708090
+
+‘Snow ’
+0xFFFAFA
+
+‘SpringGreen ’
+0x00FF7F
+
+‘SteelBlue ’
+0x4682B4
+
+‘Tan ’
+0xD2B48C
+
+‘Teal ’
+0x008080
+
+‘Thistle ’
+0xD8BFD8
+
+‘Tomato ’
+0xFF6347
+
+‘Turquoise ’
+0x40E0D0
+
+‘Violet ’
+0xEE82EE
+
+‘Wheat ’
+0xF5DEB3
+
+‘White ’
+0xFFFFFF
+
+‘WhiteSmoke ’
+0xF5F5F5
+
+‘Yellow ’
+0xFFFF00
+
+‘YellowGreen ’
+0x9ACD32
+
+
+
+
+
2.8 Channel Layout# TOC
+
+
A channel layout specifies the spatial disposition of the channels in
+a multi-channel audio stream. To specify a channel layout, FFmpeg
+makes use of a special syntax.
+
+
Individual channels are identified by an id, as given by the table
+below:
+
+‘FL ’
+front left
+
+‘FR ’
+front right
+
+‘FC ’
+front center
+
+‘LFE ’
+low frequency
+
+‘BL ’
+back left
+
+‘BR ’
+back right
+
+‘FLC ’
+front left-of-center
+
+‘FRC ’
+front right-of-center
+
+‘BC ’
+back center
+
+‘SL ’
+side left
+
+‘SR ’
+side right
+
+‘TC ’
+top center
+
+‘TFL ’
+top front left
+
+‘TFC ’
+top front center
+
+‘TFR ’
+top front right
+
+‘TBL ’
+top back left
+
+‘TBC ’
+top back center
+
+‘TBR ’
+top back right
+
+‘DL ’
+downmix left
+
+‘DR ’
+downmix right
+
+‘WL ’
+wide left
+
+‘WR ’
+wide right
+
+‘SDL ’
+surround direct left
+
+‘SDR ’
+surround direct right
+
+‘LFE2 ’
+low frequency 2
+
+
+
+
Standard channel layout compositions can be specified by using the
+following identifiers:
+
+‘mono ’
+FC
+
+‘stereo ’
+FL+FR
+
+‘2.1 ’
+FL+FR+LFE
+
+‘3.0 ’
+FL+FR+FC
+
+‘3.0(back) ’
+FL+FR+BC
+
+‘4.0 ’
+FL+FR+FC+BC
+
+‘quad ’
+FL+FR+BL+BR
+
+‘quad(side) ’
+FL+FR+SL+SR
+
+‘3.1 ’
+FL+FR+FC+LFE
+
+‘5.0 ’
+FL+FR+FC+BL+BR
+
+‘5.0(side) ’
+FL+FR+FC+SL+SR
+
+‘4.1 ’
+FL+FR+FC+LFE+BC
+
+‘5.1 ’
+FL+FR+FC+LFE+BL+BR
+
+‘5.1(side) ’
+FL+FR+FC+LFE+SL+SR
+
+‘6.0 ’
+FL+FR+FC+BC+SL+SR
+
+‘6.0(front) ’
+FL+FR+FLC+FRC+SL+SR
+
+‘hexagonal ’
+FL+FR+FC+BL+BR+BC
+
+‘6.1 ’
+FL+FR+FC+LFE+BC+SL+SR
+
+‘6.1(back) ’
+FL+FR+FC+LFE+BL+BR+BC
+
+‘6.1(front) ’
+FL+FR+LFE+FLC+FRC+SL+SR
+
+‘7.0 ’
+FL+FR+FC+BL+BR+SL+SR
+
+‘7.0(front) ’
+FL+FR+FC+FLC+FRC+SL+SR
+
+‘7.1 ’
+FL+FR+FC+LFE+BL+BR+SL+SR
+
+‘7.1(wide) ’
+FL+FR+FC+LFE+BL+BR+FLC+FRC
+
+‘7.1(wide-side) ’
+FL+FR+FC+LFE+FLC+FRC+SL+SR
+
+‘octagonal ’
+FL+FR+FC+BL+BR+BC+SL+SR
+
+‘downmix ’
+DL+DR
+
+
+
+
A custom channel layout can be specified as a sequence of terms, separated by
+’+’ or ’|’. Each term can be:
+
+ the name of a standard channel layout (e.g. ‘mono ’,
+‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
+
+ the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
+
+ a number of channels, in decimal, optionally followed by ’c’, yielding
+the default channel layout for that number of channels (see the
+function av_get_default_channel_layout
)
+
+ a channel layout mask, in hexadecimal starting with "0x" (see the
+AV_CH_*
macros in libavutil/channel_layout.h .
+
+
+
Starting from libavutil version 53 the trailing character "c" to
+specify a number of channels will be required, while a channel layout
+mask could also be specified as a decimal number (if and only if not
+followed by "c").
+
+
See also the function av_get_channel_layout
defined in
+libavutil/channel_layout.h .
+
+
+
3 Expression Evaluation# TOC
+
+
When evaluating an arithmetic expression, FFmpeg uses an internal
+formula evaluator, implemented through the libavutil/eval.h
+interface.
+
+
An expression may contain unary, binary operators, constants, and
+functions.
+
+
Two expressions expr1 and expr2 can be combined to form
+another expression "expr1 ;expr2 ".
+expr1 and expr2 are evaluated in turn, and the new
+expression evaluates to the value of expr2 .
+
+
The following binary operators are available: +
, -
,
+*
, /
, ^
.
+
+
The following unary operators are available: +
, -
.
+
+
The following functions are available:
+
+abs(x)
+Compute absolute value of x .
+
+
+acos(x)
+Compute arccosine of x .
+
+
+asin(x)
+Compute arcsine of x .
+
+
+atan(x)
+Compute arctangent of x .
+
+
+between(x, min, max)
+Return 1 if x is greater than or equal to min and lesser than or
+equal to max , 0 otherwise.
+
+
+bitand(x, y)
+bitor(x, y)
+Compute bitwise and/or operation on x and y .
+
+The results of the evaluation of x and y are converted to
+integers before executing the bitwise operation.
+
+Note that both the conversion to integer and the conversion back to
+floating point can lose precision. Beware of unexpected results for
+large numbers (usually 2^53 and larger).
+
+
+ceil(expr)
+Round the value of expression expr upwards to the nearest
+integer. For example, "ceil(1.5)" is "2.0".
+
+
+clip(x, min, max)
+Return the value of x clipped between min and max .
+
+
+cos(x)
+Compute cosine of x .
+
+
+cosh(x)
+Compute hyperbolic cosine of x .
+
+
+eq(x, y)
+Return 1 if x and y are equivalent, 0 otherwise.
+
+
+exp(x)
+Compute exponential of x (with base e
, the Euler’s number).
+
+
+floor(expr)
+Round the value of expression expr downwards to the nearest
+integer. For example, "floor(-1.5)" is "-2.0".
+
+
+gauss(x)
+Compute Gauss function of x , corresponding to
+exp(-x*x/2) / sqrt(2*PI)
.
+
+
+gcd(x, y)
+Return the greatest common divisor of x and y . If both x and
+y are 0 or either or both are less than zero then behavior is undefined.
+
+
+gt(x, y)
+Return 1 if x is greater than y , 0 otherwise.
+
+
+gte(x, y)
+Return 1 if x is greater than or equal to y , 0 otherwise.
+
+
+hypot(x, y)
+This function is similar to the C function with the same name; it returns
+"sqrt(x *x + y *y )", the length of the hypotenuse of a
+right triangle with sides of length x and y , or the distance of the
+point (x , y ) from the origin.
+
+
+if(x, y)
+Evaluate x , and if the result is non-zero return the result of
+the evaluation of y , return 0 otherwise.
+
+
+if(x, y, z)
+Evaluate x , and if the result is non-zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+ifnot(x, y)
+Evaluate x , and if the result is zero return the result of the
+evaluation of y , return 0 otherwise.
+
+
+ifnot(x, y, z)
+Evaluate x , and if the result is zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+isinf(x)
+Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
+
+
+isnan(x)
+Return 1.0 if x is NAN, 0.0 otherwise.
+
+
+ld(var)
+Load the value of the internal variable with number
+var , which was previously stored with st(var , expr ).
+The function returns the loaded value.
+
+
+log(x)
+Compute natural logarithm of x .
+
+
+lt(x, y)
+Return 1 if x is lesser than y , 0 otherwise.
+
+
+lte(x, y)
+Return 1 if x is lesser than or equal to y , 0 otherwise.
+
+
+max(x, y)
+Return the maximum between x and y .
+
+
+min(x, y)
+Return the minimum between x and y .
+
+
+mod(x, y)
+Compute the remainder of division of x by y .
+
+
+not(expr)
+Return 1.0 if expr is zero, 0.0 otherwise.
+
+
+pow(x, y)
+Compute the power of x elevated y , it is equivalent to
+"(x )^(y )".
+
+
+print(t)
+print(t, l)
+Print the value of expression t with loglevel l . If
+l is not specified then a default log level is used.
+Returns the value of the expression printed.
+
+Prints t with loglevel l
+
+
+random(x)
+Return a pseudo random value between 0.0 and 1.0. x is the index of the
+internal variable which will be used to save the seed/state.
+
+
+root(expr, max)
+Find an input value for which the function represented by expr
+with argument ld(0) is 0 in the interval 0..max .
+
+The expression in expr must denote a continuous function or the
+result is undefined.
+
+ld(0) is used to represent the function input value, which means
+that the given expression will be evaluated multiple times with
+various input values that the expression can access through
+ld(0)
. When the expression evaluates to 0 then the
+corresponding input value will be returned.
+
+
+sin(x)
+Compute sine of x .
+
+
+sinh(x)
+Compute hyperbolic sine of x .
+
+
+sqrt(expr)
+Compute the square root of expr . This is equivalent to
+"(expr )^.5".
+
+
+squish(x)
+Compute expression 1/(1 + exp(4*x))
.
+
+
+st(var, expr)
+Store the value of the expression expr in an internal
+variable. var specifies the number of the variable where to
+store the value, and it is a value ranging from 0 to 9. The function
+returns the value stored in the internal variable.
+Note: variables are currently not shared between expressions.
+
+
+tan(x)
+Compute tangent of x .
+
+
+tanh(x)
+Compute hyperbolic tangent of x .
+
+
+taylor(expr, x)
+taylor(expr, x, id)
+Evaluate a Taylor series at x , given an expression representing
+the ld(id)
-th derivative of a function at 0.
+
+When the series does not converge the result is undefined.
+
+ld(id) is used to represent the derivative order in expr ,
+which means that the given expression will be evaluated multiple times
+with various input values that the expression can access through
+ld(id)
. If id is not specified then 0 is assumed.
+
+Note, when you have the derivatives at y instead of 0,
+taylor(expr, x-y)
can be used.
+
+
+time(0)
+Return the current (wallclock) time in seconds.
+
+
+trunc(expr)
+Round the value of expression expr towards zero to the nearest
+integer. For example, "trunc(-1.5)" is "-1.0".
+
+
+while(cond, expr)
+Evaluate expression expr while the expression cond is
+non-zero, and returns the value of the last expr evaluation, or
+NAN if cond was always false.
+
+
+
+
The following constants are available:
+
+PI
+area of the unit disc, approximately 3.14
+
+E
+exp(1) (Euler’s number), approximately 2.718
+
+PHI
+golden ratio (1+sqrt(5))/2, approximately 1.618
+
+
+
+
Assuming that an expression is considered "true" if it has a non-zero
+value, note that:
+
+
*
works like AND
+
+
+
works like OR
+
+
For example the construct:
+
+
is equivalent to:
+
+
+
In your C code, you can extend the list of unary and binary functions,
+and define recognized constants, so that they are available for your
+expressions.
+
+
The evaluator also recognizes the International System unit prefixes.
+If ’i’ is appended after the prefix, binary prefixes are used, which
+are based on powers of 1024 instead of powers of 1000.
+The ’B’ postfix multiplies the value by 8, and can be appended after a
+unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
+’G’ and ’B’ as number postfix.
+
+
The list of available International System prefixes follows, with
+indication of the corresponding powers of 10 and of 2.
+
+y
+10^-24 / 2^-80
+
+z
+10^-21 / 2^-70
+
+a
+10^-18 / 2^-60
+
+f
+10^-15 / 2^-50
+
+p
+10^-12 / 2^-40
+
+n
+10^-9 / 2^-30
+
+u
+10^-6 / 2^-20
+
+m
+10^-3 / 2^-10
+
+c
+10^-2
+
+d
+10^-1
+
+h
+10^2
+
+k
+10^3 / 2^10
+
+K
+10^3 / 2^10
+
+M
+10^6 / 2^20
+
+G
+10^9 / 2^30
+
+T
+10^12 / 2^40
+
+P
+10^15 / 2^50
+
+E
+10^18 / 2^60
+
+Z
+10^21 / 2^70
+
+Y
+10^24 / 2^80
+
+
+
+
+
+
4 OpenCL Options# TOC
+
+
When FFmpeg is configured with --enable-opencl
, it is possible
+to set the options for the global OpenCL context.
+
+
The list of supported options follows:
+
+
+build_options
+Set build options used to compile the registered kernels.
+
+See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
+
+
+platform_idx
+Select the index of the platform to run OpenCL code.
+
+The specified index must be one of the indexes in the device list
+which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+device_idx
+Select the index of the device used to run OpenCL code.
+
+The specified index must be one of the indexes in the device list which
+can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+
+
+
+
+
5 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+libavutil
+
+
+
+
6 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffmpeg.html b/Externals/ffmpeg/dev/doc/ffmpeg.html
new file mode 100644
index 0000000000..d7524f0917
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffmpeg.html
@@ -0,0 +1,2109 @@
+
+
+
+
+
+
+ ffmpeg Documentation
+
+
+
+
+
+
+
+
+ ffmpeg Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Synopsis# TOC
+
+
ffmpeg [global_options ] {[input_file_options ] -i input_file } ... {[output_file_options ] output_file } ...
+
+
+
2 Description# TOC
+
+
ffmpeg
is a very fast video and audio converter that can also grab from
+a live audio/video source. It can also convert between arbitrary sample
+rates and resize video on the fly with a high quality polyphase filter.
+
+
ffmpeg
reads from an arbitrary number of input "files" (which can be regular
+files, pipes, network streams, grabbing devices, etc.), specified by the
+-i
option, and writes to an arbitrary number of output "files", which are
+specified by a plain output filename. Anything found on the command line which
+cannot be interpreted as an option is considered to be an output filename.
+
+
Each input or output file can, in principle, contain any number of streams of
+different types (video/audio/subtitle/attachment/data). The allowed number and/or
+types of streams may be limited by the container format. Selecting which
+streams from which inputs will go into which output is either done automatically
+or with the -map
option (see the Stream selection chapter).
+
+
To refer to input files in options, you must use their indices (0-based). E.g.
+the first input file is 0
, the second is 1
, etc. Similarly, streams
+within a file are referred to by their indices. E.g. 2:3
refers to the
+fourth stream in the third input file. Also see the Stream specifiers chapter.
+
+
As a general rule, options are applied to the next specified
+file. Therefore, order is important, and you can have the same
+option on the command line multiple times. Each occurrence is
+then applied to the next input or output file.
+Exceptions from this rule are the global options (e.g. verbosity level),
+which should be specified first.
+
+
Do not mix input and output files – first specify all input files, then all
+output files. Also do not mix options which belong to different files. All
+options apply ONLY to the next input or output file and are reset between files.
+
+
+ To set the video bitrate of the output file to 64 kbit/s:
+
+
ffmpeg -i input.avi -b:v 64k -bufsize 64k output.avi
+
+
+ To force the frame rate of the output file to 24 fps:
+
+
ffmpeg -i input.avi -r 24 output.avi
+
+
+ To force the frame rate of the input file (valid for raw formats only)
+to 1 fps and the frame rate of the output file to 24 fps:
+
+
ffmpeg -r 1 -i input.m2v -r 24 output.avi
+
+
+
+
The format option may be needed for raw input files.
+
+
+
+
3 Detailed description# TOC
+
+
The transcoding process in ffmpeg
for each output can be described by
+the following diagram:
+
+
+
_______ ______________
+| | | |
+| input | demuxer | encoded data | decoder
+| file | ---------> | packets | -----+
+|_______| |______________| |
+ v
+ _________
+ | |
+ | decoded |
+ | frames |
+ |_________|
+ ________ ______________ |
+| | | | |
+| output | <-------- | encoded data | <----+
+| file | muxer | packets | encoder
+|________| |______________|
+
+
+
+
+
ffmpeg
calls the libavformat library (containing demuxers) to read
+input files and get packets containing encoded data from them. When there are
+multiple input files, ffmpeg
tries to keep them synchronized by
+tracking lowest timestamp on any active input stream.
+
+
Encoded packets are then passed to the decoder (unless streamcopy is selected
+for the stream, see further for a description). The decoder produces
+uncompressed frames (raw video/PCM audio/...) which can be processed further by
+filtering (see next section). After filtering, the frames are passed to the
+encoder, which encodes them and outputs encoded packets. Finally those are
+passed to the muxer, which writes the encoded packets to the output file.
+
+
+
3.1 Filtering# TOC
+
Before encoding, ffmpeg
can process raw audio and video frames using
+filters from the libavfilter library. Several chained filters form a filter
+graph. ffmpeg
distinguishes between two types of filtergraphs:
+simple and complex.
+
+
+
3.1.1 Simple filtergraphs# TOC
+
Simple filtergraphs are those that have exactly one input and output, both of
+the same type. In the above diagram they can be represented by simply inserting
+an additional step between decoding and encoding:
+
+
+
_________ ______________
+| | | |
+| decoded | | encoded data |
+| frames |\ _ | packets |
+|_________| \ /||______________|
+ \ __________ /
+ simple _\|| | / encoder
+ filtergraph | filtered |/
+ | frames |
+ |__________|
+
+
+
+
Simple filtergraphs are configured with the per-stream -filter option
+(with -vf and -af aliases for video and audio respectively).
+A simple filtergraph for video can look for example like this:
+
+
+
_______ _____________ _______ ________
+| | | | | | | |
+| input | ---> | deinterlace | ---> | scale | ---> | output |
+|_______| |_____________| |_______| |________|
+
+
+
+
Note that some filters change frame properties but not frame contents. E.g. the
+fps
filter in the example above changes number of frames, but does not
+touch the frame contents. Another example is the setpts
filter, which
+only sets timestamps and otherwise passes the frames unchanged.
+
+
+
3.1.2 Complex filtergraphs# TOC
+
Complex filtergraphs are those which cannot be described as simply a linear
+processing chain applied to one stream. This is the case, for example, when the graph has
+more than one input and/or output, or when output stream type is different from
+input. They can be represented with the following diagram:
+
+
+
_________
+| |
+| input 0 |\ __________
+|_________| \ | |
+ \ _________ /| output 0 |
+ \ | | / |__________|
+ _________ \| complex | /
+| | | |/
+| input 1 |---->| filter |\
+|_________| | | \ __________
+ /| graph | \ | |
+ / | | \| output 1 |
+ _________ / |_________| |__________|
+| | /
+| input 2 |/
+|_________|
+
+
+
+
Complex filtergraphs are configured with the -filter_complex option.
+Note that this option is global, since a complex filtergraph, by its nature,
+cannot be unambiguously associated with a single stream or file.
+
+
The -lavfi option is equivalent to -filter_complex .
+
+
A trivial example of a complex filtergraph is the overlay
filter, which
+has two video inputs and one video output, containing one video overlaid on top
+of the other. Its audio counterpart is the amix
filter.
+
+
+
3.2 Stream copy# TOC
+
Stream copy is a mode selected by supplying the copy
parameter to the
+-codec option. It makes ffmpeg
omit the decoding and encoding
+step for the specified stream, so it does only demuxing and muxing. It is useful
+for changing the container format or modifying container-level metadata. The
+diagram above will, in this case, simplify to this:
+
+
+
_______ ______________ ________
+| | | | | |
+| input | demuxer | encoded data | muxer | output |
+| file | ---------> | packets | -------> | file |
+|_______| |______________| |________|
+
+
+
+
Since there is no decoding or encoding, it is very fast and there is no quality
+loss. However, it might not work in some cases because of many factors. Applying
+filters is obviously also impossible, since filters work on uncompressed data.
+
+
+
+
4 Stream selection# TOC
+
+
By default, ffmpeg
includes only one stream of each type (video, audio, subtitle)
+present in the input files and adds them to each output file. It picks the
+"best" of each based upon the following criteria: for video, it is the stream
+with the highest resolution, for audio, it is the stream with the most channels, for
+subtitles, it is the first subtitle stream. In the case where several streams of
+the same type rate equally, the stream with the lowest index is chosen.
+
+
You can disable some of those defaults by using the -vn/-an/-sn
options. For
+full manual control, use the -map
option, which disables the defaults just
+described.
+
+
+
+
5 Options# TOC
+
+
All the numerical options, if not specified otherwise, accept a string
+representing a number as input, which may be followed by one of the SI
+unit prefixes, for example: ’K’, ’M’, or ’G’.
+
+
If ’i’ is appended to the SI unit prefix, the complete prefix will be
+interpreted as a unit prefix for binary multiples, which are based on
+powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
+prefix multiplies the value by 8. This allows using, for example:
+’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
+
+
Options which do not take arguments are boolean options, and set the
+corresponding value to true. They can be set to false by prefixing
+the option name with "no". For example using "-nofoo"
+will set the boolean option with name "foo" to false.
+
+
+
5.1 Stream specifiers# TOC
+
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
+are used to precisely specify which stream(s) a given option belongs to.
+
+
A stream specifier is a string generally appended to the option name and
+separated from it by a colon. E.g. -codec:a:1 ac3
contains the
+a:1
stream specifier, which matches the second audio stream. Therefore, it
+would select the ac3 codec for the second audio stream.
+
+
A stream specifier can match several streams, so that the option is applied to all
+of them. E.g. the stream specifier in -b:a 128k
matches all audio
+streams.
+
+
An empty stream specifier matches all streams. For example, -codec copy
+or -codec: copy
would copy all the streams without reencoding.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index. E.g. -threads:1 4
would set the
+thread count for the second stream to 4.
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
+’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
+stream number stream_index of this type. Otherwise, it matches all
+streams of this type.
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number stream_index
+in the program with the id program_id . Otherwise, it matches all streams in the
+program.
+
+#stream_id or i:stream_id
+Match the stream by stream id (e.g. PID in MPEG-TS container).
+
+m:key [:value ]
+Matches streams with the metadata tag key having the specified value. If
+value is not given, matches streams that contain the given tag with any
+value.
+
+Note that in ffmpeg
, matching by metadata will only work properly for
+input files.
+
+
+
+
+
5.2 Generic options# TOC
+
+
These options are shared amongst the ff* tools.
+
+
+-L
+Show license.
+
+
+-h, -?, -help, --help [arg ]
+Show help. An optional parameter may be specified to print help about a specific
+item. If no argument is specified, only basic (non advanced) tool
+options are shown.
+
+Possible values of arg are:
+
+long
+Print advanced tool options in addition to the basic tool options.
+
+
+full
+Print complete list of options, including shared and private options
+for encoders, decoders, demuxers, muxers, filters, etc.
+
+
+decoder=decoder_name
+Print detailed information about the decoder named decoder_name . Use the
+-decoders option to get a list of all decoders.
+
+
+encoder=encoder_name
+Print detailed information about the encoder named encoder_name . Use the
+-encoders option to get a list of all encoders.
+
+
+demuxer=demuxer_name
+Print detailed information about the demuxer named demuxer_name . Use the
+-formats option to get a list of all demuxers and muxers.
+
+
+muxer=muxer_name
+Print detailed information about the muxer named muxer_name . Use the
+-formats option to get a list of all muxers and demuxers.
+
+
+filter=filter_name
+Print detailed information about the filter name filter_name . Use the
+-filters option to get a list of all filters.
+
+
+
+
+-version
+Show version.
+
+
+-formats
+Show available formats (including devices).
+
+
+-devices
+Show available devices.
+
+
+-codecs
+Show all codecs known to libavcodec.
+
+Note that the term ’codec’ is used throughout this documentation as a shortcut
+for what is more correctly called a media bitstream format.
+
+
+-decoders
+Show available decoders.
+
+
+-encoders
+Show all available encoders.
+
+
+-bsfs
+Show available bitstream filters.
+
+
+-protocols
+Show available protocols.
+
+
+-filters
+Show available libavfilter filters.
+
+
+-pix_fmts
+Show available pixel formats.
+
+
+-sample_fmts
+Show available sample formats.
+
+
+-layouts
+Show channel names and standard channel layouts.
+
+
+-colors
+Show recognized color names.
+
+
+-sources device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sources of the input device.
+Some devices may provide system-dependent source names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sources pulse,server=192.168.0.4
+
+
+
+-sinks device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sinks of the output device.
+Some devices may provide system-dependent sink names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sinks pulse,server=192.168.0.4
+
+
+
+-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+Set the logging level used by the library.
+Adding "repeat+" indicates that repeated log output should not be compressed
+to the first line and the "Last message repeated n times" line will be
+omitted. "repeat" can also be used alone.
+If "repeat" is used alone, and with no prior loglevel set, the default
+loglevel will be used. If multiple loglevel parameters are given, using
+’repeat’ will not change the loglevel.
+loglevel is a string or a number containing one of the following values:
+
+‘quiet, -8 ’
+Show nothing at all; be silent.
+
+‘panic, 0 ’
+Only show fatal errors which could lead the process to crash, such as
+an assertion failure. This is not currently used for anything.
+
+‘fatal, 8 ’
+Only show fatal errors. These are errors after which the process absolutely
+cannot continue.
+
+‘error, 16 ’
+Show all errors, including ones which can be recovered from.
+
+‘warning, 24 ’
+Show all warnings and errors. Any message related to possibly
+incorrect or unexpected events will be shown.
+
+‘info, 32 ’
+Show informative messages during processing. This is in addition to
+warnings and errors. This is the default value.
+
+‘verbose, 40 ’
+Same as info
, except more verbose.
+
+‘debug, 48 ’
+Show everything, including debugging information.
+
+
+
+By default the program logs to stderr, if coloring is supported by the
+terminal, colors are used to mark errors and warnings. Log coloring
+can be disabled setting the environment variable
+AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
+the environment variable AV_LOG_FORCE_COLOR
.
+The use of the environment variable NO_COLOR
is deprecated and
+will be dropped in a following FFmpeg version.
+
+
+-report
+Dump full command line and console output to a file named
+program -YYYYMMDD -HHMMSS .log
in the current
+directory.
+This file can be useful for bug reports.
+It also implies -loglevel verbose
.
+
+Setting the environment variable FFREPORT
to any value has the
+same effect. If the value is a ’:’-separated key=value sequence, these
+options will affect the report; option values must be escaped if they
+contain special characters or the options delimiter ’:’ (see the
+“Quoting and escaping” section in the ffmpeg-utils manual).
+
+The following options are recognized:
+
+file
+set the file name to use for the report; %p
is expanded to the name
+of the program, %t
is expanded to a timestamp, %%
is expanded
+to a plain %
+
+level
+set the log verbosity level using a numerical value (see -loglevel
).
+
+
+
+For example, to output a report to a file named ffreport.log
+using a log level of 32
(alias for log level info
):
+
+
+
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
+
+
+Errors in parsing the environment variable are not fatal, and will not
+appear in the report.
+
+
+-hide_banner
+Suppress printing banner.
+
+All FFmpeg tools will normally show a copyright notice, build options
+and library versions. This option can be used to suppress printing
+this information.
+
+
+-cpuflags flags (global )
+Allows setting and clearing cpu flags. This option is intended
+for testing. Do not use it unless you know what you’re doing.
+
+
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+Possible flags for this option are:
+
+‘x86 ’
+
+‘mmx ’
+‘mmxext ’
+‘sse ’
+‘sse2 ’
+‘sse2slow ’
+‘sse3 ’
+‘sse3slow ’
+‘ssse3 ’
+‘atom ’
+‘sse4.1 ’
+‘sse4.2 ’
+‘avx ’
+‘xop ’
+‘fma4 ’
+‘3dnow ’
+‘3dnowext ’
+‘cmov ’
+
+
+‘ARM ’
+
+‘armv5te ’
+‘armv6 ’
+‘armv6t2 ’
+‘vfp ’
+‘vfpv3 ’
+‘neon ’
+
+
+‘PowerPC ’
+
+‘altivec ’
+
+
+‘Specific Processors ’
+
+‘pentium2 ’
+‘pentium3 ’
+‘pentium4 ’
+‘k6 ’
+‘k62 ’
+‘athlon ’
+‘athlonxp ’
+‘k8 ’
+
+
+
+
+
+-opencl_bench
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with --enable-opencl
.
+
+
+-opencl_options options (global )
+Set OpenCL environment options. This option is only available when
+FFmpeg has been compiled with --enable-opencl
.
+
+options must be a list of key =value option pairs
+separated by ’:’. See the “OpenCL Options” section in the
+ffmpeg-utils manual for the list of supported options.
+
+
+
+
+
5.3 AVOptions# TOC
+
+
These options are provided directly by the libavformat, libavdevice and
+libavcodec libraries. To see the list of available AVOptions, use the
+-help option. They are separated into two categories:
+
+generic
+These options can be set for any container, codec or device. Generic options
+are listed under AVFormatContext options for containers/devices and under
+AVCodecContext options for codecs.
+
+private
+These options are specific to the given container, device or codec. Private
+options are listed under their corresponding containers/devices/codecs.
+
+
+
+
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
+an MP3 file, use the id3v2_version private option of the MP3
+muxer:
+
+
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+
+
All codec AVOptions are per-stream, and thus a stream specifier
+should be attached to them.
+
+
Note: the -nooption syntax cannot be used for boolean
+AVOptions, use -option 0 /-option 1 .
+
+
Note: the old undocumented way of specifying per-stream AVOptions by
+prepending v/a/s to the options name is now obsolete and will be
+removed soon.
+
+
+
5.4 Main options# TOC
+
+
+-f fmt (input/output )
+Force input or output file format. The format is normally auto detected for input
+files and guessed from the file extension for output files, so this option is not
+needed in most cases.
+
+
+-i filename (input )
+input file name
+
+
+-y (global )
+Overwrite output files without asking.
+
+
+-n (global )
+Do not overwrite output files, and exit immediately if a specified
+output file already exists.
+
+
+-c[:stream_specifier ] codec (input/output,per-stream )
+-codec[:stream_specifier ] codec (input/output,per-stream )
+Select an encoder (when used before an output file) or a decoder (when used
+before an input file) for one or more streams. codec is the name of a
+decoder/encoder or a special value copy
(output only) to indicate that
+the stream is not to be re-encoded.
+
+For example
+
+
ffmpeg -i INPUT -map 0 -c:v libx264 -c:a copy OUTPUT
+
+encodes all video streams with libx264 and copies all audio streams.
+
+For each stream, the last matching c
option is applied, so
+
+
ffmpeg -i INPUT -map 0 -c copy -c:v:1 libx264 -c:a:137 libvorbis OUTPUT
+
+will copy all the streams except the second video, which will be encoded with
+libx264, and the 138th audio, which will be encoded with libvorbis.
+
+
+-t duration (input/output )
+When used as an input option (before -i
), limit the duration of
+data read from the input file.
+
+When used as an output option (before an output filename), stop writing the
+output after its duration reaches duration .
+
+duration may be a number in seconds, or in hh:mm:ss[.xxx]
form.
+
+-to and -t are mutually exclusive and -t has priority.
+
+
+-to position (output )
+Stop writing the output at position .
+position may be a number in seconds, or in hh:mm:ss[.xxx]
form.
+
+-to and -t are mutually exclusive and -t has priority.
+
+
+-fs limit_size (output )
+Set the file size limit, expressed in bytes.
+
+
+-ss position (input/output )
+When used as an input option (before -i
), seeks in this input file to
+position . Note that in most formats it is not possible to seek exactly, so
+ffmpeg
will seek to the closest seek point before position .
+When transcoding and -accurate_seek is enabled (the default), this
+extra segment between the seek point and position will be decoded and
+discarded. When doing stream copy or when -noaccurate_seek is used, it
+will be preserved.
+
+When used as an output option (before an output filename), decodes but discards
+input until the timestamps reach position .
+
+position may be either in seconds or in hh:mm:ss[.xxx]
form.
+
+
+-itsoffset offset (input )
+Set the input time offset.
+
+offset must be a time duration specification,
+see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+The offset is added to the timestamps of the input files. Specifying
+a positive offset means that the corresponding streams are delayed by
+the time duration specified in offset .
+
+
+-timestamp date (output )
+Set the recording timestamp in the container.
+
+date must be a time duration specification,
+see (ffmpeg-utils)the Date section in the ffmpeg-utils(1) manual .
+
+
+-metadata[:metadata_specifier] key =value (output,per-metadata )
+Set a metadata key/value pair.
+
+An optional metadata_specifier may be given to set metadata
+on streams or chapters. See -map_metadata
documentation for
+details.
+
+This option overrides metadata set with -map_metadata
. It is
+also possible to delete metadata by using an empty value.
+
+For example, for setting the title in the output file:
+
+
ffmpeg -i in.avi -metadata title="my title" out.flv
+
+
+To set the language of the first audio stream:
+
+
ffmpeg -i INPUT -metadata:s:a:0 language=eng OUTPUT
+
+
+
+-target type (output )
+Specify target file type (vcd
, svcd
, dvd
, dv
,
+dv50
). type may be prefixed with pal-
, ntsc-
or
+film-
to use the corresponding standard. All the format options
+(bitrate, codecs, buffer sizes) are then set automatically. You can just type:
+
+
+
ffmpeg -i myfile.avi -target vcd /tmp/vcd.mpg
+
+
+Nevertheless you can specify additional options as long as you know
+they do not conflict with the standard, as in:
+
+
+
ffmpeg -i myfile.avi -target vcd -bf 2 /tmp/vcd.mpg
+
+
+
+-dframes number (output )
+Set the number of data frames to output. This is an alias for -frames:d
.
+
+
+-frames[:stream_specifier ] framecount (output,per-stream )
+Stop writing to the stream after framecount frames.
+
+
+-q[:stream_specifier ] q (output,per-stream )
+-qscale[:stream_specifier ] q (output,per-stream )
+Use fixed quality scale (VBR). The meaning of q /qscale is
+codec-dependent.
+If qscale is used without a stream_specifier then it applies only
+to the video stream, this is to maintain compatibility with previous behavior
+and as specifying the same codec specific value to 2 different codecs that is
+audio and video generally is not what is intended when no stream_specifier is
+used.
+
+
+-filter[:stream_specifier ] filtergraph (output,per-stream )
+Create the filtergraph specified by filtergraph and use it to
+filter the stream.
+
+filtergraph is a description of the filtergraph to apply to
+the stream, and must have a single input and a single output of the
+same type of the stream. In the filtergraph, the input is associated
+to the label in
, and the output to the label out
. See
+the ffmpeg-filters manual for more information about the filtergraph
+syntax.
+
+See the -filter_complex option if you
+want to create filtergraphs with multiple inputs and/or outputs.
+
+
+-filter_script[:stream_specifier ] filename (output,per-stream )
+This option is similar to -filter , the only difference is that its
+argument is the name of the file from which a filtergraph description is to be
+read.
+
+
+-pre[:stream_specifier ] preset_name (output,per-stream )
+Specify the preset for matching stream(s).
+
+
+-stats (global )
+Print encoding progress/statistics. It is on by default, to explicitly
+disable it you need to specify -nostats
.
+
+
+-progress url (global )
+Send program-friendly progress information to url .
+
+Progress information is written approximately every second and at the end of
+the encoding process. It is made of "key =value " lines. key
+consists of only alphanumeric characters. The last key of a sequence of
+progress information is always "progress".
+
+
+-stdin
+Enable interaction on standard input. On by default unless standard input is
+used as an input. To explicitly disable interaction you need to specify
+-nostdin
.
+
+Disabling interaction on standard input is useful, for example, if
+ffmpeg is in the background process group. Roughly the same result can
+be achieved with ffmpeg ... < /dev/null
but it requires a
+shell.
+
+
+-debug_ts (global )
+Print timestamp information. It is off by default. This option is
+mostly useful for testing and debugging purposes, and the output
+format may change from one version to another, so it should not be
+employed by portable scripts.
+
+See also the option -fdebug ts
.
+
+
+-attach filename (output )
+Add an attachment to the output file. This is supported by a few formats
+like Matroska for e.g. fonts used in rendering subtitles. Attachments
+are implemented as a specific type of stream, so this option will add
+a new stream to the file. It is then possible to use per-stream options
+on this stream in the usual way. Attachment streams created with this
+option will be created after all the other streams (i.e. those created
+with -map
or automatic mappings).
+
+Note that for Matroska you also have to set the mimetype metadata tag:
+
+
ffmpeg -i INPUT -attach DejaVuSans.ttf -metadata:s:2 mimetype=application/x-truetype-font out.mkv
+
+(assuming that the attachment stream will be third in the output file).
+
+
+-dump_attachment[:stream_specifier ] filename (input,per-stream )
+Extract the matching attachment stream into a file named filename . If
+filename is empty, then the value of the filename
metadata tag
+will be used.
+
+E.g. to extract the first attachment to a file named ’out.ttf’:
+
+
ffmpeg -dump_attachment:t:0 out.ttf -i INPUT
+
+To extract all attachments to files determined by the filename
tag:
+
+
ffmpeg -dump_attachment:t "" -i INPUT
+
+
+Technical note – attachments are implemented as codec extradata, so this
+option can actually be used to extract extradata from any stream, not just
+attachments.
+
+
+
+
+
+
5.5 Video Options# TOC
+
+
+-vframes number (output )
+Set the number of video frames to output. This is an alias for -frames:v
.
+
+-r[:stream_specifier ] fps (input/output,per-stream )
+Set frame rate (Hz value, fraction or abbreviation).
+
+As an input option, ignore any timestamps stored in the file and instead
+generate timestamps assuming constant frame rate fps .
+This is not the same as the -framerate option used for some input formats
+like image2 or v4l2 (it used to be the same in older versions of FFmpeg).
+If in doubt use -framerate instead of the input option -r .
+
+As an output option, duplicate or drop input frames to achieve constant output
+frame rate fps .
+
+
+-s[:stream_specifier ] size (input/output,per-stream )
+Set frame size.
+
+As an input option, this is a shortcut for the video_size private
+option, recognized by some demuxers for which the frame size is either not
+stored in the file or is configurable – e.g. raw video or video grabbers.
+
+As an output option, this inserts the scale
video filter to the
+end of the corresponding filtergraph. Please use the scale
filter
+directly to insert it at the beginning or some other place.
+
+The format is ‘wxh ’ (default - same as source).
+
+
+-aspect[:stream_specifier ] aspect (output,per-stream )
+Set the video display aspect ratio specified by aspect .
+
+aspect can be a floating point number string, or a string of the
+form num :den , where num and den are the
+numerator and denominator of the aspect ratio. For example "4:3",
+"16:9", "1.3333", and "1.7777" are valid argument values.
+
+If used together with -vcodec copy , it will affect the aspect ratio
+stored at container level, but not the aspect ratio stored in encoded
+frames, if it exists.
+
+
+-vn (output )
+Disable video recording.
+
+
+-vcodec codec (output )
+Set the video codec. This is an alias for -codec:v
.
+
+
+-pass[:stream_specifier ] n (output,per-stream )
+Select the pass number (1 or 2). It is used to do two-pass
+video encoding. The statistics of the video are recorded in the first
+pass into a log file (see also the option -passlogfile),
+and in the second pass that log file is used to generate the video
+at the exact requested bitrate.
+On pass 1, you may just deactivate audio and set output to null,
+examples for Windows and Unix:
+
+
ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y NUL
+ffmpeg -i foo.mov -c:v libxvid -pass 1 -an -f rawvideo -y /dev/null
+
+
+
+-passlogfile[:stream_specifier ] prefix (output,per-stream )
+Set two-pass log file name prefix to prefix , the default file name
+prefix is “ffmpeg2pass”. The complete file name will be
+PREFIX-N.log , where N is a number specific to the output
+stream.
+
+
+-vf filtergraph (output )
+Create the filtergraph specified by filtergraph and use it to
+filter the stream.
+
+This is an alias for -filter:v
, see the -filter option .
+
+
+
+
+
5.6 Advanced Video options# TOC
+
+
+-pix_fmt[:stream_specifier ] format (input/output,per-stream )
+Set pixel format. Use -pix_fmts
to show all the supported
+pixel formats.
+If the selected pixel format can not be selected, ffmpeg will print a
+warning and select the best pixel format supported by the encoder.
+If pix_fmt is prefixed by a +
, ffmpeg will exit with an error
+if the requested pixel format can not be selected, and automatic conversions
+inside filtergraphs are disabled.
+If pix_fmt is a single +
, ffmpeg selects the same pixel format
+as the input (or graph output) and automatic conversions are disabled.
+
+
+-sws_flags flags (input/output )
+Set SwScaler flags.
+
+-vdt n
+Discard threshold.
+
+
+-rc_override[:stream_specifier ] override (output,per-stream )
+Rate control override for specific intervals, formatted as "int,int,int"
+list separated with slashes. Two first values are the beginning and
+end frame numbers, last one is quantizer to use if positive, or quality
+factor if negative.
+
+
+-ilme
+Force interlacing support in encoder (MPEG-2 and MPEG-4 only).
+Use this option if your input file is interlaced and you want
+to keep the interlaced format for minimum losses.
+The alternative is to deinterlace the input stream with
+-deinterlace , but deinterlacing introduces losses.
+
+-psnr
+Calculate PSNR of compressed frames.
+
+-vstats
+Dump video coding statistics to vstats_HHMMSS.log .
+
+-vstats_file file
+Dump video coding statistics to file .
+
+-top[:stream_specifier ] n (output,per-stream )
+top=1/bottom=0/auto=-1 field first
+
+-dc precision
+Intra_dc_precision.
+
+-vtag fourcc/tag (output )
+Force video tag/fourcc. This is an alias for -tag:v
.
+
+-qphist (global )
+Show QP histogram
+
+-vbsf bitstream_filter
+Deprecated see -bsf
+
+
+-force_key_frames[:stream_specifier ] time [,time ...] (output,per-stream )
+-force_key_frames[:stream_specifier ] expr:expr (output,per-stream )
+Force key frames at the specified timestamps, more precisely at the first
+frames after each specified time.
+
+If the argument is prefixed with expr:
, the string expr
+is interpreted like an expression and is evaluated for each frame. A
+key frame is forced in case the evaluation is non-zero.
+
+If one of the times is "chapters
[delta ]", it is expanded into
+the time of the beginning of all chapters in the file, shifted by
+delta , expressed as a time in seconds.
+This option can be useful to ensure that a seek point is present at a
+chapter mark or any other designated place in the output file.
+
+For example, to insert a key frame at 5 minutes, plus key frames 0.1 second
+before the beginning of every chapter:
+
+
-force_key_frames 0:05:00,chapters-0.1
+
+
+The expression in expr can contain the following constants:
+
+n
+the number of current processed frame, starting from 0
+
+n_forced
+the number of forced frames
+
+prev_forced_n
+the number of the previous forced frame, it is NAN
when no
+keyframe was forced yet
+
+prev_forced_t
+the time of the previous forced frame, it is NAN
when no
+keyframe was forced yet
+
+t
+the time of the current processed frame
+
+
+
+For example to force a key frame every 5 seconds, you can specify:
+
+
-force_key_frames expr:gte(t,n_forced*5)
+
+
+To force a key frame 5 seconds after the time of the last forced one,
+starting from second 13:
+
+
-force_key_frames expr:if(isnan(prev_forced_t),gte(t,13),gte(t,prev_forced_t+5))
+
+
+Note that forcing too many keyframes is very harmful for the lookahead
+algorithms of certain encoders: using fixed-GOP options or similar
+would be more efficient.
+
+
+-copyinkf[:stream_specifier ] (output,per-stream )
+When doing stream copy, copy also non-key frames found at the
+beginning.
+
+
+-hwaccel[:stream_specifier ] hwaccel (input,per-stream )
+Use hardware acceleration to decode the matching stream(s). The allowed values
+of hwaccel are:
+
+none
+Do not use any hardware acceleration (the default).
+
+
+auto
+Automatically select the hardware acceleration method.
+
+
+vda
+Use Apple VDA hardware acceleration.
+
+
+vdpau
+Use VDPAU (Video Decode and Presentation API for Unix) hardware acceleration.
+
+
+dxva2
+Use DXVA2 (DirectX Video Acceleration) hardware acceleration.
+
+
+
+This option has no effect if the selected hwaccel is not available or not
+supported by the chosen decoder.
+
+Note that most acceleration methods are intended for playback and will not be
+faster than software decoding on modern CPUs. Additionally, ffmpeg
+will usually need to copy the decoded frames from the GPU memory into the system
+memory, resulting in further performance loss. This option is thus mainly
+useful for testing.
+
+
+-hwaccel_device[:stream_specifier ] hwaccel_device (input,per-stream )
+Select a device to use for hardware acceleration.
+
+This option only makes sense when the -hwaccel option is also
+specified. Its exact meaning depends on the specific hardware acceleration
+method chosen.
+
+
+vdpau
+For VDPAU, this option specifies the X11 display/screen to use. If this option
+is not specified, the value of the DISPLAY environment variable is used
+
+
+dxva2
+For DXVA2, this option should contain the number of the display adapter to use.
+If this option is not specified, the default adapter is used.
+
+
+
+
+
+
+
5.7 Audio Options# TOC
+
+
+-aframes number (output )
+Set the number of audio frames to output. This is an alias for -frames:a
.
+
+-ar[:stream_specifier ] freq (input/output,per-stream )
+Set the audio sampling frequency. For output streams it is set by
+default to the frequency of the corresponding input stream. For input
+streams this option only makes sense for audio grabbing devices and raw
+demuxers and is mapped to the corresponding demuxer options.
+
+-aq q (output )
+Set the audio quality (codec-specific, VBR). This is an alias for -q:a.
+
+-ac[:stream_specifier ] channels (input/output,per-stream )
+Set the number of audio channels. For output streams it is set by
+default to the number of input audio channels. For input streams
+this option only makes sense for audio grabbing devices and raw demuxers
+and is mapped to the corresponding demuxer options.
+
+-an (output )
+Disable audio recording.
+
+-acodec codec (input/output )
+Set the audio codec. This is an alias for -codec:a
.
+
+-sample_fmt[:stream_specifier ] sample_fmt (output,per-stream )
+Set the audio sample format. Use -sample_fmts
to get a list
+of supported sample formats.
+
+
+-af filtergraph (output )
+Create the filtergraph specified by filtergraph and use it to
+filter the stream.
+
+This is an alias for -filter:a
, see the -filter option .
+
+
+
+
+
5.8 Advanced Audio options# TOC
+
+
+-atag fourcc/tag (output )
+Force audio tag/fourcc. This is an alias for -tag:a
.
+
+-absf bitstream_filter
+Deprecated, see -bsf
+
+-guess_layout_max channels (input,per-stream )
+If some input channel layout is not known, try to guess only if it
+corresponds to at most the specified number of channels. For example, 2
+tells to ffmpeg
to recognize 1 channel as mono and 2 channels as
+stereo but not 6 channels as 5.1. The default is to always try to guess. Use
+0 to disable all guessing.
+
+
+
+
+
5.9 Subtitle options# TOC
+
+
+-scodec codec (input/output )
+Set the subtitle codec. This is an alias for -codec:s
.
+
+-sn (output )
+Disable subtitle recording.
+
+-sbsf bitstream_filter
+Deprecated, see -bsf
+
+
+
+
+
5.10 Advanced Subtitle options# TOC
+
+
+-fix_sub_duration
+Fix subtitles durations. For each subtitle, wait for the next packet in the
+same stream and adjust the duration of the first to avoid overlap. This is
+necessary with some subtitles codecs, especially DVB subtitles, because the
+duration in the original packet is only a rough estimate and the end is
+actually marked by an empty subtitle frame. Failing to use this option when
+necessary can result in exaggerated durations or muxing failures due to
+non-monotonic timestamps.
+
+Note that this option will delay the output of all data until the next
+subtitle packet is decoded: it may increase memory consumption and latency a
+lot.
+
+
+-canvas_size size
+Set the size of the canvas used to render subtitles.
+
+
+
+
+
+
5.11 Advanced options# TOC
+
+
+-map [-]input_file_id [:stream_specifier ][,sync_file_id [:stream_specifier ]] | [linklabel] (output )
+
+Designate one or more input streams as a source for the output file. Each input
+stream is identified by the input file index input_file_id and
+the input stream index input_stream_id within the input
+file. Both indices start at 0. If specified,
+sync_file_id :stream_specifier sets which input stream
+is used as a presentation sync reference.
+
+The first -map
option on the command line specifies the
+source for output stream 0, the second -map
option specifies
+the source for output stream 1, etc.
+
+A -
character before the stream identifier creates a "negative" mapping.
+It disables matching streams from already created mappings.
+
+An alternative [linklabel] form will map outputs from complex filter
+graphs (see the -filter_complex option) to the output file.
+linklabel must correspond to a defined output link label in the graph.
+
+For example, to map ALL streams from the first input file to output
+
+
ffmpeg -i INPUT -map 0 output
+
+
+For example, if you have two audio streams in the first input file,
+these streams are identified by "0:0" and "0:1". You can use
+-map
to select which streams to place in an output file. For
+example:
+
+
ffmpeg -i INPUT -map 0:1 out.wav
+
+will map the input stream in INPUT identified by "0:1" to
+the (single) output stream in out.wav .
+
+For example, to select the stream with index 2 from input file
+a.mov (specified by the identifier "0:2"), and stream with
+index 6 from input b.mov (specified by the identifier "1:6"),
+and copy them to the output file out.mov :
+
+
ffmpeg -i a.mov -i b.mov -c copy -map 0:2 -map 1:6 out.mov
+
+
+To select all video and the third audio stream from an input file:
+
+
ffmpeg -i INPUT -map 0:v -map 0:a:2 OUTPUT
+
+
+To map all the streams except the second audio, use negative mappings
+
+
ffmpeg -i INPUT -map 0 -map -0:a:1 OUTPUT
+
+
+To pick the English audio stream:
+
+
ffmpeg -i INPUT -map 0:m:language:eng OUTPUT
+
+
+Note that using this option disables the default mappings for this output file.
+
+
+-map_channel [input_file_id .stream_specifier .channel_id |-1][:output_file_id .stream_specifier ]
+Map an audio channel from a given input to an output. If
+output_file_id .stream_specifier is not set, the audio channel will
+be mapped on all the audio streams.
+
+Using "-1" instead of
+input_file_id .stream_specifier .channel_id will map a muted
+channel.
+
+For example, assuming INPUT is a stereo audio file, you can switch the
+two audio channels with the following command:
+
+
ffmpeg -i INPUT -map_channel 0.0.1 -map_channel 0.0.0 OUTPUT
+
+
+If you want to mute the first channel and keep the second:
+
+
ffmpeg -i INPUT -map_channel -1 -map_channel 0.0.1 OUTPUT
+
+
+The order of the "-map_channel" option specifies the order of the channels in
+the output stream. The output channel layout is guessed from the number of
+channels mapped (mono if one "-map_channel", stereo if two, etc.). Using "-ac"
+in combination of "-map_channel" makes the channel gain levels to be updated if
+input and output channel layouts don’t match (for instance two "-map_channel"
+options and "-ac 6").
+
+You can also extract each channel of an input to specific outputs; the following
+command extracts two channels of the INPUT audio stream (file 0, stream 0)
+to the respective OUTPUT_CH0 and OUTPUT_CH1 outputs:
+
+
ffmpeg -i INPUT -map_channel 0.0.0 OUTPUT_CH0 -map_channel 0.0.1 OUTPUT_CH1
+
+
+The following example splits the channels of a stereo input into two separate
+streams, which are put into the same output file:
+
+
ffmpeg -i stereo.wav -map 0:0 -map 0:0 -map_channel 0.0.0:0.0 -map_channel 0.0.1:0.1 -y out.ogg
+
+
+Note that currently each output stream can only contain channels from a single
+input stream; you can’t for example use "-map_channel" to pick multiple input
+audio channels contained in different streams (from the same or different files)
+and merge them into a single output stream. It is therefore not currently
+possible, for example, to turn two separate mono streams into a single stereo
+stream. However splitting a stereo stream into two single channel mono streams
+is possible.
+
+If you need this feature, a possible workaround is to use the amerge
+filter. For example, if you need to merge a media (here input.mkv ) with 2
+mono audio streams into one single stereo channel audio stream (and keep the
+video stream), you can use the following command:
+
+
ffmpeg -i input.mkv -filter_complex "[0:1] [0:2] amerge" -c:a pcm_s16le -c:v copy output.mkv
+
+
+
+-map_metadata[:metadata_spec_out ] infile [:metadata_spec_in ] (output,per-metadata )
+Set metadata information of the next output file from infile . Note that
+those are file indices (zero-based), not filenames.
+Optional metadata_spec_in/out parameters specify, which metadata to copy.
+A metadata specifier can have the following forms:
+
+g
+global metadata, i.e. metadata that applies to the whole file
+
+
+s [:stream_spec ]
+per-stream metadata. stream_spec is a stream specifier as described
+in the Stream specifiers chapter. In an input metadata specifier, the first
+matching stream is copied from. In an output metadata specifier, all matching
+streams are copied to.
+
+
+c :chapter_index
+per-chapter metadata. chapter_index is the zero-based chapter index.
+
+
+p :program_index
+per-program metadata. program_index is the zero-based program index.
+
+
+If metadata specifier is omitted, it defaults to global.
+
+By default, global metadata is copied from the first input file,
+per-stream and per-chapter metadata is copied along with streams/chapters. These
+default mappings are disabled by creating any mapping of the relevant type. A negative
+file index can be used to create a dummy mapping that just disables automatic copying.
+
+For example to copy metadata from the first stream of the input file to global metadata
+of the output file:
+
+
ffmpeg -i in.ogg -map_metadata 0:s:0 out.mp3
+
+
+To do the reverse, i.e. copy global metadata to all audio streams:
+
+
ffmpeg -i in.mkv -map_metadata:s:a 0:g out.mkv
+
+Note that simple 0
would work as well in this example, since global
+metadata is assumed by default.
+
+
+-map_chapters input_file_index (output )
+Copy chapters from input file with index input_file_index to the next
+output file. If no chapter mapping is specified, then chapters are copied from
+the first input file with at least one chapter. Use a negative file index to
+disable any chapter copying.
+
+
+-benchmark (global )
+Show benchmarking information at the end of an encode.
+Shows CPU time used and maximum memory consumption.
+Maximum memory consumption is not supported on all systems,
+it will usually display as 0 if not supported.
+
+-benchmark_all (global )
+Show benchmarking information during the encode.
+Shows CPU time used in various steps (audio/video encode/decode).
+
+-timelimit duration (global )
+Exit after ffmpeg has been running for duration seconds.
+
+-dump (global )
+Dump each input packet to stderr.
+
+-hex (global )
+When dumping packets, also dump the payload.
+
+-re (input )
+Read input at native frame rate. Mainly used to simulate a grab device,
+or live input stream (e.g. when reading from a file). Should not be used
+with actual grab devices or live input streams (where it can cause packet
+loss).
+By default ffmpeg
attempts to read the input(s) as fast as possible.
+This option will slow down the reading of the input(s) to the native frame rate
+of the input(s). It is useful for real-time output (e.g. live streaming).
+
+-loop_input
+Loop over the input stream. Currently it works only for image
+streams. This option is used for automatic FFserver testing.
+This option is deprecated, use -loop 1.
+
+-loop_output number_of_times
+Repeatedly loop output for formats that support looping such as animated GIF
+(0 will loop the output infinitely).
+This option is deprecated, use -loop.
+
+-vsync parameter
+Video sync method.
+For compatibility reasons old values can be specified as numbers.
+Newly added values will have to be specified as strings always.
+
+
+0, passthrough
+Each frame is passed with its timestamp from the demuxer to the muxer.
+
+1, cfr
+Frames will be duplicated and dropped to achieve exactly the requested
+constant frame rate.
+
+2, vfr
+Frames are passed through with their timestamp or dropped so as to
+prevent 2 frames from having the same timestamp.
+
+drop
+As passthrough but destroys all timestamps, making the muxer generate
+fresh timestamps based on frame-rate.
+
+-1, auto
+Chooses between 1 and 2 depending on muxer capabilities. This is the
+default method.
+
+
+
+Note that the timestamps may be further modified by the muxer, after this.
+For example, in the case that the format option avoid_negative_ts
+is enabled.
+
+With -map you can select from which stream the timestamps should be
+taken. You can leave either video or audio unchanged and sync the
+remaining stream(s) to the unchanged one.
+
+
+-async samples_per_second
+Audio sync method. "Stretches/squeezes" the audio stream to match the timestamps,
+the parameter is the maximum samples per second by which the audio is changed.
+-async 1 is a special case where only the start of the audio stream is corrected
+without any later correction.
+
+Note that the timestamps may be further modified by the muxer, after this.
+For example, in the case that the format option avoid_negative_ts
+is enabled.
+
+This option has been deprecated. Use the aresample
audio filter instead.
+
+
+-copyts
+Do not process input timestamps, but keep their values without trying
+to sanitize them. In particular, do not remove the initial start time
+offset value.
+
+Note that, depending on the vsync option or on specific muxer
+processing (e.g. in case the format option avoid_negative_ts
+is enabled) the output timestamps may mismatch with the input
+timestamps even when this option is selected.
+
+
+-start_at_zero
+When used with copyts , shift input timestamps so they start at zero.
+
+This means that using e.g. -ss 50
will make output timestamps start at
+50 seconds, regardless of what timestamp the input file started at.
+
+
+-copytb mode
+Specify how to set the encoder timebase when stream copying. mode is an
+integer numeric value, and can assume one of the following values:
+
+
+1
+Use the demuxer timebase.
+
+The time base is copied to the output encoder from the corresponding input
+demuxer. This is sometimes required to avoid non monotonically increasing
+timestamps when copying video streams with variable frame rate.
+
+
+0
+Use the decoder timebase.
+
+The time base is copied to the output encoder from the corresponding input
+decoder.
+
+
+-1
+Try to make the choice automatically, in order to generate a sane output.
+
+
+
+Default value is -1.
+
+
+-shortest (output )
+Finish encoding when the shortest input stream ends.
+
+-dts_delta_threshold
+Timestamp discontinuity delta threshold.
+
+-muxdelay seconds (input )
+Set the maximum demux-decode delay.
+
+-muxpreload seconds (input )
+Set the initial demux-decode delay.
+
+-streamid output-stream-index :new-value (output )
+Assign a new stream-id value to an output stream. This option should be
+specified prior to the output filename to which it applies.
+For the situation where multiple output files exist, a streamid
+may be reassigned to a different value.
+
+For example, to set the stream 0 PID to 33 and the stream 1 PID to 36 for
+an output mpegts file:
+
+
ffmpeg -i infile -streamid 0:33 -streamid 1:36 out.ts
+
+
+
+-bsf[:stream_specifier ] bitstream_filters (output,per-stream )
+Set bitstream filters for matching streams. bitstream_filters is
+a comma-separated list of bitstream filters. Use the -bsfs
option
+to get the list of bitstream filters.
+
+
ffmpeg -i h264.mp4 -c:v copy -bsf:v h264_mp4toannexb -an out.h264
+
+
+
ffmpeg -i file.mov -an -vn -bsf:s mov2textsub -c:s copy -f rawvideo sub.txt
+
+
+
+-tag[:stream_specifier ] codec_tag (input/output,per-stream )
+Force a tag/fourcc for matching streams.
+
+
+-timecode hh :mm :ss SEPff
+Specify Timecode for writing. SEP is ’:’ for non drop timecode and ’;’
+(or ’.’) for drop.
+
+
ffmpeg -i input.mpg -timecode 01:02:03.04 -r 30000/1001 -s ntsc output.mpg
+
+
+
+-filter_complex filtergraph (global )
+Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
+outputs. For simple graphs – those with one input and one output of the same
+type – see the -filter options. filtergraph is a description of
+the filtergraph, as described in the “Filtergraph syntax” section of the
+ffmpeg-filters manual.
+
+Input link labels must refer to input streams using the
+[file_index:stream_specifier]
syntax (i.e. the same as -map
+uses). If stream_specifier matches multiple streams, the first one will be
+used. An unlabeled input will be connected to the first unused input stream of
+the matching type.
+
+Output link labels are referred to with -map . Unlabeled outputs are
+added to the first output file.
+
+Note that with this option it is possible to use only lavfi sources without
+normal input files.
+
+For example, to overlay an image over video
+
+
ffmpeg -i video.mkv -i image.png -filter_complex '[0:v][1:v]overlay[out]' -map
+'[out]' out.mkv
+
+Here [0:v]
refers to the first video stream in the first input file,
+which is linked to the first (main) input of the overlay filter. Similarly the
+first video stream in the second input is linked to the second (overlay) input
+of overlay.
+
+Assuming there is only one video stream in each input file, we can omit input
+labels, so the above is equivalent to
+
+
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay[out]' -map
+'[out]' out.mkv
+
+
+Furthermore we can omit the output label and the single output from the filter
+graph will be added to the output file automatically, so we can simply write
+
+
ffmpeg -i video.mkv -i image.png -filter_complex 'overlay' out.mkv
+
+
+To generate 5 seconds of pure red video using lavfi color
source:
+
+
ffmpeg -filter_complex 'color=c=red' -t 5 out.mkv
+
+
+
+-lavfi filtergraph (global )
+Define a complex filtergraph, i.e. one with arbitrary number of inputs and/or
+outputs. Equivalent to -filter_complex .
+
+
+-filter_complex_script filename (global )
+This option is similar to -filter_complex , the only difference is that
+its argument is the name of the file from which a complex filtergraph
+description is to be read.
+
+
+-accurate_seek (input )
+This option enables or disables accurate seeking in input files with the
+-ss option. It is enabled by default, so seeking is accurate when
+transcoding. Use -noaccurate_seek to disable it, which may be useful
+e.g. when copying some streams and transcoding the others.
+
+
+-override_ffserver (global )
+Overrides the input specifications from ffserver
. Using this
+option you can map any input stream to ffserver
and control
+many aspects of the encoding from ffmpeg
. Without this
+option ffmpeg
will transmit to ffserver
what is
+requested by ffserver
.
+
+The option is intended for cases where features are needed that cannot be
+specified to ffserver
but can be to ffmpeg
.
+
+
+-sdp_file file (global )
+Print sdp information to file .
+This allows dumping sdp information when at least one output isn’t an
+rtp stream.
+
+
+-discard (input )
+Allows discarding specific streams or frames of streams at the demuxer.
+Not all demuxers support this.
+
+
+none
+Discard no frame.
+
+
+default
+Default, which discards no frames.
+
+
+noref
+Discard all non-reference frames.
+
+
+bidir
+Discard all bidirectional frames.
+
+
+nokey
+Discard all frames except keyframes.
+
+
+all
+Discard all frames.
+
+
+
+
+
+
+
As a special exception, you can use a bitmap subtitle stream as input: it
+will be converted into a video with the same size as the largest video in
+the file, or 720x576 if no video is present. Note that this is an
+experimental and temporary solution. It will be removed once libavfilter has
+proper support for subtitles.
+
+
For example, to hardcode subtitles on top of a DVB-T recording stored in
+MPEG-TS format, delaying the subtitles by 1 second:
+
+
ffmpeg -i input.ts -filter_complex \
+ '[#0x2ef] setpts=PTS+1/TB [sub] ; [#0x2d0] [sub] overlay' \
+ -sn -map '#0x2dc' output.mkv
+
+
(0x2d0, 0x2dc and 0x2ef are the MPEG-TS PIDs of respectively the video,
+audio and subtitles streams; 0:0, 0:3 and 0:7 would have worked too)
+
+
+
5.12 Preset files# TOC
+
A preset file contains a sequence of option =value pairs,
+one for each line, specifying a sequence of options which would be
+awkward to specify on the command line. Lines starting with the hash
+(’#’) character are ignored and are used to provide comments. Check
+the presets directory in the FFmpeg source tree for examples.
+
+
There are two types of preset files: ffpreset and avpreset files.
+
+
+
5.12.1 ffpreset files# TOC
+
ffpreset files are specified with the vpre
, apre
,
+spre
, and fpre
options. The fpre
option takes the
+filename of the preset instead of a preset name as input and can be
+used for any kind of codec. For the vpre
, apre
, and
+spre
options, the options specified in a preset file are
+applied to the currently selected codec of the same type as the preset
+option.
+
+
The argument passed to the vpre
, apre
, and spre
+preset options identifies the preset file to use according to the
+following rules:
+
+
First ffmpeg searches for a file named arg .ffpreset in the
+directories $FFMPEG_DATADIR (if set), and $HOME/.ffmpeg , and in
+the datadir defined at configuration time (usually PREFIX/share/ffmpeg )
+or in a ffpresets folder along the executable on win32,
+in that order. For example, if the argument is libvpx-1080p
, it will
+search for the file libvpx-1080p.ffpreset .
+
+
If no such file is found, then ffmpeg will search for a file named
+codec_name -arg .ffpreset in the above-mentioned
+directories, where codec_name is the name of the codec to which
+the preset file options will be applied. For example, if you select
+the video codec with -vcodec libvpx
and use -vpre 1080p
,
+then it will search for the file libvpx-1080p.ffpreset .
+
+
+
5.12.2 avpreset files# TOC
+
avpreset files are specified with the pre
option. They work similar to
+ffpreset files, but they only allow encoder-specific options. Therefore, an
+option =value pair specifying an encoder cannot be used.
+
+
When the pre
option is specified, ffmpeg will look for files with the
+suffix .avpreset in the directories $AVCONV_DATADIR (if set), and
+$HOME/.avconv , and in the datadir defined at configuration time (usually
+PREFIX/share/ffmpeg ), in that order.
+
+
First ffmpeg searches for a file named codec_name -arg .avpreset in
+the above-mentioned directories, where codec_name is the name of the codec
+to which the preset file options will be applied. For example, if you select the
+video codec with -vcodec libvpx
and use -pre 1080p
, then it will
+search for the file libvpx-1080p.avpreset .
+
+
If no such file is found, then ffmpeg will search for a file named
+arg .avpreset in the same directories.
+
+
+
+
+
+
+ For streaming at very low bitrates, use a low frame rate
+and a small GOP size. This is especially true for RealVideo where
+the Linux player does not seem to be very fast, so it can miss
+frames. An example is:
+
+
+
ffmpeg -g 3 -r 3 -t 10 -b:v 50k -s qcif -f rv10 /tmp/b.rm
+
+
+ The parameter ’q’ which is displayed while encoding is the current
+quantizer. The value 1 indicates that a very good quality could
+be achieved. The value 31 indicates the worst quality. If q=31 appears
+too often, it means that the encoder cannot compress enough to meet
+your bitrate. You must either increase the bitrate, decrease the
+frame rate or decrease the frame size.
+
+ If your computer is not fast enough, you can speed up the
+compression at the expense of the compression ratio. You can use
+’-me zero’ to speed up motion estimation, and ’-g 0’ to disable
+motion estimation completely (you have only I-frames, which means it
+is about as good as JPEG compression).
+
+ To have very low audio bitrates, reduce the sampling frequency
+(down to 22050 Hz for MPEG audio, 22050 or 11025 for AC-3).
+
+ To have a constant quality (but a variable bitrate), use the option
+’-qscale n’ when ’n’ is between 1 (excellent quality) and 31 (worst
+quality).
+
+
+
+
+
7 Examples# TOC
+
+
+
7.1 Video and Audio grabbing# TOC
+
+
If you specify the input format and device then ffmpeg can grab video
+and audio directly.
+
+
+
ffmpeg -f oss -i /dev/dsp -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+
+
Or with an ALSA audio source (mono input, card id 1) instead of OSS:
+
+
ffmpeg -f alsa -ac 1 -i hw:1 -f video4linux2 -i /dev/video0 /tmp/out.mpg
+
+
+
Note that you must activate the right video source and channel before
+launching ffmpeg with any TV viewer such as
+xawtv by Gerd Knorr. You also
+have to set the audio recording levels correctly with a
+standard mixer.
+
+
+
7.2 X11 grabbing# TOC
+
+
Grab the X11 display with ffmpeg via
+
+
+
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0 /tmp/out.mpg
+
+
+
0.0 is display.screen number of your X11 server, same as
+the DISPLAY environment variable.
+
+
+
ffmpeg -f x11grab -video_size cif -framerate 25 -i :0.0+10,20 /tmp/out.mpg
+
+
+
0.0 is display.screen number of your X11 server, same as the DISPLAY environment
+variable. 10 is the x-offset and 20 the y-offset for the grabbing.
+
+
+
7.3 Video and Audio file format conversion# TOC
+
+
Any supported file format and protocol can serve as input to ffmpeg:
+
+
Examples:
+
+ You can use YUV files as input:
+
+
+
ffmpeg -i /tmp/test%d.Y /tmp/out.mpg
+
+
+It will use the files:
+
+
/tmp/test0.Y, /tmp/test0.U, /tmp/test0.V,
+/tmp/test1.Y, /tmp/test1.U, /tmp/test1.V, etc...
+
+
+The Y files use twice the resolution of the U and V files. They are
+raw files, without header. They can be generated by all decent video
+decoders. You must specify the size of the image with the -s option
+if ffmpeg cannot guess it.
+
+ You can input from a raw YUV420P file:
+
+
+
ffmpeg -i /tmp/test.yuv /tmp/out.avi
+
+
+test.yuv is a file containing raw YUV planar data. Each frame is composed
+of the Y plane followed by the U and V planes at half vertical and
+horizontal resolution.
+
+ You can output to a raw YUV420P file:
+
+
+
ffmpeg -i mydivx.avi hugefile.yuv
+
+
+ You can set several input files and output files:
+
+
+
ffmpeg -i /tmp/a.wav -s 640x480 -i /tmp/a.yuv /tmp/a.mpg
+
+
+Converts the audio file a.wav and the raw YUV video file a.yuv
+to MPEG file a.mpg.
+
+ You can also do audio and video conversions at the same time:
+
+
+
ffmpeg -i /tmp/a.wav -ar 22050 /tmp/a.mp2
+
+
+Converts a.wav to MPEG audio at 22050 Hz sample rate.
+
+ You can encode to several formats at the same time and define a
+mapping from input stream to output streams:
+
+
+
ffmpeg -i /tmp/a.wav -map 0:a -b:a 64k /tmp/a.mp2 -map 0:a -b:a 128k /tmp/b.mp2
+
+
+Converts a.wav to a.mp2 at 64 kbits and to b.mp2 at 128 kbits. ’-map
+file:index’ specifies which input stream is used for each output
+stream, in the order of the definition of output streams.
+
+ You can transcode decrypted VOBs:
+
+
+
ffmpeg -i snatch_1.vob -f avi -c:v mpeg4 -b:v 800k -g 300 -bf 2 -c:a libmp3lame -b:a 128k snatch.avi
+
+
+This is a typical DVD ripping example; the input is a VOB file, the
+output an AVI file with MPEG-4 video and MP3 audio. Note that in this
+command we use B-frames so the MPEG-4 stream is DivX5 compatible, and
+GOP size is 300 which means one intra frame every 10 seconds for 29.97fps
+input video. Furthermore, the audio stream is MP3-encoded so you need
+to enable LAME support by passing --enable-libmp3lame
to configure.
+The mapping is particularly useful for DVD transcoding
+to get the desired audio language.
+
+NOTE: To see the supported input formats, use ffmpeg -formats
.
+
+ You can extract images from a video, or create a video from many images:
+
+For extracting images from a video:
+
+
ffmpeg -i foo.avi -r 1 -s WxH -f image2 foo-%03d.jpeg
+
+
+This will extract one video frame per second from the video and will
+output them in files named foo-001.jpeg , foo-002.jpeg ,
+etc. Images will be rescaled to fit the new WxH values.
+
+If you want to extract just a limited number of frames, you can use the
+above command in combination with the -vframes or -t option, or in
+combination with -ss to start extracting from a certain point in time.
+
+For creating a video from many images:
+
+
ffmpeg -f image2 -i foo-%03d.jpeg -r 12 -s WxH foo.avi
+
+
+The syntax foo-%03d.jpeg
specifies to use a decimal number
+composed of three digits padded with zeroes to express the sequence
+number. It is the same syntax supported by the C printf function, but
+only formats accepting a normal integer are suitable.
+
+When importing an image sequence, -i also supports expanding
+shell-like wildcard patterns (globbing) internally, by selecting the
+image2-specific -pattern_type glob
option.
+
+For example, for creating a video from filenames matching the glob pattern
+foo-*.jpeg
:
+
+
ffmpeg -f image2 -pattern_type glob -i 'foo-*.jpeg' -r 12 -s WxH foo.avi
+
+
+ You can put many streams of the same type in the output:
+
+
+
ffmpeg -i test1.avi -i test2.avi -map 1:1 -map 1:0 -map 0:1 -map 0:0 -c copy -y test12.nut
+
+
+The resulting output file test12.nut will contain the first four streams
+from the input files in reverse order.
+
+ To force CBR video output:
+
+
ffmpeg -i myfile.avi -b 4000k -minrate 4000k -maxrate 4000k -bufsize 1835k out.m2v
+
+
+ The four options lmin, lmax, mblmin and mblmax use ’lambda’ units,
+but you may use the QP2LAMBDA constant to easily convert from ’q’ units:
+
+
ffmpeg -i src.ext -lmax 21*QP2LAMBDA dst.ext
+
+
+
+
+
+
+
8 See Also# TOC
+
+
ffmpeg-all ,
+ffplay , ffprobe , ffserver ,
+ffmpeg-utils ,
+ffmpeg-scaler ,
+ffmpeg-resampler ,
+ffmpeg-codecs ,
+ffmpeg-bitstream-filters ,
+ffmpeg-formats ,
+ffmpeg-devices ,
+ffmpeg-protocols ,
+ffmpeg-filters
+
+
+
+
9 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffplay-all.html b/Externals/ffmpeg/dev/doc/ffplay-all.html
new file mode 100644
index 0000000000..1264b50ebd
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffplay-all.html
@@ -0,0 +1,21308 @@
+
+
+
+
+
+
+ ffplay Documentation
+
+
+
+
+
+
+
+
+ ffplay Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Synopsis# TOC
+
+
ffplay [options ] [input_file ]
+
+
+
2 Description# TOC
+
+
FFplay is a very simple and portable media player using the FFmpeg
+libraries and the SDL library. It is mostly used as a testbed for the
+various FFmpeg APIs.
+
+
+
3 Options# TOC
+
+
All the numerical options, if not specified otherwise, accept a string
+representing a number as input, which may be followed by one of the SI
+unit prefixes, for example: ’K’, ’M’, or ’G’.
+
+
If ’i’ is appended to the SI unit prefix, the complete prefix will be
+interpreted as a unit prefix for binary multiples, which are based on
+powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
+prefix multiplies the value by 8. This allows using, for example:
+’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
+
+
Options which do not take arguments are boolean options, and set the
+corresponding value to true. They can be set to false by prefixing
+the option name with "no". For example using "-nofoo"
+will set the boolean option with name "foo" to false.
+
+
+
3.1 Stream specifiers# TOC
+
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
+are used to precisely specify which stream(s) a given option belongs to.
+
+
A stream specifier is a string generally appended to the option name and
+separated from it by a colon. E.g. -codec:a:1 ac3
contains the
+a:1
stream specifier, which matches the second audio stream. Therefore, it
+would select the ac3 codec for the second audio stream.
+
+
A stream specifier can match several streams, so that the option is applied to all
+of them. E.g. the stream specifier in -b:a 128k
matches all audio
+streams.
+
+
An empty stream specifier matches all streams. For example, -codec copy
+or -codec: copy
would copy all the streams without reencoding.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index. E.g. -threads:1 4
would set the
+thread count for the second stream to 4.
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
+’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
+stream number stream_index of this type. Otherwise, it matches all
+streams of this type.
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number stream_index
+in the program with the id program_id . Otherwise, it matches all streams in the
+program.
+
+#stream_id or i:stream_id
+Match the stream by stream id (e.g. PID in MPEG-TS container).
+
+m:key [:value ]
+Matches streams with the metadata tag key having the specified value. If
+value is not given, matches streams that contain the given tag with any
+value.
+
+Note that in ffmpeg
, matching by metadata will only work properly for
+input files.
+
+
+
+
+
3.2 Generic options# TOC
+
+
These options are shared amongst the ff* tools.
+
+
+-L
+Show license.
+
+
+-h, -?, -help, --help [arg ]
+Show help. An optional parameter may be specified to print help about a specific
+item. If no argument is specified, only basic (non advanced) tool
+options are shown.
+
+Possible values of arg are:
+
+long
+Print advanced tool options in addition to the basic tool options.
+
+
+full
+Print complete list of options, including shared and private options
+for encoders, decoders, demuxers, muxers, filters, etc.
+
+
+decoder=decoder_name
+Print detailed information about the decoder named decoder_name . Use the
+-decoders option to get a list of all decoders.
+
+
+encoder=encoder_name
+Print detailed information about the encoder named encoder_name . Use the
+-encoders option to get a list of all encoders.
+
+
+demuxer=demuxer_name
+Print detailed information about the demuxer named demuxer_name . Use the
+-formats option to get a list of all demuxers and muxers.
+
+
+muxer=muxer_name
+Print detailed information about the muxer named muxer_name . Use the
+-formats option to get a list of all muxers and demuxers.
+
+
+filter=filter_name
+Print detailed information about the filter name filter_name . Use the
+-filters option to get a list of all filters.
+
+
+
+
+-version
+Show version.
+
+
+-formats
+Show available formats (including devices).
+
+
+-devices
+Show available devices.
+
+
+-codecs
+Show all codecs known to libavcodec.
+
+Note that the term ’codec’ is used throughout this documentation as a shortcut
+for what is more correctly called a media bitstream format.
+
+
+-decoders
+Show available decoders.
+
+
+-encoders
+Show all available encoders.
+
+
+-bsfs
+Show available bitstream filters.
+
+
+-protocols
+Show available protocols.
+
+
+-filters
+Show available libavfilter filters.
+
+
+-pix_fmts
+Show available pixel formats.
+
+
+-sample_fmts
+Show available sample formats.
+
+
+-layouts
+Show channel names and standard channel layouts.
+
+
+-colors
+Show recognized color names.
+
+
+-sources device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sources of the input device.
+Some devices may provide system-dependent source names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sources pulse,server=192.168.0.4
+
+
+
+-sinks device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sinks of the output device.
+Some devices may provide system-dependent sink names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sinks pulse,server=192.168.0.4
+
+
+
+-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+Set the logging level used by the library.
+Adding "repeat+" indicates that repeated log output should not be compressed
+to the first line and the "Last message repeated n times" line will be
+omitted. "repeat" can also be used alone.
+If "repeat" is used alone, and with no prior loglevel set, the default
+loglevel will be used. If multiple loglevel parameters are given, using
+’repeat’ will not change the loglevel.
+loglevel is a string or a number containing one of the following values:
+
+‘quiet, -8 ’
+Show nothing at all; be silent.
+
+‘panic, 0 ’
+Only show fatal errors which could lead the process to crash, such as
+an assert failure. This is not currently used for anything.
+
+‘fatal, 8 ’
+Only show fatal errors. These are errors after which the process absolutely
+cannot continue.
+
+‘error, 16 ’
+Show all errors, including ones which can be recovered from.
+
+‘warning, 24 ’
+Show all warnings and errors. Any message related to possibly
+incorrect or unexpected events will be shown.
+
+‘info, 32 ’
+Show informative messages during processing. This is in addition to
+warnings and errors. This is the default value.
+
+‘verbose, 40 ’
+Same as info
, except more verbose.
+
+‘debug, 48 ’
+Show everything, including debugging information.
+
+
+
+By default the program logs to stderr, if coloring is supported by the
+terminal, colors are used to mark errors and warnings. Log coloring
+can be disabled setting the environment variable
+AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
+the environment variable AV_LOG_FORCE_COLOR
.
+The use of the environment variable NO_COLOR
is deprecated and
+will be dropped in a following FFmpeg version.
+
+
+-report
+Dump full command line and console output to a file named
+program -YYYYMMDD -HHMMSS .log
in the current
+directory.
+This file can be useful for bug reports.
+It also implies -loglevel verbose
.
+
+Setting the environment variable FFREPORT
to any value has the
+same effect. If the value is a ’:’-separated key=value sequence, these
+options will affect the report; option values must be escaped if they
+contain special characters or the options delimiter ’:’ (see the
+“Quoting and escaping” section in the ffmpeg-utils manual).
+
+The following options are recognized:
+
+file
+set the file name to use for the report; %p
is expanded to the name
+of the program, %t
is expanded to a timestamp, %%
is expanded
+to a plain %
+
+level
+set the log verbosity level using a numerical value (see -loglevel
).
+
+
+
+For example, to output a report to a file named ffreport.log
+using a log level of 32
(alias for log level info
):
+
+
+
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
+
+
+Errors in parsing the environment variable are not fatal, and will not
+appear in the report.
+
+
+-hide_banner
+Suppress printing banner.
+
+All FFmpeg tools will normally show a copyright notice, build options
+and library versions. This option can be used to suppress printing
+this information.
+
+
+-cpuflags flags (global )
+Allows setting and clearing cpu flags. This option is intended
+for testing. Do not use it unless you know what you’re doing.
+
+
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+Possible flags for this option are:
+
+‘x86 ’
+
+‘mmx ’
+‘mmxext ’
+‘sse ’
+‘sse2 ’
+‘sse2slow ’
+‘sse3 ’
+‘sse3slow ’
+‘ssse3 ’
+‘atom ’
+‘sse4.1 ’
+‘sse4.2 ’
+‘avx ’
+‘xop ’
+‘fma4 ’
+‘3dnow ’
+‘3dnowext ’
+‘cmov ’
+
+
+‘ARM ’
+
+‘armv5te ’
+‘armv6 ’
+‘armv6t2 ’
+‘vfp ’
+‘vfpv3 ’
+‘neon ’
+
+
+‘PowerPC ’
+
+‘altivec ’
+
+
+‘Specific Processors ’
+
+‘pentium2 ’
+‘pentium3 ’
+‘pentium4 ’
+‘k6 ’
+‘k62 ’
+‘athlon ’
+‘athlonxp ’
+‘k8 ’
+
+
+
+
+
+-opencl_bench
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with --enable-opencl
.
+
+
+-opencl_options options (global )
+Set OpenCL environment options. This option is only available when
+FFmpeg has been compiled with --enable-opencl
.
+
+options must be a list of key =value option pairs
+separated by ’:’. See the “OpenCL Options” section in the
+ffmpeg-utils manual for the list of supported options.
+
+
+
+
+
3.3 AVOptions# TOC
+
+
These options are provided directly by the libavformat, libavdevice and
+libavcodec libraries. To see the list of available AVOptions, use the
+-help option. They are separated into two categories:
+
+generic
+These options can be set for any container, codec or device. Generic options
+are listed under AVFormatContext options for containers/devices and under
+AVCodecContext options for codecs.
+
+private
+These options are specific to the given container, device or codec. Private
+options are listed under their corresponding containers/devices/codecs.
+
+
+
+
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
+an MP3 file, use the id3v2_version private option of the MP3
+muxer:
+
+
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+
+
All codec AVOptions are per-stream, and thus a stream specifier
+should be attached to them.
+
+
Note: the -nooption syntax cannot be used for boolean
+AVOptions, use -option 0 /-option 1 .
+
+
Note: the old undocumented way of specifying per-stream AVOptions by
+prepending v/a/s to the options name is now obsolete and will be
+removed soon.
+
+
+
3.4 Main options# TOC
+
+
+-x width
+Force displayed width.
+
+-y height
+Force displayed height.
+
+-s size
+Set frame size (WxH or abbreviation), needed for videos which do
+not contain a header with the frame size like raw YUV. This option
+has been deprecated in favor of private options, try -video_size.
+
+-fs
+Start in fullscreen mode.
+
+-an
+Disable audio.
+
+-vn
+Disable video.
+
+-sn
+Disable subtitles.
+
+-ss pos
+Seek to a given position in seconds.
+
+-t duration
+play <duration> seconds of audio/video
+
+-bytes
+Seek by bytes.
+
+-nodisp
+Disable graphical display.
+
+-f fmt
+Force format.
+
+-window_title title
+Set window title (default is the input filename).
+
+-loop number
+Loops movie playback <number> times. 0 means forever.
+
+-showmode mode
+Set the show mode to use.
+Available values for mode are:
+
+‘0, video ’
+show video
+
+‘1, waves ’
+show audio waves
+
+‘2, rdft ’
+show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
+
+
+
+Default value is "video", if video is not present or cannot be played
+"rdft" is automatically selected.
+
+You can interactively cycle through the available show modes by
+pressing the key w .
+
+
+-vf filtergraph
+Create the filtergraph specified by filtergraph and use it to
+filter the video stream.
+
+filtergraph is a description of the filtergraph to apply to
+the stream, and must have a single video input and a single video
+output. In the filtergraph, the input is associated to the label
+in
, and the output to the label out
. See the
+ffmpeg-filters manual for more information about the filtergraph
+syntax.
+
+You can specify this parameter multiple times and cycle through the specified
+filtergraphs along with the show modes by pressing the key w .
+
+
+-af filtergraph
+filtergraph is a description of the filtergraph to apply to
+the input audio.
+Use the option "-filters" to show all the available filters (including
+sources and sinks).
+
+
+-i input_file
+Read input_file .
+
+
+
+
+
3.5 Advanced options# TOC
+
+-pix_fmt format
+Set pixel format.
+This option has been deprecated in favor of private options, try -pixel_format.
+
+
+-stats
+Print several playback statistics, in particular show the stream
+duration, the codec parameters, the current position in the stream and
+the audio/video synchronisation drift. It is on by default, to
+explicitly disable it you need to specify -nostats
.
+
+
+-fast
+Non-spec-compliant optimizations.
+
+-genpts
+Generate pts.
+
+-sync type
+Set the master clock to audio (type=audio
), video
+(type=video
) or external (type=ext
). Default is audio. The
+master clock is used to control audio-video synchronization. Most media
+players use audio as master clock, but in some cases (streaming or high
+quality broadcast) it is necessary to change that. This option is mainly
+used for debugging purposes.
+
+-ast audio_stream_specifier
+Select the desired audio stream using the given stream specifier. The stream
+specifiers are described in the Stream specifiers chapter. If this option
+is not specified, the "best" audio stream is selected in the program of the
+already selected video stream.
+
+-vst video_stream_specifier
+Select the desired video stream using the given stream specifier. The stream
+specifiers are described in the Stream specifiers chapter. If this option
+is not specified, the "best" video stream is selected.
+
+-sst subtitle_stream_specifier
+Select the desired subtitle stream using the given stream specifier. The stream
+specifiers are described in the Stream specifiers chapter. If this option
+is not specified, the "best" subtitle stream is selected in the program of the
+already selected video or audio stream.
+
+-autoexit
+Exit when video is done playing.
+
+-exitonkeydown
+Exit if any key is pressed.
+
+-exitonmousedown
+Exit if any mouse button is pressed.
+
+
+-codec:media_specifier codec_name
+Force a specific decoder implementation for the stream identified by
+media_specifier , which can assume the values a
(audio),
+v
(video), and s
+(subtitle).
+
+
+-acodec codec_name
+Force a specific audio decoder.
+
+
+-vcodec codec_name
+Force a specific video decoder.
+
+
+-scodec codec_name
+Force a specific subtitle decoder.
+
+
+-autorotate
+Automatically rotate the video according to presentation metadata. Enabled by
+default, use -noautorotate to disable it.
+
+
+-framedrop
+Drop video frames if video is out of sync. Enabled by default if the master
+clock is not set to video. Use this option to enable frame dropping for all
+master clock sources, use -noframedrop to disable it.
+
+
+-infbuf
+Do not limit the input buffer size, read as much data as possible from the
+input as soon as possible. Enabled by default for realtime streams, where data
+may be dropped if not read in time. Use this option to enable infinite buffers
+for all inputs, use -noinfbuf to disable it.
+
+
+
+
+
+
3.6 While playing# TOC
+
+
+q, ESC
+Quit.
+
+
+f
+Toggle full screen.
+
+
+p, SPC
+Pause.
+
+
+a
+Cycle audio channel in the current program.
+
+
+v
+Cycle video channel.
+
+
+t
+Cycle subtitle channel in the current program.
+
+
+c
+Cycle program.
+
+
+w
+Cycle video filters or show modes.
+
+
+s
+Step to the next frame.
+
+Pause if the stream is not already paused, step to the next video
+frame, and pause.
+
+
+left/right
+Seek backward/forward 10 seconds.
+
+
+down/up
+Seek backward/forward 1 minute.
+
+
+page down/page up
+Seek to the previous/next chapter.
+or if there are no chapters
+Seek backward/forward 10 minutes.
+
+
+mouse click
+Seek to percentage in file corresponding to fraction of width.
+
+
+
+
+
+
+
4 Syntax# TOC
+
+
This section documents the syntax and formats employed by the FFmpeg
+libraries and tools.
+
+
+
4.1 Quoting and escaping# TOC
+
+
FFmpeg adopts the following quoting and escaping mechanism, unless
+explicitly specified. The following rules are applied:
+
+
+ '
and \
are special characters (respectively used for
+quoting and escaping). In addition to them, there might be other
+special characters depending on the specific syntax where the escaping
+and quoting are employed.
+
+ A special character is escaped by prefixing it with a ’\’.
+
+ All characters enclosed between '' (two quote characters) are included literally in the
+parsed string. The quote character '
itself cannot be quoted,
+so you may need to close the quote and escape it.
+
+ Leading and trailing whitespaces, unless escaped or quoted, are
+removed from the parsed string.
+
+
+
Note that you may need to add a second level of escaping when using
+the command line or a script, which depends on the syntax of the
+adopted shell language.
+
+
The function av_get_token
defined in
+libavutil/avstring.h can be used to parse a token quoted or
+escaped according to the rules defined above.
+
+
The tool tools/ffescape in the FFmpeg source tree can be used
+to automatically quote or escape a string in a script.
+
+
+
4.1.1 Examples# TOC
+
+
+ Escape the string Crime d'Amour
containing the '
special
+character:
+
+
+ The string above contains a quote, so the '
needs to be escaped
+when quoting it:
+
+
+ Include leading or trailing whitespaces using quoting:
+
+
' this string starts and ends with whitespaces '
+
+
+ Escaping and quoting can be mixed together:
+
+
' The string '\'string\'' is a string '
+
+
+ To include a literal \
you can use either escaping or quoting:
+
+
'c:\foo' can be written as c:\\foo
+
+
+
+
+
4.2 Date# TOC
+
+
The accepted syntax is:
+
+
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+
+
If the value is "now" it takes the current time.
+
+
Time is local time unless Z is appended, in which case it is
+interpreted as UTC.
+If the year-month-day part is not specified it takes the current
+year-month-day.
+
+
+
4.3 Time duration# TOC
+
+
There are two accepted syntaxes for expressing time duration.
+
+
+
+
HH expresses the number of hours, MM the number of minutes
+for a maximum of 2 digits, and SS the number of seconds for a
+maximum of 2 digits. The m at the end expresses decimal value for
+SS .
+
+
or
+
+
+
+
S expresses the number of seconds, with the optional decimal part
+m .
+
+
In both expressions, the optional ‘- ’ indicates negative duration.
+
+
+
4.3.1 Examples# TOC
+
+
The following examples are all valid time duration:
+
+
+‘55 ’
+55 seconds
+
+
+‘12:03:45 ’
+12 hours, 03 minutes and 45 seconds
+
+
+‘23.189 ’
+23.189 seconds
+
+
+
+
+
4.4 Video size# TOC
+
Specify the size of the sourced video, it may be a string of the form
+width x height , or the name of a size abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+720x480
+
+‘pal ’
+720x576
+
+‘qntsc ’
+352x240
+
+‘qpal ’
+352x288
+
+‘sntsc ’
+640x480
+
+‘spal ’
+768x576
+
+‘film ’
+352x240
+
+‘ntsc-film ’
+352x240
+
+‘sqcif ’
+128x96
+
+‘qcif ’
+176x144
+
+‘cif ’
+352x288
+
+‘4cif ’
+704x576
+
+‘16cif ’
+1408x1152
+
+‘qqvga ’
+160x120
+
+‘qvga ’
+320x240
+
+‘vga ’
+640x480
+
+‘svga ’
+800x600
+
+‘xga ’
+1024x768
+
+‘uxga ’
+1600x1200
+
+‘qxga ’
+2048x1536
+
+‘sxga ’
+1280x1024
+
+‘qsxga ’
+2560x2048
+
+‘hsxga ’
+5120x4096
+
+‘wvga ’
+852x480
+
+‘wxga ’
+1366x768
+
+‘wsxga ’
+1600x1024
+
+‘wuxga ’
+1920x1200
+
+‘woxga ’
+2560x1600
+
+‘wqsxga ’
+3200x2048
+
+‘wquxga ’
+3840x2400
+
+‘whsxga ’
+6400x4096
+
+‘whuxga ’
+7680x4800
+
+‘cga ’
+320x200
+
+‘ega ’
+640x350
+
+‘hd480 ’
+852x480
+
+‘hd720 ’
+1280x720
+
+‘hd1080 ’
+1920x1080
+
+‘2k ’
+2048x1080
+
+‘2kflat ’
+1998x1080
+
+‘2kscope ’
+2048x858
+
+‘4k ’
+4096x2160
+
+‘4kflat ’
+3996x2160
+
+‘4kscope ’
+4096x1716
+
+‘nhd ’
+640x360
+
+‘hqvga ’
+240x160
+
+‘wqvga ’
+400x240
+
+‘fwqvga ’
+432x240
+
+‘hvga ’
+480x320
+
+‘qhd ’
+960x540
+
+
+
+
+
4.5 Video rate# TOC
+
+
Specify the frame rate of a video, expressed as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a float
+number or a valid video frame rate abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+30000/1001
+
+‘pal ’
+25/1
+
+‘qntsc ’
+30000/1001
+
+‘qpal ’
+25/1
+
+‘sntsc ’
+30000/1001
+
+‘spal ’
+25/1
+
+‘film ’
+24/1
+
+‘ntsc-film ’
+24000/1001
+
+
+
+
+
4.6 Ratio# TOC
+
+
A ratio can be expressed as an expression, or in the form
+numerator :denominator .
+
+
Note that a ratio with infinite (1/0) or negative value is
+considered valid, so you should check on the returned value if you
+want to exclude those values.
+
+
The undefined value can be expressed using the "0:0" string.
+
+
+
4.7 Color# TOC
+
+
It can be the name of a color as defined below (case insensitive match) or a
+[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
+representing the alpha component.
+
+
The alpha component may be a string composed by "0x" followed by an
+hexadecimal number or a decimal number between 0.0 and 1.0, which
+represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
+transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
+component is not specified then ‘0xff ’ is assumed.
+
+
The string ‘random ’ will result in a random color.
+
+
The following names of colors are recognized:
+
+‘AliceBlue ’
+0xF0F8FF
+
+‘AntiqueWhite ’
+0xFAEBD7
+
+‘Aqua ’
+0x00FFFF
+
+‘Aquamarine ’
+0x7FFFD4
+
+‘Azure ’
+0xF0FFFF
+
+‘Beige ’
+0xF5F5DC
+
+‘Bisque ’
+0xFFE4C4
+
+‘Black ’
+0x000000
+
+‘BlanchedAlmond ’
+0xFFEBCD
+
+‘Blue ’
+0x0000FF
+
+‘BlueViolet ’
+0x8A2BE2
+
+‘Brown ’
+0xA52A2A
+
+‘BurlyWood ’
+0xDEB887
+
+‘CadetBlue ’
+0x5F9EA0
+
+‘Chartreuse ’
+0x7FFF00
+
+‘Chocolate ’
+0xD2691E
+
+‘Coral ’
+0xFF7F50
+
+‘CornflowerBlue ’
+0x6495ED
+
+‘Cornsilk ’
+0xFFF8DC
+
+‘Crimson ’
+0xDC143C
+
+‘Cyan ’
+0x00FFFF
+
+‘DarkBlue ’
+0x00008B
+
+‘DarkCyan ’
+0x008B8B
+
+‘DarkGoldenRod ’
+0xB8860B
+
+‘DarkGray ’
+0xA9A9A9
+
+‘DarkGreen ’
+0x006400
+
+‘DarkKhaki ’
+0xBDB76B
+
+‘DarkMagenta ’
+0x8B008B
+
+‘DarkOliveGreen ’
+0x556B2F
+
+‘Darkorange ’
+0xFF8C00
+
+‘DarkOrchid ’
+0x9932CC
+
+‘DarkRed ’
+0x8B0000
+
+‘DarkSalmon ’
+0xE9967A
+
+‘DarkSeaGreen ’
+0x8FBC8F
+
+‘DarkSlateBlue ’
+0x483D8B
+
+‘DarkSlateGray ’
+0x2F4F4F
+
+‘DarkTurquoise ’
+0x00CED1
+
+‘DarkViolet ’
+0x9400D3
+
+‘DeepPink ’
+0xFF1493
+
+‘DeepSkyBlue ’
+0x00BFFF
+
+‘DimGray ’
+0x696969
+
+‘DodgerBlue ’
+0x1E90FF
+
+‘FireBrick ’
+0xB22222
+
+‘FloralWhite ’
+0xFFFAF0
+
+‘ForestGreen ’
+0x228B22
+
+‘Fuchsia ’
+0xFF00FF
+
+‘Gainsboro ’
+0xDCDCDC
+
+‘GhostWhite ’
+0xF8F8FF
+
+‘Gold ’
+0xFFD700
+
+‘GoldenRod ’
+0xDAA520
+
+‘Gray ’
+0x808080
+
+‘Green ’
+0x008000
+
+‘GreenYellow ’
+0xADFF2F
+
+‘HoneyDew ’
+0xF0FFF0
+
+‘HotPink ’
+0xFF69B4
+
+‘IndianRed ’
+0xCD5C5C
+
+‘Indigo ’
+0x4B0082
+
+‘Ivory ’
+0xFFFFF0
+
+‘Khaki ’
+0xF0E68C
+
+‘Lavender ’
+0xE6E6FA
+
+‘LavenderBlush ’
+0xFFF0F5
+
+‘LawnGreen ’
+0x7CFC00
+
+‘LemonChiffon ’
+0xFFFACD
+
+‘LightBlue ’
+0xADD8E6
+
+‘LightCoral ’
+0xF08080
+
+‘LightCyan ’
+0xE0FFFF
+
+‘LightGoldenRodYellow ’
+0xFAFAD2
+
+‘LightGreen ’
+0x90EE90
+
+‘LightGrey ’
+0xD3D3D3
+
+‘LightPink ’
+0xFFB6C1
+
+‘LightSalmon ’
+0xFFA07A
+
+‘LightSeaGreen ’
+0x20B2AA
+
+‘LightSkyBlue ’
+0x87CEFA
+
+‘LightSlateGray ’
+0x778899
+
+‘LightSteelBlue ’
+0xB0C4DE
+
+‘LightYellow ’
+0xFFFFE0
+
+‘Lime ’
+0x00FF00
+
+‘LimeGreen ’
+0x32CD32
+
+‘Linen ’
+0xFAF0E6
+
+‘Magenta ’
+0xFF00FF
+
+‘Maroon ’
+0x800000
+
+‘MediumAquaMarine ’
+0x66CDAA
+
+‘MediumBlue ’
+0x0000CD
+
+‘MediumOrchid ’
+0xBA55D3
+
+‘MediumPurple ’
+0x9370D8
+
+‘MediumSeaGreen ’
+0x3CB371
+
+‘MediumSlateBlue ’
+0x7B68EE
+
+‘MediumSpringGreen ’
+0x00FA9A
+
+‘MediumTurquoise ’
+0x48D1CC
+
+‘MediumVioletRed ’
+0xC71585
+
+‘MidnightBlue ’
+0x191970
+
+‘MintCream ’
+0xF5FFFA
+
+‘MistyRose ’
+0xFFE4E1
+
+‘Moccasin ’
+0xFFE4B5
+
+‘NavajoWhite ’
+0xFFDEAD
+
+‘Navy ’
+0x000080
+
+‘OldLace ’
+0xFDF5E6
+
+‘Olive ’
+0x808000
+
+‘OliveDrab ’
+0x6B8E23
+
+‘Orange ’
+0xFFA500
+
+‘OrangeRed ’
+0xFF4500
+
+‘Orchid ’
+0xDA70D6
+
+‘PaleGoldenRod ’
+0xEEE8AA
+
+‘PaleGreen ’
+0x98FB98
+
+‘PaleTurquoise ’
+0xAFEEEE
+
+‘PaleVioletRed ’
+0xD87093
+
+‘PapayaWhip ’
+0xFFEFD5
+
+‘PeachPuff ’
+0xFFDAB9
+
+‘Peru ’
+0xCD853F
+
+‘Pink ’
+0xFFC0CB
+
+‘Plum ’
+0xDDA0DD
+
+‘PowderBlue ’
+0xB0E0E6
+
+‘Purple ’
+0x800080
+
+‘Red ’
+0xFF0000
+
+‘RosyBrown ’
+0xBC8F8F
+
+‘RoyalBlue ’
+0x4169E1
+
+‘SaddleBrown ’
+0x8B4513
+
+‘Salmon ’
+0xFA8072
+
+‘SandyBrown ’
+0xF4A460
+
+‘SeaGreen ’
+0x2E8B57
+
+‘SeaShell ’
+0xFFF5EE
+
+‘Sienna ’
+0xA0522D
+
+‘Silver ’
+0xC0C0C0
+
+‘SkyBlue ’
+0x87CEEB
+
+‘SlateBlue ’
+0x6A5ACD
+
+‘SlateGray ’
+0x708090
+
+‘Snow ’
+0xFFFAFA
+
+‘SpringGreen ’
+0x00FF7F
+
+‘SteelBlue ’
+0x4682B4
+
+‘Tan ’
+0xD2B48C
+
+‘Teal ’
+0x008080
+
+‘Thistle ’
+0xD8BFD8
+
+‘Tomato ’
+0xFF6347
+
+‘Turquoise ’
+0x40E0D0
+
+‘Violet ’
+0xEE82EE
+
+‘Wheat ’
+0xF5DEB3
+
+‘White ’
+0xFFFFFF
+
+‘WhiteSmoke ’
+0xF5F5F5
+
+‘Yellow ’
+0xFFFF00
+
+‘YellowGreen ’
+0x9ACD32
+
+
+
+
+
4.8 Channel Layout# TOC
+
+
A channel layout specifies the spatial disposition of the channels in
+a multi-channel audio stream. To specify a channel layout, FFmpeg
+makes use of a special syntax.
+
+
Individual channels are identified by an id, as given by the table
+below:
+
+‘FL ’
+front left
+
+‘FR ’
+front right
+
+‘FC ’
+front center
+
+‘LFE ’
+low frequency
+
+‘BL ’
+back left
+
+‘BR ’
+back right
+
+‘FLC ’
+front left-of-center
+
+‘FRC ’
+front right-of-center
+
+‘BC ’
+back center
+
+‘SL ’
+side left
+
+‘SR ’
+side right
+
+‘TC ’
+top center
+
+‘TFL ’
+top front left
+
+‘TFC ’
+top front center
+
+‘TFR ’
+top front right
+
+‘TBL ’
+top back left
+
+‘TBC ’
+top back center
+
+‘TBR ’
+top back right
+
+‘DL ’
+downmix left
+
+‘DR ’
+downmix right
+
+‘WL ’
+wide left
+
+‘WR ’
+wide right
+
+‘SDL ’
+surround direct left
+
+‘SDR ’
+surround direct right
+
+‘LFE2 ’
+low frequency 2
+
+
+
+
Standard channel layout compositions can be specified by using the
+following identifiers:
+
+‘mono ’
+FC
+
+‘stereo ’
+FL+FR
+
+‘2.1 ’
+FL+FR+LFE
+
+‘3.0 ’
+FL+FR+FC
+
+‘3.0(back) ’
+FL+FR+BC
+
+‘4.0 ’
+FL+FR+FC+BC
+
+‘quad ’
+FL+FR+BL+BR
+
+‘quad(side) ’
+FL+FR+SL+SR
+
+‘3.1 ’
+FL+FR+FC+LFE
+
+‘5.0 ’
+FL+FR+FC+BL+BR
+
+‘5.0(side) ’
+FL+FR+FC+SL+SR
+
+‘4.1 ’
+FL+FR+FC+LFE+BC
+
+‘5.1 ’
+FL+FR+FC+LFE+BL+BR
+
+‘5.1(side) ’
+FL+FR+FC+LFE+SL+SR
+
+‘6.0 ’
+FL+FR+FC+BC+SL+SR
+
+‘6.0(front) ’
+FL+FR+FLC+FRC+SL+SR
+
+‘hexagonal ’
+FL+FR+FC+BL+BR+BC
+
+‘6.1 ’
+FL+FR+FC+LFE+BC+SL+SR
+
+‘6.1(back) ’
+FL+FR+FC+LFE+BL+BR+BC
+
+‘6.1(front) ’
+FL+FR+LFE+FLC+FRC+SL+SR
+
+‘7.0 ’
+FL+FR+FC+BL+BR+SL+SR
+
+‘7.0(front) ’
+FL+FR+FC+FLC+FRC+SL+SR
+
+‘7.1 ’
+FL+FR+FC+LFE+BL+BR+SL+SR
+
+‘7.1(wide) ’
+FL+FR+FC+LFE+BL+BR+FLC+FRC
+
+‘7.1(wide-side) ’
+FL+FR+FC+LFE+FLC+FRC+SL+SR
+
+‘octagonal ’
+FL+FR+FC+BL+BR+BC+SL+SR
+
+‘downmix ’
+DL+DR
+
+
+
+
A custom channel layout can be specified as a sequence of terms, separated by
+’+’ or ’|’. Each term can be:
+
+ the name of a standard channel layout (e.g. ‘mono ’,
+‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
+
+ the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
+
+ a number of channels, in decimal, optionally followed by ’c’, yielding
+the default channel layout for that number of channels (see the
+function av_get_default_channel_layout
)
+
+ a channel layout mask, in hexadecimal starting with "0x" (see the
+AV_CH_*
macros in libavutil/channel_layout.h .
+
+
+
Starting from libavutil version 53 the trailing character "c" to
+specify a number of channels will be required, while a channel layout
+mask could also be specified as a decimal number (if and only if not
+followed by "c").
+
+
See also the function av_get_channel_layout
defined in
+libavutil/channel_layout.h .
+
+
+
5 Expression Evaluation# TOC
+
+
When evaluating an arithmetic expression, FFmpeg uses an internal
+formula evaluator, implemented through the libavutil/eval.h
+interface.
+
+
An expression may contain unary, binary operators, constants, and
+functions.
+
+
Two expressions expr1 and expr2 can be combined to form
+another expression "expr1 ;expr2 ".
+expr1 and expr2 are evaluated in turn, and the new
+expression evaluates to the value of expr2 .
+
+
The following binary operators are available: +
, -
,
+*
, /
, ^
.
+
+
The following unary operators are available: +
, -
.
+
+
The following functions are available:
+
+abs(x)
+Compute absolute value of x .
+
+
+acos(x)
+Compute arccosine of x .
+
+
+asin(x)
+Compute arcsine of x .
+
+
+atan(x)
+Compute arctangent of x .
+
+
+between(x, min, max)
+Return 1 if x is greater than or equal to min and lesser than or
+equal to max , 0 otherwise.
+
+
+bitand(x, y)
+bitor(x, y)
+Compute bitwise and/or operation on x and y .
+
+The results of the evaluation of x and y are converted to
+integers before executing the bitwise operation.
+
+Note that both the conversion to integer and the conversion back to
+floating point can lose precision. Beware of unexpected results for
+large numbers (usually 2^53 and larger).
+
+
+ceil(expr)
+Round the value of expression expr upwards to the nearest
+integer. For example, "ceil(1.5)" is "2.0".
+
+
+clip(x, min, max)
+Return the value of x clipped between min and max .
+
+
+cos(x)
+Compute cosine of x .
+
+
+cosh(x)
+Compute hyperbolic cosine of x .
+
+
+eq(x, y)
+Return 1 if x and y are equivalent, 0 otherwise.
+
+
+exp(x)
+Compute exponential of x (with base e
, the Euler’s number).
+
+
+floor(expr)
+Round the value of expression expr downwards to the nearest
+integer. For example, "floor(-1.5)" is "-2.0".
+
+
+gauss(x)
+Compute Gauss function of x , corresponding to
+exp(-x*x/2) / sqrt(2*PI)
.
+
+
+gcd(x, y)
+Return the greatest common divisor of x and y . If both x and
+y are 0 or either or both are less than zero then behavior is undefined.
+
+
+gt(x, y)
+Return 1 if x is greater than y , 0 otherwise.
+
+
+gte(x, y)
+Return 1 if x is greater than or equal to y , 0 otherwise.
+
+
+hypot(x, y)
+This function is similar to the C function with the same name; it returns
+"sqrt(x *x + y *y )", the length of the hypotenuse of a
+right triangle with sides of length x and y , or the distance of the
+point (x , y ) from the origin.
+
+
+if(x, y)
+Evaluate x , and if the result is non-zero return the result of
+the evaluation of y , return 0 otherwise.
+
+
+if(x, y, z)
+Evaluate x , and if the result is non-zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+ifnot(x, y)
+Evaluate x , and if the result is zero return the result of the
+evaluation of y , return 0 otherwise.
+
+
+ifnot(x, y, z)
+Evaluate x , and if the result is zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+isinf(x)
+Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
+
+
+isnan(x)
+Return 1.0 if x is NAN, 0.0 otherwise.
+
+
+ld(var)
+Load the value of the internal variable with number
+var , which was previously stored with st(var , expr ).
+The function returns the loaded value.
+
+
+log(x)
+Compute natural logarithm of x .
+
+
+lt(x, y)
+Return 1 if x is lesser than y , 0 otherwise.
+
+
+lte(x, y)
+Return 1 if x is lesser than or equal to y , 0 otherwise.
+
+
+max(x, y)
+Return the maximum between x and y .
+
+
+min(x, y)
+Return the minimum between x and y .
+
+
+mod(x, y)
+Compute the remainder of division of x by y .
+
+
+not(expr)
+Return 1.0 if expr is zero, 0.0 otherwise.
+
+
+pow(x, y)
+Compute x raised to the power of y ; it is equivalent to
+"(x )^(y )".
+
+
+print(t)
+print(t, l)
+Print the value of expression t with loglevel l . If
+l is not specified then a default log level is used.
+Returns the value of the expression printed.
+
+Prints t with loglevel l
+
+
+random(x)
+Return a pseudo random value between 0.0 and 1.0. x is the index of the
+internal variable which will be used to save the seed/state.
+
+
+root(expr, max)
+Find an input value for which the function represented by expr
+with argument ld(0) is 0 in the interval 0..max .
+
+The expression in expr must denote a continuous function or the
+result is undefined.
+
+ld(0) is used to represent the function input value, which means
+that the given expression will be evaluated multiple times with
+various input values that the expression can access through
+ld(0)
. When the expression evaluates to 0 then the
+corresponding input value will be returned.
+
+
+sin(x)
+Compute sine of x .
+
+
+sinh(x)
+Compute hyperbolic sine of x .
+
+
+sqrt(expr)
+Compute the square root of expr . This is equivalent to
+"(expr )^.5".
+
+
+squish(x)
+Compute expression 1/(1 + exp(4*x))
.
+
+
+st(var, expr)
+Store the value of the expression expr in an internal
+variable. var specifies the number of the variable where to
+store the value, and it is a value ranging from 0 to 9. The function
+returns the value stored in the internal variable.
+Note, Variables are currently not shared between expressions.
+
+
+tan(x)
+Compute tangent of x .
+
+
+tanh(x)
+Compute hyperbolic tangent of x .
+
+
+taylor(expr, x)
+taylor(expr, x, id)
+Evaluate a Taylor series at x , given an expression representing
+the ld(id)
-th derivative of a function at 0.
+
+When the series does not converge the result is undefined.
+
+ld(id) is used to represent the derivative order in expr ,
+which means that the given expression will be evaluated multiple times
+with various input values that the expression can access through
+ld(id)
. If id is not specified then 0 is assumed.
+
+Note, when you have the derivatives at y instead of 0,
+taylor(expr, x-y)
can be used.
+
+
+time(0)
+Return the current (wallclock) time in seconds.
+
+
+trunc(expr)
+Round the value of expression expr towards zero to the nearest
+integer. For example, "trunc(-1.5)" is "-1.0".
+
+
+while(cond, expr)
+Evaluate expression expr while the expression cond is
+non-zero, and returns the value of the last expr evaluation, or
+NAN if cond was always false.
+
+
+
+
The following constants are available:
+
+PI
+area of the unit disc, approximately 3.14
+
+E
+exp(1) (Euler’s number), approximately 2.718
+
+PHI
+golden ratio (1+sqrt(5))/2, approximately 1.618
+
+
+
+
Assuming that an expression is considered "true" if it has a non-zero
+value, note that:
+
+
*
works like AND
+
+
+
works like OR
+
+
For example the construct:
+
+
is equivalent to:
+
+
+
In your C code, you can extend the list of unary and binary functions,
+and define recognized constants, so that they are available for your
+expressions.
+
+
The evaluator also recognizes the International System unit prefixes.
+If ’i’ is appended after the prefix, binary prefixes are used, which
+are based on powers of 1024 instead of powers of 1000.
+The ’B’ postfix multiplies the value by 8, and can be appended after a
+unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
+’G’ and ’B’ as number postfix.
+
+
The list of available International System prefixes follows, with
+indication of the corresponding powers of 10 and of 2.
+
+y
+10^-24 / 2^-80
+
+z
+10^-21 / 2^-70
+
+a
+10^-18 / 2^-60
+
+f
+10^-15 / 2^-50
+
+p
+10^-12 / 2^-40
+
+n
+10^-9 / 2^-30
+
+u
+10^-6 / 2^-20
+
+m
+10^-3 / 2^-10
+
+c
+10^-2
+
+d
+10^-1
+
+h
+10^2
+
+k
+10^3 / 2^10
+
+K
+10^3 / 2^10
+
+M
+10^6 / 2^20
+
+G
+10^9 / 2^30
+
+T
+10^12 / 2^40
+
+P
+10^15 / 2^50
+
+E
+10^18 / 2^60
+
+Z
+10^21 / 2^70
+
+Y
+10^24 / 2^80
+
+
+
+
+
+
6 OpenCL Options# TOC
+
+
When FFmpeg is configured with --enable-opencl
, it is possible
+to set the options for the global OpenCL context.
+
+
The list of supported options follows:
+
+
+build_options
+Set build options used to compile the registered kernels.
+
+See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
+
+
+platform_idx
+Select the index of the platform to run OpenCL code.
+
+The specified index must be one of the indexes in the device list
+which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+device_idx
+Select the index of the device used to run OpenCL code.
+
+The specified index must be one of the indexes in the device list which
+can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+
+
+
+
7 Codec Options# TOC
+
+
libavcodec provides some generic global options, which can be set on
+all the encoders and decoders. In addition each codec may support
+so-called private options, which are specific for a given codec.
+
+
Sometimes, a global option may only affect a specific kind of codec,
+and may be nonsensical or ignored by another, so you need to be aware
+of the meaning of the specified options. Also some options are
+meant only for decoding or encoding.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVCodecContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
+The list of supported options follows:
+
+
+b integer (encoding,audio,video )
+Set bitrate in bits/s. Default value is 200K.
+
+
+ab integer (encoding,audio )
+Set audio bitrate (in bits/s). Default value is 128K.
+
+
+bt integer (encoding,video )
+Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
+tolerance specifies how far ratecontrol is willing to deviate from the
+target average bitrate value. This is not related to min/max
+bitrate. Lowering tolerance too much has an adverse effect on quality.
+
+
+flags flags (decoding/encoding,audio,video,subtitles )
+Set generic flags.
+
+Possible values:
+
+‘mv4 ’
+Use four motion vector by macroblock (mpeg4).
+
+‘qpel ’
+Use 1/4 pel motion compensation.
+
+‘loop ’
+Use loop filter.
+
+‘qscale ’
+Use fixed qscale.
+
+‘gmc ’
+Use gmc.
+
+‘mv0 ’
+Always try a mb with mv=<0,0>.
+
+‘input_preserved ’
+‘pass1 ’
+Use internal 2pass ratecontrol in first pass mode.
+
+‘pass2 ’
+Use internal 2pass ratecontrol in second pass mode.
+
+‘gray ’
+Only decode/encode grayscale.
+
+‘emu_edge ’
+Do not draw edges.
+
+‘psnr ’
+Set error[?] variables during encoding.
+
+‘truncated ’
+‘naq ’
+Normalize adaptive quantization.
+
+‘ildct ’
+Use interlaced DCT.
+
+‘low_delay ’
+Force low delay.
+
+‘global_header ’
+Place global headers in extradata instead of every keyframe.
+
+‘bitexact ’
+Only write platform-, build- and time-independent data. (except (I)DCT).
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+‘aic ’
+Apply H263 advanced intra coding / mpeg4 ac prediction.
+
+‘cbp ’
+Deprecated, use mpegvideo private options instead.
+
+‘qprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘ilme ’
+Apply interlaced motion estimation.
+
+‘cgop ’
+Use closed gop.
+
+
+
+
+me_method integer (encoding,video )
+Set motion estimation method.
+
+Possible values:
+
+‘zero ’
+zero motion estimation (fastest)
+
+‘full ’
+full motion estimation (slowest)
+
+‘epzs ’
+EPZS motion estimation (default)
+
+‘esa ’
+esa motion estimation (alias for full)
+
+‘tesa ’
+tesa motion estimation
+
+‘dia ’
+dia motion estimation (alias for epzs)
+
+‘log ’
+log motion estimation
+
+‘phods ’
+phods motion estimation
+
+‘x1 ’
+X1 motion estimation
+
+‘hex ’
+hex motion estimation
+
+‘umh ’
+umh motion estimation
+
+‘iter ’
+iter motion estimation
+
+
+
+
+extradata_size integer
+Set extradata size.
+
+
+time_base rational number
+Set codec time base.
+
+It is the fundamental unit of time (in seconds) in terms of which
+frame timestamps are represented. For fixed-fps content, timebase
+should be 1 / frame_rate
and timestamp increments should be
+identically 1.
+
+
+g integer (encoding,video )
+Set the group of picture size. Default value is 12.
+
+
+ar integer (decoding/encoding,audio )
+Set audio sampling rate (in Hz).
+
+
+ac integer (decoding/encoding,audio )
+Set number of audio channels.
+
+
+cutoff integer (encoding,audio )
+Set cutoff bandwidth.
+
+
+frame_size integer (encoding,audio )
+Set audio frame size.
+
+Each submitted frame except the last must contain exactly frame_size
+samples per channel. May be 0 when the codec has
+CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
+restricted. It is set by some decoders to indicate constant frame
+size.
+
+
+frame_number integer
+Set the frame number.
+
+
+delay integer
+qcomp float (encoding,video )
+Set video quantizer scale compression (VBR). It is used as a constant
+in the ratecontrol equation. Recommended range for default rc_eq:
+0.0-1.0.
+
+
+qblur float (encoding,video )
+Set video quantizer scale blur (VBR).
+
+
+qmin integer (encoding,video )
+Set min video quantizer scale (VBR). Must be included between -1 and
+69, default value is 2.
+
+
+qmax integer (encoding,video )
+Set max video quantizer scale (VBR). Must be included between -1 and
+1024, default value is 31.
+
+
+qdiff integer (encoding,video )
+Set max difference between the quantizer scale (VBR).
+
+
+bf integer (encoding,video )
+Set max number of B frames between non-B-frames.
+
+Must be an integer between -1 and 16. 0 means that B-frames are
+disabled. If a value of -1 is used, it will choose an automatic value
+depending on the encoder.
+
+Default value is 0.
+
+
+b_qfactor float (encoding,video )
+Set qp factor between P and B frames.
+
+
+rc_strategy integer (encoding,video )
+Set ratecontrol method.
+
+
+b_strategy integer (encoding,video )
+Set strategy to choose between I/P/B-frames.
+
+
+ps integer (encoding,video )
+Set RTP payload size in bytes.
+
+
+mv_bits integer
+header_bits integer
+i_tex_bits integer
+p_tex_bits integer
+i_count integer
+p_count integer
+skip_count integer
+misc_bits integer
+frame_bits integer
+codec_tag integer
+bug flags (decoding,video )
+Workaround not auto detected encoder bugs.
+
+Possible values:
+
+‘autodetect ’
+‘old_msmpeg4 ’
+some old lavc generated msmpeg4v3 files (no autodetection)
+
+‘xvid_ilace ’
+Xvid interlacing bug (autodetected if fourcc==XVIX)
+
+‘ump4 ’
+(autodetected if fourcc==UMP4)
+
+‘no_padding ’
+padding bug (autodetected)
+
+‘amv ’
+‘ac_vlc ’
+illegal vlc bug (autodetected per fourcc)
+
+‘qpel_chroma ’
+‘std_qpel ’
+old standard qpel (autodetected per fourcc/version)
+
+‘qpel_chroma2 ’
+‘direct_blocksize ’
+direct-qpel-blocksize bug (autodetected per fourcc/version)
+
+‘edge ’
+edge padding bug (autodetected per fourcc/version)
+
+‘hpel_chroma ’
+‘dc_clip ’
+‘ms ’
+Workaround various bugs in microsoft broken decoders.
+
+‘trunc ’
+truncated frames
+
+
+
+
+lelim integer (encoding,video )
+Set single coefficient elimination threshold for luminance (negative
+values also consider DC coefficient).
+
+
+celim integer (encoding,video )
+Set single coefficient elimination threshold for chrominance (negative
+values also consider dc coefficient)
+
+
+strict integer (decoding/encoding,audio,video )
+Specify how strictly to follow the standards.
+
+Possible values:
+
+‘very ’
+strictly conform to an older, more strict version of the spec or reference software
+
+‘strict ’
+strictly conform to all the things in the spec no matter what consequences
+
+‘normal ’
+‘unofficial ’
+allow unofficial extensions
+
+‘experimental ’
+allow non standardized experimental things, experimental
+(unfinished/work in progress/not well tested) decoders and encoders.
+Note: experimental decoders can pose a security risk, do not use this for
+decoding untrusted input.
+
+
+
+
+b_qoffset float (encoding,video )
+Set QP offset between P and B frames.
+
+
+err_detect flags (decoding,audio,video )
+Set error detection flags.
+
+Possible values:
+
+‘crccheck ’
+verify embedded CRCs
+
+‘bitstream ’
+detect bitstream specification deviations
+
+‘buffer ’
+detect improper bitstream length
+
+‘explode ’
+abort decoding on minor error detection
+
+‘ignore_err ’
+ignore decoding errors, and continue decoding.
+This is useful if you want to analyze the content of a video and thus want
+everything to be decoded no matter what. This option will not result in a video
+that is pleasing to watch in case of errors.
+
+‘careful ’
+consider things that violate the spec and have not been seen in the wild as errors
+
+‘compliant ’
+consider all spec non compliancies as errors
+
+‘aggressive ’
+consider things that a sane encoder should not do as an error
+
+
+
+
+has_b_frames integer
+block_align integer
+mpeg_quant integer (encoding,video )
+Use MPEG quantizers instead of H.263.
+
+
+qsquish float (encoding,video )
+How to keep quantizer between qmin and qmax (0 = clip, 1 = use
+differentiable function).
+
+
+rc_qmod_amp float (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_qmod_freq integer (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_override_count integer
+rc_eq string (encoding,video )
+Set rate control equation. When computing the expression, besides the
+standard functions defined in the section ’Expression Evaluation’, the
+following functions are available: bits2qp(bits), qp2bits(qp). Also
+the following constants are available: iTex pTex tex mv fCode iCount
+mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
+avgTex.
+
+
+maxrate integer (encoding,audio,video )
+Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
+
+
+minrate integer (encoding,audio,video )
+Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
+encode. It is of little use otherwise.
+
+
+bufsize integer (encoding,audio,video )
+Set ratecontrol buffer size (in bits).
+
+
+rc_buf_aggressivity float (encoding,video )
+Currently useless.
+
+
+i_qfactor float (encoding,video )
+Set QP factor between P and I frames.
+
+
+i_qoffset float (encoding,video )
+Set QP offset between P and I frames.
+
+
+rc_init_cplx float (encoding,video )
+Set initial complexity for 1-pass encoding.
+
+
+dct integer (encoding,video )
+Set DCT algorithm.
+
+Possible values:
+
+‘auto ’
+autoselect a good one (default)
+
+‘fastint ’
+fast integer
+
+‘int ’
+accurate integer
+
+‘mmx ’
+‘altivec ’
+‘faan ’
+floating point AAN DCT
+
+
+
+
+lumi_mask float (encoding,video )
+Compress bright areas stronger than medium ones.
+
+
+tcplx_mask float (encoding,video )
+Set temporal complexity masking.
+
+
+scplx_mask float (encoding,video )
+Set spatial complexity masking.
+
+
+p_mask float (encoding,video )
+Set inter masking.
+
+
+dark_mask float (encoding,video )
+Compress dark areas stronger than medium ones.
+
+
+idct integer (decoding/encoding,video )
+Select IDCT implementation.
+
+Possible values:
+
+‘auto ’
+‘int ’
+‘simple ’
+‘simplemmx ’
+‘simpleauto ’
+Automatically pick a IDCT compatible with the simple one
+
+
+‘arm ’
+‘altivec ’
+‘sh4 ’
+‘simplearm ’
+‘simplearmv5te ’
+‘simplearmv6 ’
+‘simpleneon ’
+‘simplealpha ’
+‘ipp ’
+‘xvidmmx ’
+‘faani ’
+floating point AAN IDCT
+
+
+
+
+slice_count integer
+ec flags (decoding,video )
+Set error concealment strategy.
+
+Possible values:
+
+‘guess_mvs ’
+iterative motion vector (MV) search (slow)
+
+‘deblock ’
+use strong deblock filter for damaged MBs
+
+‘favor_inter ’
+favor predicting from the previous frame instead of the current
+
+
+
+
+bits_per_coded_sample integer
+pred integer (encoding,video )
+Set prediction method.
+
+Possible values:
+
+‘left ’
+‘plane ’
+‘median ’
+
+
+
+aspect rational number (encoding,video )
+Set sample aspect ratio.
+
+
+debug flags (decoding/encoding,audio,video,subtitles )
+Print specific debug info.
+
+Possible values:
+
+‘pict ’
+picture info
+
+‘rc ’
+rate control
+
+‘bitstream ’
+‘mb_type ’
+macroblock (MB) type
+
+‘qp ’
+per-block quantization parameter (QP)
+
+‘mv ’
+motion vector
+
+‘dct_coeff ’
+‘skip ’
+‘startcode ’
+‘pts ’
+‘er ’
+error recognition
+
+‘mmco ’
+memory management control operations (H.264)
+
+‘bugs ’
+‘vis_qp ’
+visualize quantization parameter (QP), lower QP are tinted greener
+
+‘vis_mb_type ’
+visualize block types
+
+‘buffers ’
+picture buffer allocations
+
+‘thread_ops ’
+threading operations
+
+‘nomc ’
+skip motion compensation
+
+
+
+
+vismv integer (decoding,video )
+Visualize motion vectors (MVs).
+
+This option is deprecated, see the codecview filter instead.
+
+Possible values:
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+cmp integer (encoding,video )
+Set full pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+subcmp integer (encoding,video )
+Set sub pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+mbcmp integer (encoding,video )
+Set macroblock compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+ildctcmp integer (encoding,video )
+Set interlaced dct compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+dia_size integer (encoding,video )
+Set diamond type & size for motion estimation.
+
+
+last_pred integer (encoding,video )
+Set amount of motion predictors from the previous frame.
+
+
+preme integer (encoding,video )
+Set pre motion estimation.
+
+
+precmp integer (encoding,video )
+Set pre motion estimation compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+pre_dia_size integer (encoding,video )
+Set diamond type & size for motion estimation pre-pass.
+
+
+subq integer (encoding,video )
+Set sub pel motion estimation quality.
+
+
+dtg_active_format integer
+me_range integer (encoding,video )
+Set limit motion vectors range (1023 for DivX player).
+
+
+ibias integer (encoding,video )
+Set intra quant bias.
+
+
+pbias integer (encoding,video )
+Set inter quant bias.
+
+
+color_table_id integer
+global_quality integer (encoding,audio,video )
+coder integer (encoding,video )
+
+Possible values:
+
+‘vlc ’
+variable length coder / huffman coder
+
+‘ac ’
+arithmetic coder
+
+‘raw ’
+raw (no encoding)
+
+‘rle ’
+run-length coder
+
+‘deflate ’
+deflate-based coder
+
+
+
+
+context integer (encoding,video )
+Set context model.
+
+
+slice_flags integer
+xvmc_acceleration integer
+mbd integer (encoding,video )
+Set macroblock decision algorithm (high quality mode).
+
+Possible values:
+
+‘simple ’
+use mbcmp (default)
+
+‘bits ’
+use fewest bits
+
+‘rd ’
+use best rate distortion
+
+
+
+
+stream_codec_tag integer
+sc_threshold integer (encoding,video )
+Set scene change threshold.
+
+
+lmin integer (encoding,video )
+Set min lagrange factor (VBR).
+
+
+lmax integer (encoding,video )
+Set max lagrange factor (VBR).
+
+
+nr integer (encoding,video )
+Set noise reduction.
+
+
+rc_init_occupancy integer (encoding,video )
+Set number of bits which should be loaded into the rc buffer before
+decoding starts.
+
+
+flags2 flags (decoding/encoding,audio,video )
+
+Possible values:
+
+‘fast ’
+Allow non spec compliant speedup tricks.
+
+‘sgop ’
+Deprecated, use mpegvideo private options instead.
+
+‘noout ’
+Skip bitstream encoding.
+
+‘ignorecrop ’
+Ignore cropping information from sps.
+
+‘local_header ’
+Place global headers at every keyframe instead of in extradata.
+
+‘chunks ’
+Frame data might be split into multiple chunks.
+
+‘showall ’
+Show all frames before the first keyframe.
+
+‘skiprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘export_mvs ’
+Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
+for codecs that support it. See also doc/examples/export_mvs.c .
+
+
+
+
+error integer (encoding,video )
+qns integer (encoding,video )
+Deprecated, use mpegvideo private options instead.
+
+
+threads integer (decoding/encoding,video )
+
+Possible values:
+
+‘auto ’
+detect a good number of threads
+
+
+
+
+me_threshold integer (encoding,video )
+Set motion estimation threshold.
+
+
+mb_threshold integer (encoding,video )
+Set macroblock threshold.
+
+
+dc integer (encoding,video )
+Set intra_dc_precision.
+
+
+nssew integer (encoding,video )
+Set nsse weight.
+
+
+skip_top integer (decoding,video )
+Set number of macroblock rows at the top which are skipped.
+
+
+skip_bottom integer (decoding,video )
+Set number of macroblock rows at the bottom which are skipped.
+
+
+profile integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+‘aac_main ’
+‘aac_low ’
+‘aac_ssr ’
+‘aac_ltp ’
+‘aac_he ’
+‘aac_he_v2 ’
+‘aac_ld ’
+‘aac_eld ’
+‘mpeg2_aac_low ’
+‘mpeg2_aac_he ’
+‘mpeg4_sp ’
+‘mpeg4_core ’
+‘mpeg4_main ’
+‘mpeg4_asp ’
+‘dts ’
+‘dts_es ’
+‘dts_96_24 ’
+‘dts_hd_hra ’
+‘dts_hd_ma ’
+
+
+
+level integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+
+
+
+lowres integer (decoding,audio,video )
+Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
+
+
+skip_threshold integer (encoding,video )
+Set frame skip threshold.
+
+
+skip_factor integer (encoding,video )
+Set frame skip factor.
+
+
+skip_exp integer (encoding,video )
+Set frame skip exponent.
+Negative values behave identical to the corresponding positive ones, except
+that the score is normalized.
+Positive values exist primarily for compatibility reasons and are not so useful.
+
+
+skipcmp integer (encoding,video )
+Set frame skip compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+border_mask float (encoding,video )
+Increase the quantizer for macroblocks close to borders.
+
+
+mblmin integer (encoding,video )
+Set min macroblock lagrange factor (VBR).
+
+
+mblmax integer (encoding,video )
+Set max macroblock lagrange factor (VBR).
+
+
+mepc integer (encoding,video )
+Set motion estimation bitrate penalty compensation (1.0 = 256).
+
+
+skip_loop_filter integer (decoding,video )
+skip_idct integer (decoding,video )
+skip_frame integer (decoding,video )
+
+Make decoder discard processing depending on the frame type selected
+by the option value.
+
+skip_loop_filter skips frame loop filtering, skip_idct
+skips frame IDCT/dequantization, skip_frame skips decoding.
+
+Possible values:
+
+‘none ’
+Discard no frame.
+
+
+‘default ’
+Discard useless frames like 0-sized frames.
+
+
+‘noref ’
+Discard all non-reference frames.
+
+
+‘bidir ’
+Discard all bidirectional frames.
+
+
+‘nokey ’
+Discard all frames except keyframes.
+
+
+‘all ’
+Discard all frames.
+
+
+
+Default value is ‘default ’.
+
+
+bidir_refine integer (encoding,video )
+Refine the two motion vectors used in bidirectional macroblocks.
+
+
+brd_scale integer (encoding,video )
+Downscale frames for dynamic B-frame decision.
+
+
+keyint_min integer (encoding,video )
+Set minimum interval between IDR-frames.
+
+
+refs integer (encoding,video )
+Set reference frames to consider for motion compensation.
+
+
+chromaoffset integer (encoding,video )
+Set chroma qp offset from luma.
+
+
+trellis integer (encoding,audio,video )
+Set rate-distortion optimal quantization.
+
+
+sc_factor integer (encoding,video )
+Set value multiplied by qscale for each frame and added to
+scene_change_score.
+
+
+mv0_threshold integer (encoding,video )
+b_sensitivity integer (encoding,video )
+Adjust sensitivity of b_frame_strategy 1.
+
+
+compression_level integer (encoding,audio,video )
+min_prediction_order integer (encoding,audio )
+max_prediction_order integer (encoding,audio )
+timecode_frame_start integer (encoding,video )
+Set GOP timecode frame start number, in non drop frame format.
+
+
+request_channels integer (decoding,audio )
+Set desired number of audio channels.
+
+
+bits_per_raw_sample integer
+channel_layout integer (decoding/encoding,audio )
+
+Possible values:
+
+request_channel_layout integer (decoding,audio )
+
+Possible values:
+
+rc_max_vbv_use float (encoding,video )
+rc_min_vbv_use float (encoding,video )
+ticks_per_frame integer (decoding/encoding,audio,video )
+color_primaries integer (decoding/encoding,video )
+color_trc integer (decoding/encoding,video )
+colorspace integer (decoding/encoding,video )
+color_range integer (decoding/encoding,video )
+chroma_sample_location integer (decoding/encoding,video )
+log_level_offset integer
+Set the log level offset.
+
+
+slices integer (encoding,video )
+Number of slices, used in parallelized encoding.
+
+
+thread_type flags (decoding/encoding,video )
+Select which multithreading methods to use.
+
+Use of ‘frame ’ will increase decoding delay by one frame per
+thread, so clients which cannot provide future frames should not use
+it.
+
+Possible values:
+
+‘slice ’
+Decode more than one part of a single frame at once.
+
+Multithreading using slices works only when the video was encoded with
+slices.
+
+
+‘frame ’
+Decode more than one frame at once.
+
+
+
+Default value is ‘slice+frame ’.
+
+
+audio_service_type integer (encoding,audio )
+Set audio service type.
+
+Possible values:
+
+‘ma ’
+Main Audio Service
+
+‘ef ’
+Effects
+
+‘vi ’
+Visually Impaired
+
+‘hi ’
+Hearing Impaired
+
+‘di ’
+Dialogue
+
+‘co ’
+Commentary
+
+‘em ’
+Emergency
+
+‘vo ’
+Voice Over
+
+‘ka ’
+Karaoke
+
+
+
+
+request_sample_fmt sample_fmt (decoding,audio )
+Set sample format audio decoders should prefer. Default value is
+none
.
+
+
+pkt_timebase rational number
+sub_charenc encoding (decoding,subtitles )
+Set the input subtitles character encoding.
+
+
+field_order field_order (video )
+Set/override the field order of the video.
+Possible values:
+
+‘progressive ’
+Progressive video
+
+‘tt ’
+Interlaced video, top field coded and displayed first
+
+‘bb ’
+Interlaced video, bottom field coded and displayed first
+
+‘tb ’
+Interlaced video, top coded first, bottom displayed first
+
+‘bt ’
+Interlaced video, bottom coded first, top displayed first
+
+
+
+
+skip_alpha integer (decoding,video )
+Set to 1 to disable processing alpha (transparency). This works like the
+‘gray ’ flag in the flags option which skips chroma information
+instead of alpha. Default is 0.
+
+
+codec_whitelist list (input )
+"," separated List of allowed decoders. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+Stream parameters.
+For example to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
+
8 Decoders# TOC
+
+
Decoders are configured elements in FFmpeg which allow the decoding of
+multimedia streams.
+
+
When you configure your FFmpeg build, all the supported native decoders
+are enabled by default. Decoders requiring an external library must be enabled
+manually via the corresponding --enable-lib
option. You can list all
+available decoders using the configure option --list-decoders
.
+
+
You can disable all the decoders with the configure option
+--disable-decoders
and selectively enable / disable single decoders
+with the options --enable-decoder=DECODER
/
+--disable-decoder=DECODER
.
+
+
The option -decoders
of the ff* tools will display the list of
+enabled decoders.
+
+
+
+
9 Video Decoders# TOC
+
+
A description of some of the currently available video decoders
+follows.
+
+
+
9.1 rawvideo# TOC
+
+
Raw video decoder.
+
+
This decoder decodes rawvideo streams.
+
+
+
9.1.1 Options# TOC
+
+
+top top_field_first
+Specify the assumed field type of the input video.
+
+-1
+the video is assumed to be progressive (default)
+
+0
+bottom-field-first is assumed
+
+1
+top-field-first is assumed
+
+
+
+
+
+
+
+
+
10 Audio Decoders# TOC
+
+
A description of some of the currently available audio decoders
+follows.
+
+
+
10.1 ac3# TOC
+
+
AC-3 audio decoder.
+
+
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
+the undocumented RealAudio 3 (a.k.a. dnet).
+
+
+
10.1.1 AC-3 Decoder Options# TOC
+
+
+-drc_scale value
+Dynamic Range Scale Factor. The factor to apply to dynamic range values
+from the AC-3 stream. This factor is applied exponentially.
+There are 3 notable scale factor ranges:
+
+drc_scale == 0
+DRC disabled. Produces full range audio.
+
+0 < drc_scale <= 1
+DRC enabled. Applies a fraction of the stream DRC value.
+Audio reproduction is between full range and full compression.
+
+drc_scale > 1
+DRC enabled. Applies drc_scale asymmetrically.
+Loud sounds are fully compressed. Soft sounds are enhanced.
+
+
+
+
+
+
+
+
10.2 ffwavesynth# TOC
+
+
+Internal wave synthesizer.
+
+
This decoder generates wave patterns according to predefined sequences. Its
+use is purely internal and the format of the data it accepts is not publicly
+documented.
+
+
+
10.3 libcelt# TOC
+
+
libcelt decoder wrapper.
+
+
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
+Requires the presence of the libcelt headers and library during configuration.
+You need to explicitly configure the build with --enable-libcelt
.
+
+
+
10.4 libgsm# TOC
+
+
libgsm decoder wrapper.
+
+
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
+the presence of the libgsm headers and library during configuration. You need
+to explicitly configure the build with --enable-libgsm
.
+
+
This decoder supports both the ordinary GSM and the Microsoft variant.
+
+
+
10.5 libilbc# TOC
+
+
libilbc decoder wrapper.
+
+
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
+audio codec. Requires the presence of the libilbc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libilbc
.
+
+
+
10.5.1 Options# TOC
+
+
The following option is supported by the libilbc wrapper.
+
+
+enhance
+
+Enable the enhancement of the decoded audio when set to 1. The default
+value is 0 (disabled).
+
+
+
+
+
+
10.6 libopencore-amrnb# TOC
+
+
libopencore-amrnb decoder wrapper.
+
+
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
+Narrowband audio codec. Using it requires the presence of the
+libopencore-amrnb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrnb
.
+
+
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
+without this library.
+
+
+
10.7 libopencore-amrwb# TOC
+
+
libopencore-amrwb decoder wrapper.
+
+
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
+Wideband audio codec. Using it requires the presence of the
+libopencore-amrwb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrwb
.
+
+
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
+without this library.
+
+
+
10.8 libopus# TOC
+
+
libopus decoder wrapper.
+
+
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
+Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopus
.
+
+
An FFmpeg native decoder for Opus exists, so users can decode Opus
+without this library.
+
+
+
+
11 Subtitles Decoders# TOC
+
+
+
11.1 dvdsub# TOC
+
+
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
+also be found in VobSub file pairs and in some Matroska files.
+
+
+
11.1.1 Options# TOC
+
+
+palette
+Specify the global palette used by the bitmaps. When stored in VobSub, the
+palette is normally specified in the index file; in Matroska, the palette is
+stored in the codec extra-data in the same format as in VobSub. In DVDs, the
+palette is stored in the IFO file, and therefore not available when reading
+from dumped VOB files.
+
+The format for this option is a string containing 16 24-bits hexadecimal
+numbers (without 0x prefix) separated by commas, for example 0d00ee,
+ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
+7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
+
+
+ifo_palette
+Specify the IFO file from which the global palette is obtained.
+(experimental)
+
+
+forced_subs_only
+Only decode subtitle entries marked as forced. Some titles have forced
+and non-forced subtitles in the same track. Setting this flag to 1
+will only keep the forced subtitles. Default value is 0
.
+
+
+
+
+
11.2 libzvbi-teletext# TOC
+
+
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
+subtitles. Requires the presence of the libzvbi headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libzvbi
.
+
+
+
11.2.1 Options# TOC
+
+
+txt_page
+List of teletext page numbers to decode. You may use the special * string to
+match all pages. Pages that do not match the specified list are dropped.
+Default value is *.
+
+txt_chop_top
+Discards the top teletext line. Default value is 1.
+
+txt_format
+Specifies the format of the decoded subtitles. The teletext decoder is capable
+of decoding the teletext pages to bitmaps or to simple text, you should use
+"bitmap" for teletext pages, because certain graphics and colors cannot be
+expressed in simple text. You might use "text" for teletext based subtitles if
+your application can handle simple text based subtitles. Default value is
+bitmap.
+
+txt_left
+X offset of generated bitmaps, default is 0.
+
+txt_top
+Y offset of generated bitmaps, default is 0.
+
+txt_chop_spaces
+Chops leading and trailing spaces and removes empty lines from the generated
+text. This option is useful for teletext based subtitles where empty spaces may
+be present at the start or at the end of the lines or empty lines may be
+present between the subtitle lines because of double-sized teletext characters.
+Default value is 1.
+
+txt_duration
+Sets the display duration of the decoded teletext pages or subtitles in
+milliseconds. Default value is 30000 which is 30 seconds.
+
+txt_transparent
+Force transparent background of the generated teletext bitmaps. Default value
+is 0 which means an opaque (black) background.
+
+
+
+
+
12 Bitstream Filters# TOC
+
+
When you configure your FFmpeg build, all the supported bitstream
+filters are enabled by default. You can list all available ones using
+the configure option --list-bsfs
.
+
+
You can disable all the bitstream filters using the configure option
+--disable-bsfs
, and selectively enable any bitstream filter using
+the option --enable-bsf=BSF
, or you can disable a particular
+bitstream filter using the option --disable-bsf=BSF
.
+
+
The option -bsfs
of the ff* tools will display the list of
+all the supported bitstream filters included in your build.
+
+
The ff* tools have a -bsf option applied per stream, taking a
+comma-separated list of filters, whose parameters follow the filter
+name after a ’=’.
+
+
+
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
+
+
+
Below is a description of the currently available bitstream filters,
+with their parameters, if any.
+
+
+
12.1 aac_adtstoasc# TOC
+
+
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
+bitstream filter.
+
+
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
+ADTS header and removes the ADTS header.
+
+
This is required for example when copying an AAC stream from a raw
+ADTS AAC container to a FLV or a MOV/MP4 file.
+
+
+
12.2 chomp# TOC
+
+
Remove zero padding at the end of a packet.
+
+
+
12.3 dump_extra# TOC
+
+
Add extradata to the beginning of the filtered packets.
+
+
The additional argument specifies which packets should be filtered.
+It accepts the values:
+
+‘a ’
+add extradata to all key packets, but only if local_header is
+set in the flags2 codec context field
+
+
+‘k ’
+add extradata to all key packets
+
+
+‘e ’
+add extradata to all packets
+
+
+
+
If not specified it is assumed ‘k ’.
+
+
For example the following ffmpeg
command forces a global
+header (thus disabling individual packet headers) in the H.264 packets
+generated by the libx264
encoder, but corrects them by adding
+the header stored in extradata to the key packets:
+
+
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+
+
+
12.4 h264_mp4toannexb# TOC
+
+
Convert an H.264 bitstream from length prefixed mode to start code
+prefixed mode (as defined in the Annex B of the ITU-T H.264
+specification).
+
+
This is required by some streaming formats, typically the MPEG-2
+transport stream format ("mpegts").
+
+
For example to remux an MP4 file containing an H.264 stream to mpegts
+format with ffmpeg
, you can use the command:
+
+
+
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+
+
+
12.5 imxdump# TOC
+
+
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
+Pro decoder. This filter only applies to the mpeg2video codec, and is
+likely not needed for Final Cut Pro 7 and newer with the appropriate
+-tag:v .
+
+
For example, to remux 30 MB/sec NTSC IMX to MOV:
+
+
+
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
+
+
+
+
12.6 mjpeg2jpeg# TOC
+
+
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
+
+
MJPEG is a video codec wherein each video frame is essentially a
+JPEG image. The individual frames can be extracted without loss,
+e.g. by
+
+
+
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+
+
Unfortunately, these chunks are incomplete JPEG images, because
+they lack the DHT segment required for decoding. Quoting from
+http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
+
+
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
+commented that "MJPEG, or at least the MJPEG in AVIs having the
+MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
+Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
+and it must use basic Huffman encoding, not arithmetic or
+progressive. . . . You can indeed extract the MJPEG frames and
+decode them with a regular JPEG decoder, but you have to prepend
+the DHT segment to them, or else the decoder won’t have any idea
+how to decompress the data. The exact table necessary is given in
+the OpenDML spec."
+
+
This bitstream filter patches the header of frames extracted from an MJPEG
+stream (carrying the AVI1 header ID and lacking a DHT segment) to
+produce fully qualified JPEG images.
+
+
+
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+
+
+
12.7 mjpega_dump_header# TOC
+
+
+
12.8 movsub# TOC
+
+
+
12.9 mp3_header_decompress# TOC
+
+
+
12.10 noise# TOC
+
+
Damages the contents of packets without damaging the container. Can be
+used for fuzzing or testing error resilience/concealment.
+
+
Parameters:
+A numeral string, whose value is related to how often output bytes will
+be modified. Therefore, values below or equal to 0 are forbidden, and
+the lower the more frequent bytes will be modified, with 1 meaning
+every byte is modified.
+
+
+
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
+
+
applies the modification to every byte.
+
+
+
12.11 remove_extra# TOC
+
+
+
13 Format Options# TOC
+
+
The libavformat library provides some generic global options, which
+can be set on all the muxers and demuxers. In addition each muxer or
+demuxer may support so-called private options, which are specific for
+that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
The list of supported options follows:
+
+
+avioflags flags (input/output )
+Possible values:
+
+‘direct ’
+Reduce buffering.
+
+
+
+
+probesize integer (input )
+Set probing size in bytes, i.e. the size of the data to analyze to get
+stream information. A higher value allows more information to be
+detected in case it is dispersed into the stream, but will increase
+latency. Must be an integer not less than 32. It is 5000000 by default.
+
+
+packetsize integer (output )
+Set packet size.
+
+
+fflags flags (input/output )
+Set format flags.
+
+Possible values:
+
+‘ignidx ’
+Ignore index.
+
+‘genpts ’
+Generate PTS.
+
+‘nofillin ’
+Do not fill in missing values that can be exactly calculated.
+
+‘noparse ’
+Disable AVParsers, this needs +nofillin
too.
+
+‘igndts ’
+Ignore DTS.
+
+‘discardcorrupt ’
+Discard corrupted frames.
+
+‘sortdts ’
+Try to interleave output packets by DTS.
+
+‘keepside ’
+Do not merge side data.
+
+‘latm ’
+Enable RTP MP4A-LATM payload.
+
+‘nobuffer ’
+Reduce the latency introduced by optional buffering
+
+‘bitexact ’
+Only write platform-, build- and time-independent data.
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+
+
+
+seek2any integer (input )
+Allow seeking to non-keyframes on demuxer level when supported if set to 1.
+Default is 0.
+
+
+analyzeduration integer (input )
+Specify how many microseconds are analyzed to probe the input. A
+higher value allows more accurate information to be detected, but will
+increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
+
+
+cryptokey hexadecimal string (input )
+Set decryption key.
+
+
+indexmem integer (input )
+Set max memory used for timestamp index (per stream).
+
+
+rtbufsize integer (input )
+Set max memory used for buffering real-time frames.
+
+
+fdebug flags (input/output )
+Print specific debug info.
+
+Possible values:
+
+‘ts ’
+
+
+
+max_delay integer (input/output )
+Set maximum muxing or demuxing delay in microseconds.
+
+
+fpsprobesize integer (input )
+Set number of frames used to probe fps.
+
+
+audio_preload integer (output )
+Set microseconds by which audio packets should be interleaved earlier.
+
+
+chunk_duration integer (output )
+Set microseconds for each chunk.
+
+
+chunk_size integer (output )
+Set size in bytes for each chunk.
+
+
+err_detect, f_err_detect flags (input )
+Set error detection flags. f_err_detect
is deprecated and
+should be used only via the ffmpeg
tool.
+
+Possible values:
+
+‘crccheck ’
+Verify embedded CRCs.
+
+‘bitstream ’
+Detect bitstream specification deviations.
+
+‘buffer ’
+Detect improper bitstream length.
+
+‘explode ’
+Abort decoding on minor error detection.
+
+‘careful ’
+Consider things that violate the spec and have not been seen in the
+wild as errors.
+
+‘compliant ’
+Consider all spec non compliancies as errors.
+
+‘aggressive ’
+Consider things that a sane encoder should not do as an error.
+
+
+
+
+use_wallclock_as_timestamps integer (input )
+Use wallclock as timestamps.
+
+
+avoid_negative_ts integer (output )
+
+Possible values:
+
+‘make_non_negative ’
+Shift timestamps to make them non-negative.
+Also note that this affects only leading negative timestamps, and not
+non-monotonic negative timestamps.
+
+‘make_zero ’
+Shift timestamps so that the first timestamp is 0.
+
+‘auto (default) ’
+Enables shifting when required by the target format.
+
+‘disabled ’
+Disables shifting of timestamp.
+
+
+
+When shifting is enabled, all output timestamps are shifted by the
+same amount. Audio, video, and subtitles desynching and relative
+timestamp differences are preserved compared to how they would have
+been without shifting.
+
+
+skip_initial_bytes integer (input )
+Set number of bytes to skip before reading header and frames if set to 1.
+Default is 0.
+
+
+correct_ts_overflow integer (input )
+Correct single timestamp overflows if set to 1. Default is 1.
+
+
+flush_packets integer (output )
+Flush the underlying I/O stream after each packet. Default 1 enables it, and
+has the effect of reducing the latency; 0 disables it and may slightly
+increase performance in some cases.
+
+
+output_ts_offset offset (output )
+Set the output time offset.
+
+offset must be a time duration specification,
+see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+The offset is added by the muxer to the output timestamps.
+
+Specifying a positive offset means that the corresponding streams are
+delayed by the time duration specified in offset . Default value
+is 0
(meaning that no offset is applied).
+
+
+format_whitelist list (input )
+"," separated List of allowed demuxers. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+Stream parameters.
+For example to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
13.1 Format stream specifiers# TOC
+
+
Format stream specifiers allow selection of one or more streams that
+match specific properties.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index.
+
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio,
+’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
+stream_index is given, then it matches the stream number
+stream_index of this type. Otherwise, it matches all streams of
+this type.
+
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number
+stream_index in the program with the id
+program_id . Otherwise, it matches all streams in the program.
+
+
+#stream_id
+Matches the stream by a format-specific ID.
+
+
+
+
The exact semantics of stream specifiers is defined by the
+avformat_match_stream_specifier()
function declared in the
+libavformat/avformat.h header.
+
+
+
14 Demuxers# TOC
+
+
Demuxers are configured elements in FFmpeg that can read the
+multimedia streams from a particular type of file.
+
+
When you configure your FFmpeg build, all the supported demuxers
+are enabled by default. You can list all available ones using the
+configure option --list-demuxers
.
+
+
You can disable all the demuxers using the configure option
+--disable-demuxers
, and selectively enable a single demuxer with
+the option --enable-demuxer=DEMUXER
, or disable it
+with the option --disable-demuxer=DEMUXER
.
+
+
The option -formats
of the ff* tools will display the list of
+enabled demuxers.
+
+
The description of some of the currently available demuxers follows.
+
+
+
14.1 applehttp# TOC
+
+
Apple HTTP Live Streaming demuxer.
+
+
This demuxer presents all AVStreams from all variant streams.
+The id field is set to the bitrate variant index number. By setting
+the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
+the caller can decide which variant streams to actually receive.
+The total bitrate of the variant that the stream belongs to is
+available in a metadata key named "variant_bitrate".
+
+
+
14.2 apng# TOC
+
+
Animated Portable Network Graphics demuxer.
+
+
This demuxer is used to demux APNG files.
+All headers, but the PNG signature, up to (but not including) the first
+fcTL chunk are transmitted as extradata.
+Frames are then split as being all the chunks between two fcTL ones, or
+between the last fcTL and IEND chunks.
+
+
+-ignore_loop bool
+Ignore the loop variable in the file if set.
+
+-max_fps int
+Maximum framerate in frames per second (0 for no limit).
+
+-default_fps int
+Default framerate in frames per second when none is specified in the file
+(0 meaning as fast as possible).
+
+
+
+
+
14.3 asf# TOC
+
+
Advanced Systems Format demuxer.
+
+
This demuxer is used to demux ASF files and MMS network streams.
+
+
+-no_resync_search bool
+Do not try to resynchronize by looking for a certain optional start code.
+
+
+
+
+
14.4 concat# TOC
+
+
Virtual concatenation script demuxer.
+
+
This demuxer reads a list of files and other directives from a text file and
+demuxes them one after the other, as if all their packets had been muxed
+together.
+
+
The timestamps in the files are adjusted so that the first file starts at 0
+and each next file starts where the previous one finishes. Note that it is
+done globally and may cause gaps if all streams do not have exactly the same
+length.
+
+
All files must have the same streams (same codecs, same time base, etc.).
+
+
The duration of each file is used to adjust the timestamps of the next file:
+if the duration is incorrect (because it was computed using the bit-rate or
+because the file is truncated, for example), it can cause artifacts. The
+duration
directive can be used to override the duration stored in
+each file.
+
+
+
14.4.1 Syntax# TOC
+
+
The script is a text file in extended-ASCII, with one directive per line.
+Empty lines, leading spaces and lines starting with ’#’ are ignored. The
+following directive is recognized:
+
+
+file path
+Path to a file to read; special characters and spaces must be escaped with
+backslash or single quotes.
+
+All subsequent file-related directives apply to that file.
+
+
+ffconcat version 1.0
+Identify the script type and version. It also sets the safe option
+to 1 if it was set to its default -1.
+
+To make FFmpeg recognize the format automatically, this directive must
+appear exactly as is (no extra space or byte-order-mark) on the very first
+line of the script.
+
+
+duration dur
+Duration of the file. This information can be specified from the file;
+specifying it here may be more efficient or help if the information from the
+file is not available or accurate.
+
+If the duration is set for all files, then it is possible to seek in the
+whole concatenated video.
+
+
+stream
+Introduce a stream in the virtual file.
+All subsequent stream-related directives apply to the last introduced
+stream.
+Some streams properties must be set in order to allow identifying the
+matching streams in the subfiles.
+If no streams are defined in the script, the streams from the first file are
+copied.
+
+
+exact_stream_id id
+Set the id of the stream.
+If this directive is given, the stream with the corresponding id in the
+subfiles will be used.
+This is especially useful for MPEG-PS (VOB) files, where the order of the
+streams is not reliable.
+
+
+
+
+
+
14.4.2 Options# TOC
+
+
This demuxer accepts the following option:
+
+
+safe
+If set to 1, reject unsafe file paths. A file path is considered safe if it
+does not contain a protocol specification and is relative and all components
+only contain characters from the portable character set (letters, digits,
+period, underscore and hyphen) and have no period at the beginning of a
+component.
+
+If set to 0, any file name is accepted.
+
+The default is -1, it is equivalent to 1 if the format was automatically
+probed and 0 otherwise.
+
+
+auto_convert
+If set to 1, try to perform automatic conversions on packet data to make the
+streams concatenable.
+
+Currently, the only conversion is adding the h264_mp4toannexb bitstream
+filter to H.264 streams in MP4 format. This is necessary in particular if
+there are resolution changes.
+
+
+
+
+
+
14.5 flv# TOC
+
+
Adobe Flash Video Format demuxer.
+
+
This demuxer is used to demux FLV files and RTMP network streams.
+
+
+-flv_metadata bool
+Allocate the streams according to the onMetaData array content.
+
+
+
+
+
14.6 libgme# TOC
+
+
The Game Music Emu library is a collection of video game music file emulators.
+
+
See http://code.google.com/p/game-music-emu/ for more information.
+
+
Some files have multiple tracks. The demuxer will pick the first track by
+default. The track_index option can be used to select a different
+track. Track indexes start at 0. The demuxer exports the number of tracks as
+tracks meta data entry.
+
+
For very large files, the max_size option may have to be adjusted.
+
+
+
14.7 libquvi# TOC
+
+
Play media from Internet services using the quvi project.
+
+
The demuxer accepts a format option to request a specific quality. It
+is by default set to best .
+
+
See http://quvi.sourceforge.net/ for more information.
+
+
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
+enabled.
+
+
+
14.8 gif# TOC
+
+
Animated GIF demuxer.
+
+
It accepts the following options:
+
+
+min_delay
+Set the minimum valid delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 2.
+
+
+default_delay
+Set the default delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 10.
+
+
+ignore_loop
+GIF files can contain information to loop a certain number of times (or
+infinitely). If ignore_loop is set to 1, then the loop setting
+from the input will be ignored and looping will not occur. If set to 0,
+then looping will occur and will cycle the number of times according to
+the GIF. Default value is 1.
+
+
+
+
For example, with the overlay filter, place an infinitely looping GIF
+over another video:
+
+
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
+
+
+
Note that in the above example the shortest option for overlay filter is
+used to end the output video at the length of the shortest input file,
+which in this case is input.mp4 as the GIF in this example loops
+infinitely.
+
+
+
14.9 image2# TOC
+
+
Image file demuxer.
+
+
This demuxer reads from a list of image files specified by a pattern.
+The syntax and meaning of the pattern is specified by the
+option pattern_type .
+
+
The pattern may contain a suffix which is used to automatically
+determine the format of the images contained in the files.
+
+
The size, the pixel format, and the format of each image must be the
+same for all the files in the sequence.
+
+
This demuxer accepts the following options:
+
+framerate
+Set the frame rate for the video stream. It defaults to 25.
+
+loop
+If set to 1, loop over the input. Default value is 0.
+
+pattern_type
+Select the pattern type used to interpret the provided filename.
+
+pattern_type accepts one of the following values.
+
+sequence
+Select a sequence pattern type, used to specify a sequence of files
+indexed by sequential numbers.
+
+A sequence pattern may contain the string "%d" or "%0Nd", which
+specifies the position of the characters representing a sequential
+number in each filename matched by the pattern. If the form
+"%0Nd" is used, the string representing the number in each
+filename is 0-padded and N is the total number of 0-padded
+digits representing the number. The literal character ’%’ can be
+specified in the pattern with the string "%%".
+
+If the sequence pattern contains "%d" or "%0Nd", the first filename of
+the file list specified by the pattern must contain a number
+inclusively contained between start_number and
+start_number +start_number_range -1, and all the following
+numbers must be sequential.
+
+For example the pattern "img-%03d.bmp" will match a sequence of
+filenames of the form img-001.bmp , img-002.bmp , ...,
+img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
+sequence of filenames of the form i%m%g-1.jpg ,
+i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
+
+Note that the pattern must not necessarily contain "%d" or
+"%0Nd", for example to convert a single image file
+img.jpeg you can employ the command:
+
+
ffmpeg -i img.jpeg img.png
+
+
+
+glob
+Select a glob wildcard pattern type.
+
+The pattern is interpreted like a glob()
pattern. This is only
+selectable if libavformat was compiled with globbing support.
+
+
+glob_sequence (deprecated, will be removed)
+Select a mixed glob wildcard/sequence pattern.
+
+If your version of libavformat was compiled with globbing support, and
+the provided pattern contains at least one glob meta character among
+%*?[]{}
that is preceded by an unescaped "%", the pattern is
+interpreted like a glob()
pattern, otherwise it is interpreted
+like a sequence pattern.
+
+All glob special characters %*?[]{}
must be prefixed
+with "%". To escape a literal "%" you shall use "%%".
+
+For example the pattern foo-%*.jpeg
will match all the
+filenames prefixed by "foo-" and terminating with ".jpeg", and
+foo-%?%?%?.jpeg
will match all the filenames prefixed with
+"foo-", followed by a sequence of three characters, and terminating
+with ".jpeg".
+
+This pattern type is deprecated in favor of glob and
+sequence .
+
+
+
+Default value is glob_sequence .
+
+pixel_format
+Set the pixel format of the images to read. If not specified the pixel
+format is guessed from the first image file in the sequence.
+
+start_number
+Set the index of the file matched by the image file pattern to start
+to read from. Default value is 0.
+
+start_number_range
+Set the index interval range to check when looking for the first image
+file in the sequence, starting from start_number . Default value
+is 5.
+
+ts_from_file
+If set to 1, will set frame timestamp to modification time of image file. Note
+that monotonicity of timestamps is not provided: images go in the same order as
+without this option. Default value is 0.
+If set to 2, will set frame timestamp to the modification time of the image file in
+nanosecond precision.
+
+video_size
+Set the video size of the images to read. If not specified the video
+size is guessed from the first image file in the sequence.
+
+
+
+
+
14.9.1 Examples# TOC
+
+
+ Use ffmpeg
for creating a video from the images in the file
+sequence img-001.jpeg , img-002.jpeg , ..., assuming an
+input frame rate of 10 frames per second:
+
+
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
+
+
+ As above, but start by reading from a file with index 100 in the sequence:
+
+
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
+
+
+ Read images matching the "*.png" glob pattern , that is all the files
+terminating with the ".png" suffix:
+
+
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
+
+
+
+
+
14.10 mpegts# TOC
+
+
MPEG-2 transport stream demuxer.
+
+
+fix_teletext_pts
+Overrides teletext packet PTS and DTS values with the timestamps calculated
+from the PCR of the first program which the teletext stream is part of and is
+not discarded. Default value is 1, set this option to 0 if you want your
+teletext packet PTS and DTS values untouched.
+
+
+
+
+
14.11 rawvideo# TOC
+
+
Raw video demuxer.
+
+
This demuxer allows one to read raw video data. Since there is no header
+specifying the assumed video parameters, the user must specify them
+in order to be able to decode the data correctly.
+
+
This demuxer accepts the following options:
+
+framerate
+Set input video frame rate. Default value is 25.
+
+
+pixel_format
+Set the input video pixel format. Default value is yuv420p
.
+
+
+video_size
+Set the input video size. This value must be specified explicitly.
+
+
+
+
For example to read a rawvideo file input.raw with
+ffplay
, assuming a pixel format of rgb24
, a video
+size of 320x240
, and a frame rate of 10 images per second, use
+the command:
+
+
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+
+
+
14.12 sbg# TOC
+
+
SBaGen script demuxer.
+
+
This demuxer reads the script language used by SBaGen
+http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG
+script looks like that:
+
+
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00 off
+
+
+
A SBG script can mix absolute and relative timestamps. If the script uses
+either only absolute timestamps (including the script start time) or only
+relative ones, then its layout is fixed, and the conversion is
+straightforward. On the other hand, if the script mixes both kind of
+timestamps, then the NOW reference for relative timestamps will be
+taken from the current time of day at the time the script is read, and the
+script layout will be frozen according to that reference. That means that if
+the script is directly played, the actual times will match the absolute
+timestamps up to the sound controller’s clock accuracy, but if the user
+somehow pauses the playback or seeks, all times will be shifted accordingly.
+
+
+
14.13 tedcaptions# TOC
+
+
JSON captions used for TED Talks .
+
+
TED does not provide links to the captions, but they can be guessed from the
+page. The file tools/bookmarklets.html from the FFmpeg source tree
+contains a bookmarklet to expose them.
+
+
This demuxer accepts the following option:
+
+start_time
+Set the start time of the TED talk, in milliseconds. The default is 15000
+(15s). It is used to sync the captions with the downloadable videos, because
+they include a 15s intro.
+
+
+
+
Example: convert the captions to a format most players understand:
+
+
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+
+
+
15 Metadata# TOC
+
+
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
+INI-like text file and then load it back using the metadata muxer/demuxer.
+
+
The file format is as follows:
+
+ A file consists of a header and a number of metadata tags divided into sections,
+each on its own line.
+
+ The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
+
+ Metadata tags are of the form ’key=value’
+
+ Immediately after header follows global metadata
+
+ After global metadata there may be sections with per-stream/per-chapter
+metadata.
+
+ A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
+brackets (’[’, ’]’) and ends with next section or end of file.
+
+ At the beginning of a chapter section there may be an optional timebase to be
+used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
+den are integers. If the timebase is missing then start/end times are assumed to
+be in milliseconds.
+Next a chapter section must contain chapter start and end times in form
+’START=num’, ’END=num’, where num is a positive integer.
+
+ Empty lines and lines starting with ’;’ or ’#’ are ignored.
+
+ Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
+newline) must be escaped with a backslash ’\’.
+
+ Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
+the tag (in the example above key is ’foo ’, value is ’ bar’).
+
+
+
A ffmetadata file might look like this:
+
+
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+
+
By using the ffmetadata muxer and demuxer it is possible to extract
+metadata from an input file to an ffmetadata file, and then transcode
+the file into an output file with the edited ffmetadata file.
+
+
Extracting an ffmetadata file with ffmpeg goes as follows:
+
+
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+
+
Reinserting edited metadata information from the FFMETADATAFILE file can
+be done as:
+
+
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+
+
+
16 Protocols# TOC
+
+
Protocols are configured elements in FFmpeg that enable access to
+resources that require specific protocols.
+
+
When you configure your FFmpeg build, all the supported protocols are
+enabled by default. You can list all available ones using the
+configure option "–list-protocols".
+
+
You can disable all the protocols using the configure option
+"–disable-protocols", and selectively enable a protocol using the
+option "–enable-protocol=PROTOCOL ", or you can disable a
+particular protocol using the option
+"–disable-protocol=PROTOCOL ".
+
+
The option "-protocols" of the ff* tools will display the list of
+supported protocols.
+
+
A description of the currently available protocols follows.
+
+
+
16.1 bluray# TOC
+
+
Read BluRay playlist.
+
+
The accepted options are:
+
+angle
+BluRay angle
+
+
+chapter
+Start chapter (1...N)
+
+
+playlist
+Playlist to read (BDMV/PLAYLIST/?????.mpls)
+
+
+
+
+
Examples:
+
+
Read longest playlist from BluRay mounted to /mnt/bluray:
+
+
+
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
+
+
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+
+
+
16.2 cache# TOC
+
+
Caching wrapper for input stream.
+
+
Cache the input stream to temporary file. It brings seeking capability to live streams.
+
+
+
+
+
16.3 concat# TOC
+
+
Physical concatenation protocol.
+
+
+Allow to read and seek from many resources in sequence as if they were
+a unique resource.
+
+
A URL accepted by this protocol has the syntax:
+
+
concat:URL1 |URL2 |...|URLN
+
+
+
where URL1 , URL2 , ..., URLN are the urls of the
+resource to be concatenated, each one possibly specifying a distinct
+protocol.
+
+
For example to read a sequence of files split1.mpeg ,
+split2.mpeg , split3.mpeg with ffplay
use the
+command:
+
+
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+
+
Note that you may need to escape the character "|" which is special for
+many shells.
+
+
+
16.4 crypto# TOC
+
+
AES-encrypted stream reading protocol.
+
+
The accepted options are:
+
+key
+Set the AES decryption key binary block from given hexadecimal representation.
+
+
+iv
+Set the AES decryption initialization vector binary block from given hexadecimal representation.
+
+
+
+
Accepted URL formats:
+
+
crypto:URL
+crypto+URL
+
+
+
+
16.5 data# TOC
+
+
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
+
+
For example, to convert a GIF file given inline with ffmpeg
:
+
+
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+
+
+
16.6 file# TOC
+
+
File access protocol.
+
+
Allow to read from or write to a file.
+
+
A file URL can have the form:
+
+
+
where filename is the path of the file to read.
+
+
An URL that does not have a protocol prefix will be assumed to be a
+file URL. Depending on the build, an URL that looks like a Windows
+path with the drive letter at the beginning will also be assumed to be
+a file URL (usually not the case in builds for unix-like systems).
+
+
For example to read from a file input.mpeg with ffmpeg
+use the command:
+
+
ffmpeg -i file:input.mpeg output.mpeg
+
+
+
This protocol accepts the following options:
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable for files on slow medium.
+
+
+
+
+
16.7 ftp# TOC
+
+
FTP (File Transfer Protocol).
+
+
Allow to read from or write to remote resources using FTP protocol.
+
+
Following syntax is required.
+
+
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+ftp-anonymous-password
+Password used when login as anonymous user. Typically an e-mail address
+should be used.
+
+
+ftp-write-seekable
+Control seekability of connection during encoding. If set to 1 the
+resource is supposed to be seekable, if set to 0 it is assumed not
+to be seekable. Default value is 0.
+
+
+
+
NOTE: Protocol can be used as output, but it is recommended to not do
+it, unless special care is taken (tests, customized server configuration
+etc.). Different FTP servers behave in different way during seek
+operation. ff* tools may produce incomplete content due to server limitations.
+
+
+
16.8 gopher# TOC
+
+
Gopher protocol.
+
+
+
16.9 hls# TOC
+
+
Read Apple HTTP Live Streaming compliant segmented stream as
+a uniform one. The M3U8 playlists describing the segments can be
+remote HTTP resources or local files, accessed using the standard
+file protocol.
+The nested protocol is declared by specifying
+"+proto " after the hls URI scheme name, where proto
+is either "file" or "http".
+
+
+
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+
+
Using this protocol is discouraged - the hls demuxer should work
+just as well (if not, please report the issues) and is more complete.
+To use the hls demuxer instead, simply use the direct URLs to the
+m3u8 files.
+
+
+
16.10 http# TOC
+
+
HTTP (Hyper Text Transfer Protocol).
+
+
This protocol accepts the following options:
+
+
+seekable
+Control seekability of connection. If set to 1 the resource is
+supposed to be seekable, if set to 0 it is assumed not to be seekable,
+if set to -1 it will try to autodetect if it is seekable. Default
+value is -1.
+
+
+chunked_post
+If set to 1 use chunked Transfer-Encoding for posts, default is 1.
+
+
+content_type
+Set a specific content type for the POST messages.
+
+
+headers
+Set custom HTTP headers, can override built in default headers. The
+value must be a string encoding the headers.
+
+
+multiple_requests
+Use persistent connections if set to 1, default is 0.
+
+
+post_data
+Set custom HTTP post data.
+
+
+user-agent
+user_agent
+Override the User-Agent header. If not specified the protocol will use a
+string describing the libavformat build. ("Lavf/<version>")
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+mime_type
+Export the MIME type.
+
+
+icy
+If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
+supports this, the metadata has to be retrieved by the application by reading
+the icy_metadata_headers and icy_metadata_packet options.
+The default is 1.
+
+
+icy_metadata_headers
+If the server supports ICY metadata, this contains the ICY-specific HTTP reply
+headers, separated by newline characters.
+
+
+icy_metadata_packet
+If the server supports ICY metadata, and icy was set to 1, this
+contains the last non-empty metadata packet sent by the server. It should be
+polled in regular intervals by applications interested in mid-stream metadata
+updates.
+
+
+cookies
+Set the cookies to be sent in future requests. The format of each cookie is the
+same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
+delimited by a newline character.
+
+
+offset
+Set initial byte offset.
+
+
+end_offset
+Try to limit the request to bytes preceding this offset.
+
+
+
+
+
16.10.1 HTTP Cookies# TOC
+
+
Some HTTP requests will be denied unless cookie values are passed in with the
+request. The cookies option allows these cookies to be specified. At
+the very least, each cookie must specify a value along with a path and domain.
+HTTP requests that match both the domain and path will automatically include the
+cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
+by a newline.
+
+
The required syntax to play a stream specifying a cookie is:
+
+
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+
+
+
16.11 Icecast# TOC
+
+
Icecast protocol (stream to Icecast servers)
+
+
This protocol accepts the following options:
+
+
+ice_genre
+Set the stream genre.
+
+
+ice_name
+Set the stream name.
+
+
+ice_description
+Set the stream description.
+
+
+ice_url
+Set the stream website URL.
+
+
+ice_public
+Set if the stream should be public.
+The default is 0 (not public).
+
+
+user_agent
+Override the User-Agent header. If not specified a string of the form
+"Lavf/<version>" will be used.
+
+
+password
+Set the Icecast mountpoint password.
+
+
+content_type
+Set the stream content type. This must be set if it is different from
+audio/mpeg.
+
+
+legacy_icecast
+This enables support for Icecast versions < 2.4.0, that do not support the
+HTTP PUT method but the SOURCE method.
+
+
+
+
+
+
icecast://[username [:password ]@]server :port /mountpoint
+
+
+
+
16.12 mmst# TOC
+
+
MMS (Microsoft Media Server) protocol over TCP.
+
+
+
16.13 mmsh# TOC
+
+
MMS (Microsoft Media Server) protocol over HTTP.
+
+
The required syntax is:
+
+
mmsh://server [:port ][/app ][/playpath ]
+
+
+
+
16.14 md5# TOC
+
+
MD5 output protocol.
+
+
Computes the MD5 hash of the data to be written, and on close writes
+this to the designated output or stdout if none is specified. It can
+be used to test muxers without writing an actual file.
+
+
Some examples follow.
+
+
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+
+
Note that some formats (typically MOV) require the output protocol to
+be seekable, so they will fail with the MD5 output protocol.
+
+
+
16.15 pipe# TOC
+
+
UNIX pipe access protocol.
+
+
Allow to read and write from UNIX pipes.
+
+
The accepted syntax is:
+
+
+
number is the number corresponding to the file descriptor of the
+pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
+is not specified, by default the stdout file descriptor will be used
+for writing, stdin for reading.
+
+
For example to read from stdin with ffmpeg
:
+
+
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+
+
For writing to stdout with ffmpeg
:
+
+
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+
+
This protocol accepts the following options:
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable if data transmission is slow.
+
+
+
+
Note that some formats (typically MOV), require the output protocol to
+be seekable, so they will fail with the pipe output protocol.
+
+
+
16.16 rtmp# TOC
+
+
Real-Time Messaging Protocol.
+
+
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
+content across a TCP/IP network.
+
+
The required syntax is:
+
+
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
+
+
+
The accepted parameters are:
+
+username
+An optional username (mostly for publishing).
+
+
+password
+An optional password (mostly for publishing).
+
+
+server
+The address of the RTMP server.
+
+
+port
+The number of the TCP port to use (by default is 1935).
+
+
+app
+It is the name of the application to access. It usually corresponds to
+the path where the application is installed on the RTMP server
+(e.g. /ondemand/ , /flash/live/ , etc.). You can override
+the value parsed from the URI through the rtmp_app
option, too.
+
+
+playpath
+It is the path or name of the resource to play with reference to the
+application specified in app , may be prefixed by "mp4:". You
+can override the value parsed from the URI through the rtmp_playpath
+option, too.
+
+
+listen
+Act as a server, listening for an incoming connection.
+
+
+timeout
+Maximum time to wait for the incoming connection. Implies listen.
+
+
+
+
Additionally, the following parameters can be set via command line options
+(or in code via AVOption
s):
+
+rtmp_app
+Name of application to connect on the RTMP server. This option
+overrides the parameter specified in the URI.
+
+
+rtmp_buffer
+Set the client buffer time in milliseconds. The default is 3000.
+
+
+rtmp_conn
+Extra arbitrary AMF connection parameters, parsed from a string,
+e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
+Each value is prefixed by a single character denoting the type,
+B for Boolean, N for number, S for string, O for object, or Z for null,
+followed by a colon. For Booleans the data must be either 0 or 1 for
+FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
+1 to end or begin an object, respectively. Data items in subobjects may
+be named, by prefixing the type with ’N’ and specifying the name before
+the value (i.e. NB:myFlag:1
). This option may be used multiple
+times to construct arbitrary AMF sequences.
+
+
+rtmp_flashver
+Version of the Flash plugin used to run the SWF player. The default
+is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
+<libavformat version>).)
+
+
+rtmp_flush_interval
+Number of packets flushed in the same request (RTMPT only). The default
+is 10.
+
+
+rtmp_live
+Specify that the media is a live stream. No resuming or seeking in
+live streams is possible. The default value is any
, which means the
+subscriber first tries to play the live stream specified in the
+playpath. If a live stream of that name is not found, it plays the
+recorded stream. The other possible values are live
and
+recorded
.
+
+
+rtmp_pageurl
+URL of the web page in which the media was embedded. By default no
+value will be sent.
+
+
+rtmp_playpath
+Stream identifier to play or to publish. This option overrides the
+parameter specified in the URI.
+
+
+rtmp_subscribe
+Name of live stream to subscribe to. By default no value will be sent.
+It is only sent if the option is specified or if rtmp_live
+is set to live.
+
+
+rtmp_swfhash
+SHA256 hash of the decompressed SWF file (32 bytes).
+
+
+rtmp_swfsize
+Size of the decompressed SWF file, required for SWFVerification.
+
+
+rtmp_swfurl
+URL of the SWF player for the media. By default no value will be sent.
+
+
+rtmp_swfverify
+URL to player swf file, compute hash/size automatically.
+
+
+rtmp_tcurl
+URL of the target stream. Defaults to proto://host[:port]/app.
+
+
+
+
+
For example to read with ffplay
a multimedia resource named
+"sample" from the application "vod" from an RTMP server "myserver":
+
+
ffplay rtmp://myserver/vod/sample
+
+
+
To publish to a password protected server, passing the playpath and
+app names separately:
+
+
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+
+
+
16.17 rtmpe# TOC
+
+
Encrypted Real-Time Messaging Protocol.
+
+
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
+streaming multimedia content within standard cryptographic primitives,
+consisting of Diffie-Hellman key exchange and HMACSHA256, generating
+a pair of RC4 keys.
+
+
+
16.18 rtmps# TOC
+
+
Real-Time Messaging Protocol over a secure SSL connection.
+
+
The Real-Time Messaging Protocol (RTMPS) is used for streaming
+multimedia content across an encrypted connection.
+
+
+
16.19 rtmpt# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
+for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
16.20 rtmpte# TOC
+
+
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
+is used for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
16.21 rtmpts# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTPS.
+
+
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
+for streaming multimedia content within HTTPS requests to traverse
+firewalls.
+
+
+
16.22 libsmbclient# TOC
+
+
libsmbclient permits one to manipulate CIFS/SMB network resources.
+
+
+The following syntax is required.
+
+
+
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in milliseconds of socket I/O operations used by the underlying
+low level operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+workgroup
+Set the workgroup used for making connections. By default workgroup is not specified.
+
+
+
+
+
For more information see: http://www.samba.org/ .
+
+
+
16.23 libssh# TOC
+
+
Secure File Transfer Protocol via libssh
+
+
+Allows reading from or writing to remote resources using the SFTP protocol.
+
+
+The following syntax is required.
+
+
+
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+private_key
+Specify the path of the file containing private key to use during authorization.
+By default libssh searches for keys in the ~/.ssh/ directory.
+
+
+
+
+
Example: Play a file stored on remote server.
+
+
+
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+
+
+
16.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
+
+
Real-Time Messaging Protocol and its variants supported through
+librtmp.
+
+
Requires the presence of the librtmp headers and library during
+configuration. You need to explicitly configure the build with
+"–enable-librtmp". If enabled this will replace the native RTMP
+protocol.
+
+
This protocol provides most client functions and a few server
+functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
+encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
+variants of these encrypted types (RTMPTE, RTMPTS).
+
+
The required syntax is:
+
+
rtmp_proto ://server [:port ][/app ][/playpath ] options
+
+
+
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
+"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
+server , port , app and playpath have the same
+meaning as specified for the RTMP native protocol.
+options contains a list of space-separated options of the form
+key =val .
+
+
See the librtmp manual page (man 3 librtmp) for more information.
+
+
For example, to stream a file in real-time to an RTMP server using
+ffmpeg
:
+
+
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+
+
To play the same stream using ffplay
:
+
+
ffplay "rtmp://myserver/live/mystream live=1"
+
+
+
+
16.25 rtp# TOC
+
+
Real-time Transport Protocol.
+
+
The required syntax for an RTP URL is:
+rtp://hostname [:port ][?option =val ...]
+
+
port specifies the RTP port to use.
+
+
The following URL options are supported:
+
+
+ttl=n
+Set the TTL (Time-To-Live) value (for multicast only).
+
+
+rtcpport=n
+Set the remote RTCP port to n .
+
+
+localrtpport=n
+Set the local RTP port to n .
+
+
+localrtcpport=n
+Set the local RTCP port to n .
+
+
+pkt_size=n
+Set max packet size (in bytes) to n .
+
+
+connect=0|1
+Do a connect()
on the UDP socket (if set to 1) or not (if set
+to 0).
+
+
+sources=ip [,ip ]
+List allowed source IP addresses.
+
+
+block=ip [,ip ]
+List disallowed (blocked) source IP addresses.
+
+
+write_to_source=0|1
+Send packets to the source address of the latest received packet (if
+set to 1) or to a default remote address (if set to 0).
+
+
+localport=n
+Set the local RTP port to n .
+
+This is a deprecated option. Instead, localrtpport should be
+used.
+
+
+
+
+
Important notes:
+
+
+ If rtcpport is not set the RTCP port will be set to the RTP
+port value plus 1.
+
+ If localrtpport (the local RTP port) is not set any available
+port will be used for the local RTP and RTCP ports.
+
+ If localrtcpport (the local RTCP port) is not set it will be
+set to the local RTP port value plus 1.
+
+
+
+
16.26 rtsp# TOC
+
+
Real-Time Streaming Protocol.
+
+
RTSP is not technically a protocol handler in libavformat, it is a demuxer
+and muxer. The demuxer supports both normal RTSP (with data transferred
+over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
+data transferred over RDT).
+
+
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
+supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
+RTSP server ).
+
+
The required syntax for a RTSP url is:
+
+
rtsp://hostname [:port ]/path
+
+
+
Options can be set on the ffmpeg
/ffplay
command
+line, or set in code via AVOption
s or in
+avformat_open_input
.
+
+
The following options are supported.
+
+
+initial_pause
+Do not start playing the stream immediately if set to 1. Default value
+is 0.
+
+
+rtsp_transport
+Set RTSP transport protocols.
+
+It accepts the following values:
+
+‘udp ’
+Use UDP as lower transport protocol.
+
+
+‘tcp ’
+Use TCP (interleaving within the RTSP control channel) as lower
+transport protocol.
+
+
+‘udp_multicast ’
+Use UDP multicast as lower transport protocol.
+
+
+‘http ’
+Use HTTP tunneling as lower transport protocol, which is useful for
+passing proxies.
+
+
+
+Multiple lower transport protocols may be specified, in that case they are
+tried one at a time (if the setup of one fails, the next one is tried).
+For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
+
+
+rtsp_flags
+Set RTSP flags.
+
+The following values are accepted:
+
+‘filter_src ’
+Accept packets only from negotiated peer address and port.
+
+‘listen ’
+Act as a server, listening for an incoming connection.
+
+‘prefer_tcp ’
+Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
+
+
+
+Default value is ‘none ’.
+
+
+allowed_media_types
+Set media types to accept from the server.
+
+The following flags are accepted:
+
+‘video ’
+‘audio ’
+‘data ’
+
+
+By default it accepts all media types.
+
+
+min_port
+Set minimum local UDP port. Default value is 5000.
+
+
+max_port
+Set maximum local UDP port. Default value is 65000.
+
+
+timeout
+Set maximum timeout (in seconds) to wait for incoming connections.
+
+A value of -1 means infinite (default). This option implies the
+rtsp_flags set to ‘listen ’.
+
+
+reorder_queue_size
+Set number of packets to buffer for handling of reordered packets.
+
+
+stimeout
+Set socket TCP I/O timeout in microseconds.
+
+
+user-agent
+Override User-Agent header. If not specified, it defaults to the
+libavformat identifier string.
+
+
+
+
When receiving data over UDP, the demuxer tries to reorder received packets
+(since they may arrive out of order, or packets may get lost totally). This
+can be disabled by setting the maximum demuxing delay to zero (via
+the max_delay
field of AVFormatContext).
+
+
When watching multi-bitrate Real-RTSP streams with ffplay
, the
+streams to display can be chosen with -vst
n and
+-ast
n for video and audio respectively, and can be switched
+on the fly by pressing v
and a
.
+
+
+
16.26.1 Examples# TOC
+
+
The following examples all make use of the ffplay
and
+ffmpeg
tools.
+
+
+ Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
+
+
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
+
+
+ Watch a stream tunneled over HTTP:
+
+
ffplay -rtsp_transport http rtsp://server/video.mp4
+
+
+ Send a stream in realtime to a RTSP server, for others to watch:
+
+
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
+
+
+ Receive a stream in realtime:
+
+
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
+
+
+
+
+
16.27 sap# TOC
+
+
Session Announcement Protocol (RFC 2974). This is not technically a
+protocol handler in libavformat, it is a muxer and demuxer.
+It is used for signalling of RTP streams, by announcing the SDP for the
+streams regularly on a separate port.
+
+
+
16.27.1 Muxer# TOC
+
+
The syntax for a SAP url given to the muxer is:
+
+
sap://destination [:port ][?options ]
+
+
+
The RTP packets are sent to destination on port port ,
+or to port 5004 if no port is specified.
+options is a &
-separated list. The following options
+are supported:
+
+
+announce_addr=address
+Specify the destination IP address for sending the announcements to.
+If omitted, the announcements are sent to the commonly used SAP
+announcement multicast address 224.2.127.254 (sap.mcast.net), or
+ff0e::2:7ffe if destination is an IPv6 address.
+
+
+announce_port=port
+Specify the port to send the announcements on, defaults to
+9875 if not specified.
+
+
+ttl=ttl
+Specify the time to live value for the announcements and RTP packets,
+defaults to 255.
+
+
+same_port=0|1
+If set to 1, send all RTP streams on the same port pair. If zero (the
+default), all streams are sent on unique ports, with each stream on a
+port 2 numbers higher than the previous.
+VLC/Live555 requires this to be set to 1, to be able to receive the stream.
+The RTP stack in libavformat for receiving requires all streams to be sent
+on unique ports.
+
+
+
+
Example command lines follow.
+
+
To broadcast a stream on the local subnet, for watching in VLC:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+
+
Similarly, for watching in ffplay
:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+
+
And for watching in ffplay
, over IPv6:
+
+
+
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+
+
+
16.27.2 Demuxer# TOC
+
+
The syntax for a SAP url given to the demuxer is:
+
+
sap://[address ][:port ]
+
+
+
address is the multicast address to listen for announcements on,
+if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
+is the port that is listened on, 9875 if omitted.
+
+
+The demuxer listens for announcements on the given address and port.
+Once an announcement is received, it tries to receive that particular stream.
+
+
Example command lines follow.
+
+
To play back the first stream announced on the normal SAP multicast address:
+
+
+
+
To play back the first stream announced on the default IPv6 SAP multicast address:
+
+
+
ffplay sap://[ff0e::2:7ffe]
+
+
+
+
16.28 sctp# TOC
+
+
Stream Control Transmission Protocol.
+
+
The accepted URL syntax is:
+
+
sctp://host :port [?options ]
+
+
+
The protocol accepts the following options:
+
+listen
+If set to any value, listen for an incoming connection. Outgoing connection is done by default.
+
+
+max_streams
+Set the maximum number of streams. By default no limit is set.
+
+
+
+
+
16.29 srtp# TOC
+
+
Secure Real-time Transport Protocol.
+
+
The accepted options are:
+
+srtp_in_suite
+srtp_out_suite
+Select input and output encoding suites.
+
+Supported values:
+
+‘AES_CM_128_HMAC_SHA1_80 ’
+‘SRTP_AES128_CM_HMAC_SHA1_80 ’
+‘AES_CM_128_HMAC_SHA1_32 ’
+‘SRTP_AES128_CM_HMAC_SHA1_32 ’
+
+
+
+srtp_in_params
+srtp_out_params
+Set input and output encoding parameters, which are expressed by a
+base64-encoded representation of a binary block. The first 16 bytes of
+this binary block are used as master key, the following 14 bytes are
+used as master salt.
+
+
+
+
+
16.30 subfile# TOC
+
+
Virtually extract a segment of a file or another stream.
+The underlying stream must be seekable.
+
+
Accepted options:
+
+start
+Start offset of the extracted segment, in bytes.
+
+end
+End offset of the extracted segment, in bytes.
+
+
+
+
Examples:
+
+
Extract a chapter from a DVD VOB file (start and end sectors obtained
+externally and multiplied by 2048):
+
+
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
+
+
+
Play an AVI file directly from a TAR archive:
+subfile,,start,183241728,end,366490624,,:archive.tar
+
+
+
16.31 tcp# TOC
+
+
Transmission Control Protocol.
+
+
The required syntax for a TCP url is:
+
+
tcp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form
+key =val .
+
+
The list of supported options follows.
+
+
+listen=1|0
+Listen for an incoming connection. Default value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+listen_timeout=microseconds
+Set listen timeout, expressed in microseconds.
+
+
+
+
The following example shows how to setup a listening TCP connection
+with ffmpeg
, which is then accessed with ffplay
:
+
+
ffmpeg -i input -f format tcp://hostname :port ?listen
+ffplay tcp://hostname :port
+
+
+
+
16.32 tls# TOC
+
+
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
+
+
The required syntax for a TLS/SSL url is:
+
+
tls://hostname :port [?options ]
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+ca_file, cafile=filename
+A file containing certificate authority (CA) root certificates to treat
+as trusted. If the linked TLS library contains a default this might not
+need to be specified for verification to work, but not all libraries and
+setups have defaults built in.
+The file must be in OpenSSL PEM format.
+
+
+tls_verify=1|0
+If enabled, try to verify the peer that we are communicating with.
+Note, if using OpenSSL, this currently only makes sure that the
+peer certificate is signed by one of the root certificates in the CA
+database, but it does not validate that the certificate actually
+matches the host name we are trying to connect to. (With GnuTLS,
+the host name is validated as well.)
+
+This is disabled by default since it requires a CA database to be
+provided by the caller in many cases.
+
+
+cert_file, cert=filename
+A file containing a certificate to use in the handshake with the peer.
+(When operating as server, in listen mode, this is more often required
+by the peer, while client certificates only are mandated in certain
+setups.)
+
+
+key_file, key=filename
+A file containing the private key for the certificate.
+
+
+listen=1|0
+If enabled, listen for connections on the provided port, and assume
+the server role in the handshake instead of the client role.
+
+
+
+
+
Example command lines:
+
+
To create a TLS/SSL server that serves an input stream.
+
+
+
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
+
+
+
To play back a stream from the TLS/SSL server using ffplay
:
+
+
+
ffplay tls://hostname :port
+
+
+
+
16.33 udp# TOC
+
+
User Datagram Protocol.
+
+
The required syntax for an UDP URL is:
+
+
udp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form key =val .
+
+
In case threading is enabled on the system, a circular buffer is used
+to store the incoming data, which allows one to reduce loss of data due to
+UDP socket buffer overruns. The fifo_size and
+overrun_nonfatal options are related to this buffer.
+
+
The list of supported options follows.
+
+
+buffer_size=size
+Set the UDP maximum socket buffer size in bytes. This is used to set either
+the receive or send buffer size, depending on what the socket is used for.
+Default is 64KB. See also fifo_size .
+
+
+localport=port
+Override the local UDP port to bind with.
+
+
+localaddr=addr
+Choose the local IP address. This is useful e.g. if sending multicast
+and the host has multiple interfaces, where the user can choose
+which interface to send on by specifying the IP address of that interface.
+
+
+pkt_size=size
+Set the size in bytes of UDP packets.
+
+
+reuse=1|0
+Explicitly allow or disallow reusing UDP sockets.
+
+
+ttl=ttl
+Set the time to live value (for multicast only).
+
+
+connect=1|0
+Initialize the UDP socket with connect()
. In this case, the
+destination address can’t be changed with ff_udp_set_remote_url later.
+If the destination address isn’t known at the start, this option can
+be specified in ff_udp_set_remote_url, too.
+This allows finding out the source address for the packets with getsockname,
+and makes writes return with AVERROR(ECONNREFUSED) if "destination
+unreachable" is received.
+For receiving, this gives the benefit of only receiving packets from
+the specified peer address/port.
+
+
+sources=address [,address ]
+Only receive packets sent to the multicast group from one of the
+specified sender IP addresses.
+
+
+block=address [,address ]
+Ignore packets sent to the multicast group from the specified
+sender IP addresses.
+
+
+fifo_size=units
+Set the UDP receiving circular buffer size, expressed as a number of
+packets with size of 188 bytes. If not specified defaults to 7*4096.
+
+
+overrun_nonfatal=1|0
+Survive in case of UDP receiving circular buffer overrun. Default
+value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+broadcast=1|0
+Explicitly allow or disallow UDP broadcasting.
+
+Note that broadcasting may not work properly on networks having
+a broadcast storm protection.
+
+
+
+
+
16.33.1 Examples# TOC
+
+
+ Use ffmpeg
to stream over UDP to a remote endpoint:
+
+
ffmpeg -i input -f format udp://hostname :port
+
+
+ Use ffmpeg
to stream in mpegts format over UDP using 188
+sized UDP packets, using a large input buffer:
+
+
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
+
+
+ Use ffmpeg
to receive over UDP from a remote endpoint:
+
+
ffmpeg -i udp://[multicast-address ]:port ...
+
+
+
+
+
16.34 unix# TOC
+
+
Unix local socket
+
+
The required syntax for a Unix socket URL is:
+
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+timeout
+Timeout in ms.
+
+listen
+Create the Unix socket in listening mode.
+
+
+
+
+
17 Device Options# TOC
+
+
The libavdevice library provides the same interface as
+libavformat. Namely, an input device is considered like a demuxer, and
+an output device like a muxer, and the interface and generic device
+options are the same provided by libavformat (see the ffmpeg-formats
+manual).
+
+
In addition each input or output device may support so-called private
+options, which are specific for that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the device
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
+
+
18 Input Devices# TOC
+
+
Input devices are configured elements in FFmpeg which allow you to access
+the data coming from a multimedia device attached to your system.
+
+
When you configure your FFmpeg build, all the supported input devices
+are enabled by default. You can list all available ones using the
+configure option "–list-indevs".
+
+
You can disable all the input devices using the configure option
+"–disable-indevs", and selectively enable an input device using the
+option "–enable-indev=INDEV ", or you can disable a particular
+input device using the option "–disable-indev=INDEV ".
+
+
The option "-devices" of the ff* tools will display the list of
+supported input devices.
+
+
A description of the currently available input devices follows.
+
+
+
18.1 alsa# TOC
+
+
ALSA (Advanced Linux Sound Architecture) input device.
+
+
To enable this input device during configuration you need libasound
+installed on your system.
+
+
This device allows capturing from an ALSA device. The name of the
+device to capture has to be an ALSA card identifier.
+
+
An ALSA identifier has the syntax:
+
+
hw:CARD [,DEV [,SUBDEV ]]
+
+
+
where the DEV and SUBDEV components are optional.
+
+
The three arguments (in order: CARD ,DEV ,SUBDEV )
+specify card number or identifier, device number and subdevice number
+(-1 means any).
+
+
To see the list of cards currently recognized by your system check the
+files /proc/asound/cards and /proc/asound/devices .
+
+
For example to capture with ffmpeg
from an ALSA device with
+card id 0, you may run the command:
+
+
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+
+
For more information see:
+http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
+
+
+
18.2 avfoundation# TOC
+
+
AVFoundation input device.
+
+
+AVFoundation is the currently recommended framework by Apple for stream grabbing on OSX >= 10.7 as well as on iOS.
+The older QTKit framework has been marked deprecated since OSX version 10.7.
+
+
The input filename has to be given in the following syntax:
+
+
-i "[[VIDEO]:[AUDIO]]"
+
+
The first entry selects the video input while the latter selects the audio input.
+The stream has to be specified by the device name or the device index as shown by the device list.
+Alternatively, the video and/or audio input device can be chosen by index using the
+
+ -video_device_index <INDEX>
+
+and/or
+
+ -audio_device_index <INDEX>
+
+, overriding any
+device name or index given in the input filename.
+
+
All available devices can be enumerated by using -list_devices true , listing
+all device names and corresponding indices.
+
+
There are two device name aliases:
+
+default
+Select the AVFoundation default device of the corresponding type.
+
+
+none
+Do not record the corresponding media type.
+This is equivalent to specifying an empty device name or index.
+
+
+
+
+
+
18.2.1 Options# TOC
+
+
AVFoundation supports the following options:
+
+
+-list_devices <TRUE|FALSE>
+If set to true, a list of all available input devices is given showing all
+device names and indices.
+
+
+-video_device_index <INDEX>
+Specify the video device by its index. Overrides anything given in the input filename.
+
+
+-audio_device_index <INDEX>
+Specify the audio device by its index. Overrides anything given in the input filename.
+
+
+-pixel_format <FORMAT>
+Request the video device to use a specific pixel format.
+If the specified format is not supported, a list of available formats is given
+and the first one in this list is used instead. Available pixel formats are:
+monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
+ bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
+ yuv420p, nv12, yuyv422, gray
+
+
+
+
+
+
18.2.2 Examples# TOC
+
+
+ Print the list of AVFoundation supported devices and exit:
+
+
$ ffmpeg -f avfoundation -list_devices true -i ""
+
+
+ Record video from video device 0 and audio from audio device 0 into out.avi:
+
+
$ ffmpeg -f avfoundation -i "0:0" out.avi
+
+
+ Record video from video device 2 and audio from audio device 1 into out.avi:
+
+
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
+
+
+ Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
+
+
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
+
+
+
+
+
+
18.3 bktr# TOC
+
+
BSD video input device.
+
+
+
18.4 dshow# TOC
+
+
Windows DirectShow input device.
+
+
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
+Currently only audio and video devices are supported.
+
+
Multiple devices may be opened as separate inputs, but they may also be
+opened on the same input, which should improve synchronism between them.
+
+
The input name should be in the format:
+
+
+
+
where TYPE can be either audio or video ,
+and NAME is the device’s name.
+
+
+
18.4.1 Options# TOC
+
+
If no options are specified, the device’s defaults are used.
+If the device does not support the requested options, it will
+fail to open.
+
+
+video_size
+Set the video size in the captured video.
+
+
+framerate
+Set the frame rate in the captured video.
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+
+
+sample_size
+Set the sample size (in bits) of the captured audio.
+
+
+channels
+Set the number of channels in the captured audio.
+
+
+list_devices
+If set to true , print a list of devices and exit.
+
+
+list_options
+If set to true , print a list of selected device’s options
+and exit.
+
+
+video_device_number
+Set video device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+audio_device_number
+Set audio device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+pixel_format
+Select pixel format to be used by DirectShow. This may only be set when
+the video codec is not set or set to rawvideo.
+
+
+audio_buffer_size
+Set audio device buffer size in milliseconds (which can directly
+impact latency, depending on the device).
+Defaults to using the audio device’s
+default buffer size (typically some multiple of 500ms).
+Setting this value too low can degrade performance.
+See also
+http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
+
+
+
+
+
+
18.4.2 Examples# TOC
+
+
+ Print the list of DirectShow supported devices and exit:
+
+
$ ffmpeg -list_devices true -f dshow -i dummy
+
+
+ Open video device Camera :
+
+
$ ffmpeg -f dshow -i video="Camera"
+
+
+ Open second video device with name Camera :
+
+
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
+
+
+ Open video device Camera and audio device Microphone :
+
+
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
+
+
+ Print the list of supported options in selected device and exit:
+
+
$ ffmpeg -list_options true -f dshow -i video="Camera"
+
+
+
+
+
+
18.5 dv1394# TOC
+
+
Linux DV 1394 input device.
+
+
+
18.6 fbdev# TOC
+
+
Linux framebuffer input device.
+
+
The Linux framebuffer is a graphic hardware-independent abstraction
+layer to show graphics on a computer monitor, typically on the
+console. It is accessed through a file device node, usually
+/dev/fb0 .
+
+
For more detailed information read the file
+Documentation/fb/framebuffer.txt included in the Linux source tree.
+
+
To record from the framebuffer device /dev/fb0 with
+ffmpeg
:
+
+
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+
+
You can take a single screenshot image with the command:
+
+
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+
+
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
+
+
+
18.7 gdigrab# TOC
+
+
Win32 GDI-based screen capture device.
+
+
This device allows you to capture a region of the display on Windows.
+
+
There are two options for the input filename:
+
+
or
+
+
+
The first option will capture the entire desktop, or a fixed region of the
+desktop. The second option will instead capture the contents of a single
+window, regardless of its position on the screen.
+
+
For example, to grab the entire desktop using ffmpeg
:
+
+
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
+
+
+
Grab a 640x480 region at position 10,20
:
+
+
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
+
+
+
Grab the contents of the window named "Calculator"
+
+
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
+
+
+
+
18.7.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. Use the value 0
to
+not draw the pointer. Default value is 1
.
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+Note that show_region is incompatible with grabbing the contents
+of a single window.
+
+For example:
+
+
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
+
+
+
+video_size
+Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
+
+
+offset_x
+When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
+
+
+offset_y
+When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
+
+
+
+
+
+
18.8 iec61883# TOC
+
+
FireWire DV/HDV input device using libiec61883.
+
+
To enable this input device, you need libiec61883, libraw1394 and
+libavc1394 installed on your system. Use the configure option
+--enable-libiec61883
to compile with the device enabled.
+
+
The iec61883 capture device supports capturing from a video device
+connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
+FireWire stack (juju). This is the default DV/HDV input method in Linux
+Kernel 2.6.37 and later, since the old FireWire stack was removed.
+
+
Specify the FireWire port to be used as input file, or "auto"
+to choose the first port connected.
+
+
+
18.8.1 Options# TOC
+
+
+dvtype
+Override autodetection of DV/HDV. This should only be used if auto
+detection does not work, or if usage of a different device type
+should be prohibited. Treating a DV device as HDV (or vice versa) will
+not work and result in undefined behavior.
+The values auto , dv and hdv are supported.
+
+
+dvbuffer
+Set maximum size of buffer for incoming data, in frames. For DV, this
+is an exact value. For HDV, it is not frame exact, since HDV does
+not have a fixed frame size.
+
+
+dvguid
+Select the capture device by specifying its GUID. Capturing will only
+be performed from the specified device and fails if no device with the
+given GUID is found. This is useful to select the input if multiple
+devices are connected at the same time.
+Look at /sys/bus/firewire/devices to find out the GUIDs.
+
+
+
+
+
+
18.8.2 Examples# TOC
+
+
+ Grab and show the input of a FireWire DV/HDV device.
+
+
ffplay -f iec61883 -i auto
+
+
+ Grab and record the input of a FireWire DV/HDV device,
+using a packet buffer of 100000 packets if the source is HDV.
+
+
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
+
+
+
+
+
+
18.9 jack# TOC
+
+
JACK input device.
+
+
To enable this input device during configuration you need libjack
+installed on your system.
+
+
A JACK input device creates one or more JACK writable clients, one for
+each audio channel, with name client_name :input_N , where
+client_name is the name provided by the application, and N
+is a number which identifies the channel.
+Each writable client will send the acquired data to the FFmpeg input
+device.
+
+
Once you have created one or more JACK readable clients, you need to
+connect them to one or more JACK writable clients.
+
+
To connect or disconnect JACK clients you can use the jack_connect
+and jack_disconnect
programs, or do it through a graphical interface,
+for example with qjackctl
.
+
+
To list the JACK clients and their properties you can invoke the command
+jack_lsp
.
+
+
Follows an example which shows how to capture a JACK readable client
+with ffmpeg
.
+
+
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+
+
For more information read:
+http://jackaudio.org/
+
+
+
18.10 lavfi# TOC
+
+
Libavfilter input virtual device.
+
+
This input device reads data from the open output pads of a libavfilter
+filtergraph.
+
+
For each filtergraph open output, the input device will create a
+corresponding stream which is mapped to the generated output. Currently
+only video data is supported. The filtergraph is specified through the
+option graph .
+
+
+
18.10.1 Options# TOC
+
+
+graph
+Specify the filtergraph to use as input. Each video open output must be
+labelled by a unique string of the form "outN ", where N is a
+number starting from 0 corresponding to the mapped input stream
+generated by the device.
+The first unlabelled output is automatically assigned to the "out0"
+label, but all the others need to be specified explicitly.
+
+The suffix "+subcc" can be appended to the output label to create an extra
+stream with the closed captions packets attached to that output
+(experimental; only for EIA-608 / CEA-708 for now).
+The subcc streams are created after all the normal streams, in the order of
+the corresponding stream.
+For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
+stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
+
+If not specified defaults to the filename specified for the input
+device.
+
+
+graph_file
+Set the filename of the filtergraph to be read and sent to the other
+filters. Syntax of the filtergraph is the same as the one specified by
+the option graph .
+
+
+
+
+
+
18.10.2 Examples# TOC
+
+
+ Create a color video stream and play it back with ffplay
:
+
+
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
+
+
+ As the previous example, but use filename for specifying the graph
+description, and omit the "out0" label:
+
+
ffplay -f lavfi color=c=pink
+
+
+ Create three different video test filtered sources and play them:
+
+
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
+
+
+ Read an audio stream from a file using the amovie source and play it
+back with ffplay
:
+
+
ffplay -f lavfi "amovie=test.wav"
+
+
+ Read an audio stream and a video stream and play it back with
+ffplay
:
+
+
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
+
+
+ Dump decoded frames to images and closed captions to a file (experimental):
+
+
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
+
+
+
+
+
+
18.11 libcdio# TOC
+
+
Audio-CD input device based on cdio.
+
+
To enable this input device during configuration you need libcdio
+installed on your system. Requires the configure option
+--enable-libcdio
.
+
+
This device allows playing and grabbing from an Audio-CD.
+
+
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
+you may run the command:
+
+
ffmpeg -f libcdio -i /dev/sr0 cd.wav
+
+
+
+
18.12 libdc1394# TOC
+
+
IIDC1394 input device, based on libdc1394 and libraw1394.
+
+
Requires the configure option --enable-libdc1394
.
+
+
+
18.13 openal# TOC
+
+
The OpenAL input device provides audio capture on all systems with a
+working OpenAL 1.1 implementation.
+
+
To enable this input device during configuration, you need OpenAL
+headers and libraries installed on your system, and need to configure
+FFmpeg with --enable-openal
.
+
+
OpenAL headers and libraries should be provided as part of your OpenAL
+implementation, or as an additional download (an SDK). Depending on your
+installation you may need to specify additional flags via the
+--extra-cflags
and --extra-ldflags
for allowing the build
+system to locate the OpenAL headers and libraries.
+
+
An incomplete list of OpenAL implementations follows:
+
+
+Creative
+The official Windows implementation, providing hardware acceleration
+with supported devices and software fallback.
+See http://openal.org/ .
+
+OpenAL Soft
+Portable, open source (LGPL) software implementation. Includes
+backends for the most common sound APIs on the Windows, Linux,
+Solaris, and BSD operating systems.
+See http://kcat.strangesoft.net/openal.html .
+
+Apple
+OpenAL is part of Core Audio, the official Mac OS X Audio interface.
+See http://developer.apple.com/technologies/mac/audio-and-video.html
+
+
+
+
This device allows one to capture from an audio input device handled
+through OpenAL.
+
+
You need to specify the name of the device to capture in the provided
+filename. If the empty string is provided, the device will
+automatically select the default device. You can get the list of the
+supported devices by using the option list_devices .
+
+
+
18.13.1 Options# TOC
+
+
+channels
+Set the number of channels in the captured audio. Only the values
+1 (monaural) and 2 (stereo) are currently supported.
+Defaults to 2 .
+
+
+sample_size
+Set the sample size (in bits) of the captured audio. Only the values
+8 and 16 are currently supported. Defaults to
+16 .
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+Defaults to 44.1k .
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+
+
+
+
18.13.2 Examples# TOC
+
+
Print the list of OpenAL supported devices and exit:
+
+
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+
+
Capture from the OpenAL device DR-BT101 via PulseAudio :
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+
+
Capture from the default device (note the empty string '' as filename):
+
+
$ ffmpeg -f openal -i '' out.ogg
+
+
+
Capture from two devices simultaneously, writing to two different files,
+within the same ffmpeg
command:
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+
Note: not all OpenAL implementations support multiple simultaneous capture -
+try the latest OpenAL Soft if the above does not work.
+
+
+
18.14 oss# TOC
+
+
Open Sound System input device.
+
+
The filename to provide to the input device is the device node
+representing the OSS input device, and is usually set to
+/dev/dsp .
+
+
For example to grab from /dev/dsp using ffmpeg
use the
+command:
+
+
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+
+
For more information about OSS see:
+http://manuals.opensound.com/usersguide/dsp.html
+
+
+
18.15 pulse# TOC
+
+
PulseAudio input device.
+
+
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
+
+
The filename to provide to the input device is a source device or the
+string "default"
+
+
To list the PulseAudio source devices and their properties you can invoke
+the command pactl list sources
.
+
+
More information about PulseAudio can be found on http://www.pulseaudio.org .
+
+
+
18.15.1 Options# TOC
+
+server
+Connect to a specific PulseAudio server, specified by an IP address.
+Default server is used when not provided.
+
+
+name
+Specify the application name PulseAudio will use when showing active clients,
+by default it is the LIBAVFORMAT_IDENT
string.
+
+
+stream_name
+Specify the stream name PulseAudio will use when showing active streams,
+by default it is "record".
+
+
+sample_rate
+Specify the samplerate in Hz, by default 48kHz is used.
+
+
+channels
+Specify the channels in use, by default 2 (stereo) is set.
+
+
+frame_size
+Specify the number of bytes per frame, by default it is set to 1024.
+
+
+fragment_size
+Specify the minimal buffering fragment in PulseAudio, it will affect the
+audio latency. By default it is unset.
+
+
+
+
+
18.15.2 Examples# TOC
+
Record a stream from default device:
+
+
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+
+
+
18.16 qtkit# TOC
+
+
QTKit input device.
+
+
The filename passed as input is parsed to contain either a device name or index.
+The device index can also be given by using -video_device_index.
+A given device index will override any given device name.
+If the desired device consists of numbers only, use -video_device_index to identify it.
+The default device will be chosen if an empty string or the device name "default" is given.
+The available devices can be enumerated by using -list_devices.
+
+
+
ffmpeg -f qtkit -i "0" out.mpg
+
+
+
+
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
+
+
+
+
ffmpeg -f qtkit -i "default" out.mpg
+
+
+
+
ffmpeg -f qtkit -list_devices true -i ""
+
+
+
+
18.17 sndio# TOC
+
+
sndio input device.
+
+
To enable this input device during configuration you need libsndio
+installed on your system.
+
+
The filename to provide to the input device is the device node
+representing the sndio input device, and is usually set to
+/dev/audio0 .
+
+
For example to grab from /dev/audio0 using ffmpeg
use the
+command:
+
+
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+
+
+
18.18 video4linux2, v4l2# TOC
+
+
Video4Linux2 input video device.
+
+
"v4l2" can be used as alias for "video4linux2".
+
+
If FFmpeg is built with v4l-utils support (by using the
+--enable-libv4l2
configure option), it is possible to use it with the
+-use_libv4l2
input device option.
+
+
The name of the device to grab is a file device node, usually Linux
+systems tend to automatically create such nodes when the device
+(e.g. an USB webcam) is plugged into the system, and has a name of the
+kind /dev/videoN , where N is a number associated to
+the device.
+
+
Video4Linux2 devices usually support a limited set of
+width x height sizes and frame rates. You can check which are
+supported using -list_formats all
for Video4Linux2 devices.
+Some devices, like TV cards, support one or more standards. It is possible
+to list all the supported standards using -list_standards all
.
+
+
The time base for the timestamps is 1 microsecond. Depending on the kernel
+version and configuration, the timestamps may be derived from the real time
+clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
+boot time, unaffected by NTP or manual changes to the clock). The
+-timestamps abs or -ts abs option can be used to force
+conversion into the real time clock.
+
+
Some usage examples of the video4linux2 device with ffmpeg
+and ffplay
:
+
+ Grab and show the input of a video4linux2 device:
+
+
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
+
+
+ Grab and record the input of a video4linux2 device, leave the
+frame rate and size as previously set:
+
+
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
+
+
+
+
For more information about Video4Linux, check http://linuxtv.org/ .
+
+
+
18.18.1 Options# TOC
+
+
+standard
+Set the standard. Must be the name of a supported standard. To get a
+list of the supported standards, use the list_standards
+option.
+
+
+channel
+Set the input channel number. Default to -1, which means using the
+previously selected channel.
+
+
+video_size
+Set the video frame size. The argument must be a string in the form
+WIDTH x HEIGHT or a valid size abbreviation.
+
+
+pixel_format
+Select the pixel format (only valid for raw video input).
+
+
+input_format
+Set the preferred pixel format (for raw video) or a codec name.
+This option allows one to select the input format, when several are
+available.
+
+
+framerate
+Set the preferred video frame rate.
+
+
+list_formats
+List available formats (supported pixel formats, codecs, and frame
+sizes) and exit.
+
+Available values are:
+
+‘all ’
+Show all available (compressed and non-compressed) formats.
+
+
+‘raw ’
+Show only raw video (non-compressed) formats.
+
+
+‘compressed ’
+Show only compressed formats.
+
+
+
+
+list_standards
+List supported standards and exit.
+
+Available values are:
+
+‘all ’
+Show all supported standards.
+
+
+
+
+timestamps, ts
+Set type of timestamps for grabbed frames.
+
+Available values are:
+
+‘default ’
+Use timestamps from the kernel.
+
+
+‘abs ’
+Use absolute timestamps (wall clock).
+
+
+‘mono2abs ’
+Force conversion from monotonic to absolute timestamps.
+
+
+
+Default value is default
.
+
+
+
+
+
18.19 vfwcap# TOC
+
+
VfW (Video for Windows) capture input device.
+
+
The filename passed as input is the capture driver number, ranging from
+0 to 9. You may use "list" as filename to print a list of drivers. Any
+other filename will be interpreted as device number 0.
+
+
+
18.20 x11grab# TOC
+
+
X11 video input device.
+
+
Depends on X11, Xext, and Xfixes. Requires the configure option
+--enable-x11grab
.
+
+
This device allows one to capture a region of an X11 display.
+
+
The filename passed as input has the syntax:
+
+
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
+
+
+
hostname :display_number .screen_number specifies the
+X11 display name of the screen to grab from. hostname can be
+omitted, and defaults to "localhost". The environment variable
+DISPLAY
contains the default display name.
+
+
x_offset and y_offset specify the offsets of the grabbed
+area with respect to the top-left border of the X11 screen. They
+default to 0.
+
+
Check the X11 documentation (e.g. man X) for more detailed information.
+
+
Use the xdpyinfo
program for getting basic information about the
+properties of your X11 display (e.g. grep for "name" or "dimensions").
+
+
For example to grab from :0.0 using ffmpeg
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
Grab at position 10,20
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+
+
18.20.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. A value of 0
specifies
+not to draw the pointer. Default value is 1
.
+
+
+follow_mouse
+Make the grabbed area follow the mouse. The argument can be
+centered
or a number of pixels PIXELS .
+
+When it is specified with "centered", the grabbing region follows the mouse
+pointer and keeps the pointer at the center of region; otherwise, the region
+follows only when the mouse pointer reaches within PIXELS (greater than
+zero) to the edge of region.
+
+For example:
+
+
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+To follow only when the mouse pointer reaches within 100 pixels to edge:
+
+
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+For example:
+
+
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+With follow_mouse :
+
+
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+video_size
+Set the video frame size. Default value is vga
.
+
+
+use_shm
+Use the MIT-SHM extension for shared memory. Default value is 1
.
+It may be necessary to disable it for remote displays.
+
+
+
+
+
18.21 decklink# TOC
+
+
The decklink input device provides capture capabilities for Blackmagic
+DeckLink devices.
+
+
To enable this input device, you need the Blackmagic DeckLink SDK and you
+need to configure with the appropriate --extra-cflags
+and --extra-ldflags
.
+On Windows, you need to run the IDL files through widl
.
+
+
DeckLink is very picky about the formats it supports. Pixel format is always
+uyvy422, framerate and video size must be determined for your device with
+-list_formats 1
. Audio sample rate is always 48 kHz and the number
+of channels currently is limited to 2 (stereo).
+
+
+
18.21.1 Options# TOC
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+list_formats
+If set to true , print a list of supported formats and exit.
+Defaults to false .
+
+
+
+
+
+
18.21.2 Examples# TOC
+
+
+ List input devices:
+
+
ffmpeg -f decklink -list_devices 1 -i dummy
+
+
+ List supported formats:
+
+
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
+
+
+ Capture video clip at 1080i50 (format 11):
+
+
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
+
+
+
+
+
+
+
19 Resampler Options# TOC
+
+
The audio resampler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, option =value for the aresample filter,
+by setting the value explicitly in the
+SwrContext
options or using the libavutil/opt.h API for
+programmatic use.
+
+
+ich, in_channel_count
+Set the number of input channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+in_channel_layout is set.
+
+
+och, out_channel_count
+Set the number of output channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+out_channel_layout is set.
+
+
+uch, used_channel_count
+Set the number of used input channels. Default value is 0. This option is
+only used for special remapping.
+
+
+isr, in_sample_rate
+Set the input sample rate. Default value is 0.
+
+
+osr, out_sample_rate
+Set the output sample rate. Default value is 0.
+
+
+isf, in_sample_fmt
+Specify the input sample format. It is set by default to none
.
+
+
+osf, out_sample_fmt
+Specify the output sample format. It is set by default to none
.
+
+
+tsf, internal_sample_fmt
+Set the internal sample format. Default value is none
.
+This will automatically be chosen when it is not explicitly set.
+
+
+icl, in_channel_layout
+ocl, out_channel_layout
+Set the input/output channel layout.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+clev, center_mix_level
+Set the center mix level. It is a value expressed in deciBel, and must be
+in the interval [-32,32].
+
+
+slev, surround_mix_level
+Set the surround mix level. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+lfe_mix_level
+Set LFE mix into non LFE level. It is used when there is a LFE input but no
+LFE output. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+rmvol, rematrix_volume
+Set rematrix volume. Default value is 1.0.
+
+
+rematrix_maxval
+Set maximum output value for rematrixing.
+This can be used to prevent clipping vs. preventing volume reduction.
+A value of 1.0 prevents clipping.
+
+
+flags, swr_flags
+Set flags used by the converter. Default value is 0.
+
+It supports the following individual flags:
+
+res
+force resampling, this flag forces resampling to be used even when the
+input and output sample rates match.
+
+
+
+
+dither_scale
+Set the dither scale. Default value is 1.
+
+
+dither_method
+Set dither method. Default value is 0.
+
+Supported values:
+
+‘rectangular ’
+select rectangular dither
+
+‘triangular ’
+select triangular dither
+
+‘triangular_hp ’
+select triangular dither with high pass
+
+‘lipshitz ’
+select lipshitz noise shaping dither
+
+‘shibata ’
+select shibata noise shaping dither
+
+‘low_shibata ’
+select low shibata noise shaping dither
+
+‘high_shibata ’
+select high shibata noise shaping dither
+
+‘f_weighted ’
+select f-weighted noise shaping dither
+
+‘modified_e_weighted ’
+select modified-e-weighted noise shaping dither
+
+‘improved_e_weighted ’
+select improved-e-weighted noise shaping dither
+
+
+
+
+
+resampler
+Set resampling engine. Default value is swr.
+
+Supported values:
+
+‘swr ’
+select the native SW Resampler; filter options precision and cheby are not
+applicable in this case.
+
+‘soxr ’
+select the SoX Resampler (where available); compensation, and filter options
+filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
+case.
+
+
+
+
+filter_size
+For swr only, set resampling filter size, default value is 32.
+
+
+phase_shift
+For swr only, set resampling phase shift, default value is 10, and must be in
+the interval [0,30].
+
+
+linear_interp
+Use Linear Interpolation if set to 1, default value is 0.
+
+
+cutoff
+Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
+value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
+(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
+
+
+precision
+For soxr only, the precision in bits to which the resampled signal will be
+calculated. The default value of 20 (which, with suitable dithering, is
+appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
+value of 28 gives SoX’s ’Very High Quality’.
+
+
+cheby
+For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
+approximation for ’irrational’ ratios. Default value is 0.
+
+
+async
+For swr only, simple 1 parameter audio sync to timestamps using stretching,
+squeezing, filling and trimming. Setting this to 1 will enable filling and
+trimming, larger values represent the maximum amount in samples that the data
+may be stretched or squeezed for each second.
+Default value is 0, thus no compensation is applied to make the samples match
+the audio timestamps.
+
+
+first_pts
+For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
+This allows for padding/trimming at the start of stream. By default, no
+assumption is made about the first frame’s expected pts, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative pts due to encoder delay.
+
+
+min_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger stretching/squeezing/filling or trimming of the
+data to make it match the timestamps. The default is that
+stretching/squeezing/filling and trimming is disabled
+(min_comp = FLT_MAX
).
+
+
+min_hard_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger adding/dropping samples to make it match the
+timestamps. This option effectively is a threshold to select between
+hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
+all compensation is by default disabled through min_comp .
+The default is 0.1.
+
+
+comp_duration
+For swr only, set duration (in seconds) over which data is stretched/squeezed
+to make it match the timestamps. Must be a non-negative double float value,
+default value is 1.0.
+
+
+max_soft_comp
+For swr only, set maximum factor by which data is stretched/squeezed to make it
+match the timestamps. Must be a non-negative double float value, default value
+is 0.
+
+
+matrix_encoding
+Select matrixed stereo encoding.
+
+It accepts the following values:
+
+‘none ’
+select none
+
+‘dolby ’
+select Dolby
+
+‘dplii ’
+select Dolby Pro Logic II
+
+
+
+Default value is none
.
+
+
+filter_type
+For swr only, select resampling filter type. This only affects resampling
+operations.
+
+It accepts the following values:
+
+‘cubic ’
+select cubic
+
+‘blackman_nuttall ’
+select Blackman Nuttall Windowed Sinc
+
+‘kaiser ’
+select Kaiser Windowed Sinc
+
+
+
+
+kaiser_beta
+For swr only, set Kaiser Window Beta value. Must be an integer in the
+interval [2,16], default value is 9.
+
+
+output_sample_bits
+For swr only, set number of used output sample bits for dithering. Must be an integer in the
+interval [0,64], default value is 0, which means it’s not used.
+
+
+
+
+
+
20 Scaler Options# TOC
+
+
The video scaler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools. For programmatic use, they can be set explicitly in the
+SwsContext
options or through the libavutil/opt.h API.
+
+
+
+
+sws_flags
+Set the scaler flags. This is also used to set the scaling
+algorithm. Only a single algorithm should be selected.
+
+It accepts the following values:
+
+‘fast_bilinear ’
+Select fast bilinear scaling algorithm.
+
+
+‘bilinear ’
+Select bilinear scaling algorithm.
+
+
+‘bicubic ’
+Select bicubic scaling algorithm.
+
+
+‘experimental ’
+Select experimental scaling algorithm.
+
+
+‘neighbor ’
+Select nearest neighbor rescaling algorithm.
+
+
+‘area ’
+Select averaging area rescaling algorithm.
+
+
+‘bicublin ’
+Select bicubic scaling algorithm for the luma component, bilinear for
+chroma components.
+
+
+‘gauss ’
+Select Gaussian rescaling algorithm.
+
+
+‘sinc ’
+Select sinc rescaling algorithm.
+
+
+‘lanczos ’
+Select lanczos rescaling algorithm.
+
+
+‘spline ’
+Select natural bicubic spline rescaling algorithm.
+
+
+‘print_info ’
+Enable printing/debug logging.
+
+
+‘accurate_rnd ’
+Enable accurate rounding.
+
+
+‘full_chroma_int ’
+Enable full chroma interpolation.
+
+
+‘full_chroma_inp ’
+Select full chroma input.
+
+
+‘bitexact ’
+Enable bitexact output.
+
+
+
+
+srcw
+Set source width.
+
+
+srch
+Set source height.
+
+
+dstw
+Set destination width.
+
+
+dsth
+Set destination height.
+
+
+src_format
+Set source pixel format (must be expressed as an integer).
+
+
+dst_format
+Set destination pixel format (must be expressed as an integer).
+
+
+src_range
+Select source range.
+
+
+dst_range
+Select destination range.
+
+
+param0, param1
+Set scaling algorithm parameters. The specified values are specific of
+some scaling algorithms and ignored by others. The specified values
+are floating point number values.
+
+
+sws_dither
+Set the dithering algorithm. Accepts one of the following
+values. Default value is ‘auto ’.
+
+
+‘auto ’
+automatic choice
+
+
+‘none ’
+no dithering
+
+
+‘bayer ’
+bayer dither
+
+
+‘ed ’
+error diffusion dither
+
+
+‘a_dither ’
+arithmetic dither, based using addition
+
+
+‘x_dither ’
+arithmetic dither, based using xor (more random/less apparent patterning than
+a_dither).
+
+
+
+
+
+
+
+
+
21 Filtering Introduction# TOC
+
+
Filtering in FFmpeg is enabled through the libavfilter library.
+
+
In libavfilter, a filter can have multiple inputs and multiple
+outputs.
+To illustrate the sorts of things that are possible, we consider the
+following filtergraph.
+
+
+
[main]
+input --> split ---------------------> overlay --> output
+ | ^
+ |[tmp] [flip]|
+ +-----> crop --> vflip -------+
+
+
+
This filtergraph splits the input stream in two streams, then sends one
+stream through the crop filter and the vflip filter, before merging it
+back with the other stream by overlaying it on top. You can use the
+following command to achieve this:
+
+
+
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+
+
The result will be that the top half of the video is mirrored
+onto the bottom half of the output video.
+
+
Filters in the same linear chain are separated by commas, and distinct
+linear chains of filters are separated by semicolons. In our example,
+crop,vflip are in one linear chain, split and
+overlay are separately in another. The points where the linear
+chains join are labelled by names enclosed in square brackets. In the
+example, the split filter generates two outputs that are associated to
+the labels [main] and [tmp] .
+
+
The stream sent to the second output of split , labelled as
+[tmp] , is processed through the crop filter, which crops
+away the lower half part of the video, and then vertically flipped. The
+overlay filter takes in input the first unchanged output of the
+split filter (which was labelled as [main] ), and overlay on its
+lower half the output generated by the crop,vflip filterchain.
+
+
Some filters take in input a list of parameters: they are specified
+after the filter name and an equal sign, and are separated from each other
+by a colon.
+
+
There exist so-called source filters that do not have an
+audio/video input, and sink filters that will not have audio/video
+output.
+
+
+
+
22 graph2dot# TOC
+
+
The graph2dot program included in the FFmpeg tools
+directory can be used to parse a filtergraph description and issue a
+corresponding textual representation in the dot language.
+
+
Invoke the command:
+
+
tools/graph2dot -h
+
to see how to use graph2dot .
+
+
You can then pass the dot description to the dot program (from
+the graphviz suite of programs) and obtain a graphical representation
+of the filtergraph.
+
+
For example the sequence of commands:
+
+
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+
+
can be used to create and display an image representing the graph
+described by the GRAPH_DESCRIPTION string. Note that this string must be
+a complete self-contained graph, with its inputs and outputs explicitly defined.
+For example if your command line is of the form:
+
+
ffmpeg -i infile -vf scale=640:360 outfile
+
+
your GRAPH_DESCRIPTION string will need to be of the form:
+
+
nullsrc,scale=640:360,nullsink
+
+
you may also need to set the nullsrc parameters and add a format
+filter in order to simulate a specific input file.
+
+
+
+
23 Filtergraph description# TOC
+
+
A filtergraph is a directed graph of connected filters. It can contain
+cycles, and there can be multiple links between a pair of
+filters. Each link has one input pad on one side connecting it to one
+filter from which it takes its input, and one output pad on the other
+side connecting it to one filter accepting its output.
+
+
Each filter in a filtergraph is an instance of a filter class
+registered in the application, which defines the features and the
+number of input and output pads of the filter.
+
+
A filter with no input pads is called a "source", and a filter with no
+output pads is called a "sink".
+
+
+
23.1 Filtergraph syntax# TOC
+
+
A filtergraph has a textual representation, which is
+recognized by the -filter /-vf and -filter_complex
+options in ffmpeg
and -vf in ffplay
, and by the
+avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
+libavfilter/avfilter.h .
+
+
A filterchain consists of a sequence of connected filters, each one
+connected to the previous one in the sequence. A filterchain is
+represented by a list of ","-separated filter descriptions.
+
+
A filtergraph consists of a sequence of filterchains. A sequence of
+filterchains is represented by a list of ";"-separated filterchain
+descriptions.
+
+
A filter is represented by a string of the form:
+[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
+
+
filter_name is the name of the filter class of which the
+described filter is an instance of, and has to be the name of one of
+the filter classes registered in the program.
+The name of the filter class is optionally followed by a string
+"=arguments ".
+
+
arguments is a string which contains the parameters used to
+initialize the filter instance. It may have one of two forms:
+
+ A ’:’-separated list of key=value pairs.
+
+ A ’:’-separated list of value . In this case, the keys are assumed to be
+the option names in the order they are declared. E.g. the fade
filter
+declares three options in this order – type , start_frame and
+nb_frames . Then the parameter list in:0:30 means that the value
+in is assigned to the option type , 0 to
+start_frame and 30 to nb_frames .
+
+ A ’:’-separated list of mixed direct value and long key=value
+pairs. The direct value must precede the key=value pairs, and
+follow the same constraints order of the previous point. The following
+key=value pairs can be set in any preferred order.
+
+
+
+
If the option value itself is a list of items (e.g. the format
filter
+takes a list of pixel formats), the items in the list are usually separated by
+’|’.
+
+
The list of arguments can be quoted using the character "’" as initial
+and ending mark, and the character ’\’ for escaping the characters
+within the quoted text; otherwise the argument string is considered
+terminated when the next special character (belonging to the set
+"[]=;,") is encountered.
+
+
The name and arguments of the filter are optionally preceded and
+followed by a list of link labels.
+A link label allows one to name a link and associate it to a filter output
+or input pad. The preceding labels in_link_1
+... in_link_N , are associated to the filter input pads,
+the following labels out_link_1 ... out_link_M , are
+associated to the output pads.
+
+
When two link labels with the same name are found in the
+filtergraph, a link between the corresponding input and output pad is
+created.
+
+
If an output pad is not labelled, it is linked by default to the first
+unlabelled input pad of the next filter in the filterchain.
+For example in the filterchain
+
+
nullsrc, split[L1], [L2]overlay, nullsink
+
+
the split filter instance has two output pads, and the overlay filter
+instance two input pads. The first output pad of split is labelled
+"L1", the first input pad of overlay is labelled "L2", and the second
+output pad of split is linked to the second input pad of overlay,
+which are both unlabelled.
+
+
In a complete filterchain all the unlabelled filter input and output
+pads must be connected. A filtergraph is considered valid if all the
+filter input and output pads of all the filterchains are connected.
+
+
Libavfilter will automatically insert scale filters where format
+conversion is required. It is possible to specify swscale flags
+for those automatically inserted scalers by prepending
+sws_flags=flags ;
+to the filtergraph description.
+
+
Here is a BNF description of the filtergraph syntax:
+
+
NAME ::= sequence of alphanumeric characters and '_'
+LINKLABEL ::= "[" NAME "]"
+LINKLABELS ::= LINKLABEL [LINKLABELS ]
+FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
+FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
+FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
+FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
+
+
+
+
23.2 Notes on filtergraph escaping# TOC
+
+
Filtergraph description composition entails several levels of
+escaping. See (ffmpeg-utils)the "Quoting and escaping"
+section in the ffmpeg-utils(1) manual for more
+information about the employed escaping procedure.
+
+
A first level escaping affects the content of each filter option
+value, which may contain the special character :
used to
+separate values, or one of the escaping characters \'
.
+
+
A second level escaping affects the whole filter description, which
+may contain the escaping characters \'
or the special
+characters [],;
used by the filtergraph description.
+
+
Finally, when you specify a filtergraph on a shell commandline, you
+need to perform a third level escaping for the shell special
+characters contained within it.
+
+
For example, consider the following string to be embedded in
+the drawtext filter description text value:
+
+
this is a 'string': may contain one, or more, special characters
+
+
+
This string contains the '
special escaping character, and the
+:
special character, so it needs to be escaped in this way:
+
+
text=this is a \'string\'\: may contain one, or more, special characters
+
+
+
A second level of escaping is required when embedding the filter
+description in a filtergraph description, in order to escape all the
+filtergraph special characters. Thus the example above becomes:
+
+
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+
(note that in addition to the \'
escaping special characters,
+also ,
needs to be escaped).
+
+
Finally an additional level of escaping is needed when writing the
+filtergraph description in a shell command, which depends on the
+escaping rules of the adopted shell. For example, assuming that
+\
is special and needs to be escaped with another \
, the
+previous string will finally result in:
+
+
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+
+
+
24 Timeline editing# TOC
+
+
Some filters support a generic enable option. For the filters
+supporting timeline editing, this option can be set to an expression which is
+evaluated before sending a frame to the filter. If the evaluation is non-zero,
+the filter will be enabled, otherwise the frame will be sent unchanged to the
+next filter in the filtergraph.
+
+
The expression accepts the following values:
+
+‘t ’
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+‘n ’
+sequential number of the input frame, starting from 0
+
+
+‘pos ’
+the position in the file of the input frame, NAN if unknown
+
+
+‘w ’
+‘h ’
+width and height of the input frame if video
+
+
+
+
Additionally, these filters support an enable command that can be used
+to re-define the expression.
+
+
Like any other filtering option, the enable option follows the same
+rules.
+
+
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
+minutes, and a curves filter starting at 3 seconds:
+
+
smartblur = enable='between(t,10,3*60)',
+curves = enable='gte(t,3)' : preset=cross_process
+
+
+
+
+
25 Audio Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the audio filters included in your
+build.
+
+
Below is a description of the currently available audio filters.
+
+
+
25.1 adelay# TOC
+
+
Delay one or more audio channels.
+
+
Samples in delayed channel are filled with silence.
+
+
The filter accepts the following option:
+
+
+delays
+Set list of delays in milliseconds for each channel separated by ’|’.
+At least one delay greater than 0 should be provided.
+Unused delays will be silently ignored. If number of given delays is
+smaller than number of channels all remaining channels will not be delayed.
+
+
+
+
+
25.1.1 Examples# TOC
+
+
+ Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
+the second channel (and any other channels that may be present) unchanged.
+
+
+
+
+
25.2 aecho# TOC
+
+
Apply echoing to the input audio.
+
+
Echoes are reflected sound and can occur naturally amongst mountains
+(and sometimes large buildings) when talking or shouting; digital echo
+effects emulate this behaviour and are often used to help fill out the
+sound of a single instrument or vocal. The time difference between the
+original signal and the reflection is the delay
, and the
+loudness of the reflected signal is the decay
.
+Multiple echoes can have different delays and decays.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain of reflected signal. Default is 0.6
.
+
+
+out_gain
+Set output gain of reflected signal. Default is 0.3
.
+
+
+delays
+Set list of time intervals in milliseconds between original signal and reflections
+separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
+Default is 1000
.
+
+
+decays
+Set list of loudnesses of reflected signals separated by ’|’.
+Allowed range for each decay
is (0 - 1.0]
.
+Default is 0.5
.
+
+
+
+
+
25.2.1 Examples# TOC
+
+
+ Make it sound as if there are twice as many instruments as are actually playing:
+
+
+ If the delay is very short, then it sounds like a (metallic) robot playing music:
+
+
+ A longer delay will sound like an open air concert in the mountains:
+
+
aecho=0.8:0.9:1000:0.3
+
+
+ Same as above but with one more mountain:
+
+
aecho=0.8:0.9:1000|1800:0.3|0.25
+
+
+
+
+
25.3 aeval# TOC
+
+
Modify an audio signal according to the specified expressions.
+
+
This filter accepts one or more expressions (one for each channel),
+which are evaluated and used to modify a corresponding audio signal.
+
+
It accepts the following parameters:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. If
+the number of input channels is greater than the number of
+expressions, the last specified expression is used for the remaining
+output channels.
+
+
+channel_layout, c
+Set output channel layout. If not specified, the channel layout is
+specified by the number of expressions. If set to ‘same ’, it will
+use by default the same input channel layout.
+
+
+
+
Each expression in exprs can contain the following constants and functions:
+
+
+ch
+channel number of the current expression
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+s
+sample rate
+
+
+t
+time of the evaluated sample expressed in seconds
+
+
+nb_in_channels
+nb_out_channels
+input and output number of channels
+
+
+val(CH)
+the value of input channel with number CH
+
+
+
+
Note: this filter is slow. For faster processing you should use a
+dedicated filter.
+
+
+
25.3.1 Examples# TOC
+
+
+ Half volume:
+
+
aeval=val(ch)/2:c=same
+
+
+ Invert phase of the second channel:
+
+
+
+
+
25.4 afade# TOC
+
+
Apply fade-in/out effect to input audio.
+
+
A description of the accepted parameters follows.
+
+
+type, t
+Specify the effect type, can be either in
for fade-in, or
+out
for a fade-out effect. Default is in
.
+
+
+start_sample, ss
+Specify the number of the start sample for starting to apply the fade
+effect. Default is 0.
+
+
+nb_samples, ns
+Specify the number of samples for which the fade effect has to last. At
+the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence. Default is 44100.
+
+
+start_time, st
+Specify the start time of the fade effect. Default is 0.
+The value must be specified as a time duration; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+If set this option is used instead of start_sample .
+
+
+duration, d
+Specify the duration of the fade effect. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+At the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence.
+By default the duration is determined by nb_samples .
+If set this option is used instead of nb_samples .
+
+
+curve
+Set curve for fade transition.
+
+It accepts the following values:
+
+tri
+select triangular, linear slope (default)
+
+qsin
+select quarter of sine wave
+
+hsin
+select half of sine wave
+
+esin
+select exponential sine wave
+
+log
+select logarithmic
+
+par
+select inverted parabola
+
+qua
+select quadratic
+
+cub
+select cubic
+
+squ
+select square root
+
+cbr
+select cubic root
+
+
+
+
+
+
+
25.4.1 Examples# TOC
+
+
+ Fade in first 15 seconds of audio:
+
+
+ Fade out last 25 seconds of a 900 seconds audio:
+
+
afade=t=out:st=875:d=25
+
+
+
+
+
25.5 aformat# TOC
+
+
Set output format constraints for the input audio. The framework will
+negotiate the most appropriate format to minimize conversions.
+
+
It accepts the following parameters:
+
+sample_fmts
+A ’|’-separated list of requested sample formats.
+
+
+sample_rates
+A ’|’-separated list of requested sample rates.
+
+
+channel_layouts
+A ’|’-separated list of requested channel layouts.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+
+
If a parameter is omitted, all values are allowed.
+
+
Force the output to either unsigned 8-bit or signed 16-bit stereo
+
+
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+
+
+
25.6 allpass# TOC
+
+
Apply a two-pole all-pass filter with central frequency (in Hz)
+frequency , and filter-width width .
+An all-pass filter changes the audio’s frequency to phase relationship
+without changing its frequency to amplitude relationship.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
25.7 amerge# TOC
+
+
Merge two or more audio streams into a single multi-channel stream.
+
+
The filter accepts the following options:
+
+
+inputs
+Set the number of inputs. Default is 2.
+
+
+
+
+
If the channel layouts of the inputs are disjoint, and therefore compatible,
+the channel layout of the output will be set accordingly and the channels
+will be reordered as necessary. If the channel layouts of the inputs are not
+disjoint, the output will have all the channels of the first input then all
+the channels of the second input, in that order, and the channel layout of
+the output will be the default value corresponding to the total number of
+channels.
+
+
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
+is FC+BL+BR, then the output will be in 5.1, with the channels in the
+following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
+first input, b1 is the first channel of the second input).
+
+
On the other hand, if both input are in stereo, the output channels will be
+in the default order: a1, a2, b1, b2, and the channel layout will be
+arbitrarily set to 4.0, which may or may not be the expected value.
+
+
+All inputs must have the same sample rate and format.
+
+
If inputs do not have the same duration, the output will stop with the
+shortest.
+
+
+
25.7.1 Examples# TOC
+
+
+ Merge two mono files into a stereo stream:
+
+
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
+
+
+ Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
+
+
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
+
+
+
+
+
25.8 amix# TOC
+
+
Mixes multiple audio inputs into a single output.
+
+
Note that this filter only supports float samples (the amerge
+and pan audio filters support many formats). If the amix
+input has integer samples then aresample will be automatically
+inserted to perform the conversion to float samples.
+
+
For example
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+
will mix 3 input audio streams to a single output with the same duration as the
+first input and a dropout transition time of 3 seconds.
+
+
It accepts the following parameters:
+
+inputs
+The number of inputs. If unspecified, it defaults to 2.
+
+
+duration
+How to determine the end-of-stream.
+
+longest
+The duration of the longest input. (default)
+
+
+shortest
+The duration of the shortest input.
+
+
+first
+The duration of the first input.
+
+
+
+
+
+dropout_transition
+The transition time, in seconds, for volume renormalization when an input
+stream ends. The default value is 2 seconds.
+
+
+
+
+
+
25.9 anull# TOC
+
+
Pass the audio source unchanged to the output.
+
+
+
25.10 apad# TOC
+
+
Pad the end of an audio stream with silence.
+
+
This can be used together with ffmpeg
-shortest to
+extend audio streams to the same length as the video stream.
+
+
A description of the accepted options follows.
+
+
+packet_size
+Set silence packet size. Default value is 4096.
+
+
+pad_len
+Set the number of samples of silence to add to the end. After the
+value is reached, the stream is terminated. This option is mutually
+exclusive with whole_len .
+
+
+whole_len
+Set the minimum total number of samples in the output audio stream. If
+the value is longer than the input audio length, silence is added to
+the end, until the value is reached. This option is mutually exclusive
+with pad_len .
+
+
+
+
If neither the pad_len nor the whole_len option is
+set, the filter will add silence to the end of the input stream
+indefinitely.
+
+
+
25.10.1 Examples# TOC
+
+
+ Add 1024 samples of silence to the end of the input:
+
+
+ Make sure the audio output will contain at least 10000 samples, pad
+the input with silence if required:
+
+
+ Use ffmpeg
to pad the audio input with silence, so that the
+video stream will always be the shortest and will be converted
+until the end in the output file when using the shortest
+option:
+
+
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
+
+
+
+
+
25.11 aphaser# TOC
+
Add a phasing effect to the input audio.
+
+
A phaser filter creates series of peaks and troughs in the frequency spectrum.
+The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain. Default is 0.4.
+
+
+out_gain
+Set output gain. Default is 0.74
+
+
+delay
+Set delay in milliseconds. Default is 3.0.
+
+
+decay
+Set decay. Default is 0.4.
+
+
+speed
+Set modulation speed in Hz. Default is 0.5.
+
+
+type
+Set modulation type. Default is triangular.
+
+It accepts the following values:
+
+‘triangular, t ’
+‘sinusoidal, s ’
+
+
+
+
+
+
25.12 aresample# TOC
+
+
Resample the input audio to the specified parameters, using the
+libswresample library. If none are specified then the filter will
+automatically convert between its input and output.
+
+
This filter is also able to stretch/squeeze the audio data to make it match
+the timestamps or to inject silence / cut out audio to make it match the
+timestamps, do a combination of both or do neither.
+
+
The filter accepts the syntax
+[sample_rate :]resampler_options , where sample_rate
+expresses a sample rate and resampler_options is a list of
+key =value pairs, separated by ":". See the
+ffmpeg-resampler manual for the complete list of supported options.
+
+
+
25.12.1 Examples# TOC
+
+
+ Resample the input audio to 44100Hz:
+
+
+ Stretch/squeeze samples to the given timestamps, with a maximum of 1000
+samples per second compensation:
+
+
+
+
+
25.13 asetnsamples# TOC
+
+
Set the number of samples per each output audio frame.
+
+
The last output packet may contain a different number of samples, as
+the filter will flush all the remaining samples when the input audio
+signals its end.
+
+
The filter accepts the following options:
+
+
+nb_out_samples, n
+Set the number of samples per each output audio frame. The number is
+intended as the number of samples per each channel .
+Default value is 1024.
+
+
+pad, p
+If set to 1, the filter will pad the last audio frame with zeroes, so
+that the last frame will contain the same number of samples as the
+previous ones. Default value is 1.
+
+
+
+
For example, to set the number of per-frame samples to 1234 and
+disable padding for the last frame, use:
+
+
asetnsamples=n=1234:p=0
+
+
+
+
25.14 asetrate# TOC
+
+
Set the sample rate without altering the PCM data.
+This will result in a change of speed and pitch.
+
+
The filter accepts the following options:
+
+
+sample_rate, r
+Set the output sample rate. Default is 44100 Hz.
+
+
+
+
+
25.15 ashowinfo# TOC
+
+
Show a line containing various information for each input audio frame.
+The input audio is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The presentation timestamp of the input frame, in time base units; the time base
+depends on the filter input pad, and is usually 1/sample_rate .
+
+
+pts_time
+The presentation timestamp of the input frame in seconds.
+
+
+pos
+position of the frame in the input stream, -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic audio)
+
+
+fmt
+The sample format.
+
+
+chlayout
+The channel layout.
+
+
+rate
+The sample rate for the audio frame.
+
+
+nb_samples
+The number of samples (per channel) in the frame.
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
+audio, the data is treated as if all the planes were concatenated.
+
+
+plane_checksums
+A list of Adler-32 checksums for each data plane.
+
+
+
+
+
25.16 astats# TOC
+
+
Display time domain statistical information about the audio channels.
+Statistics are calculated and displayed for each audio channel and,
+where applicable, an overall figure is also given.
+
+
It accepts the following option:
+
+length
+Short window length in seconds, used for peak and trough RMS measurement.
+Default is 0.05
(50 milliseconds). Allowed range is [0.1 - 10]
.
+
+
+
+
A description of each shown parameter follows:
+
+
+DC offset
+Mean amplitude displacement from zero.
+
+
+Min level
+Minimal sample level.
+
+
+Max level
+Maximal sample level.
+
+
+Peak level dB
+RMS level dB
+Standard peak and RMS level measured in dBFS.
+
+
+RMS peak dB
+RMS trough dB
+Peak and trough values for RMS level measured over a short window.
+
+
+Crest factor
+Standard ratio of peak to RMS level (note: not in dB).
+
+
+Flat factor
+Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
+(i.e. either Min level or Max level ).
+
+
+Peak count
+Number of occasions (not the number of samples) that the signal attained either
+Min level or Max level .
+
+
+
+
+
25.17 astreamsync# TOC
+
+
Forward two audio streams and control the order the buffers are forwarded.
+
+
The filter accepts the following options:
+
+
+expr, e
+Set the expression deciding which stream should be
+forwarded next: if the result is negative, the first stream is forwarded; if
+the result is positive or zero, the second stream is forwarded. It can use
+the following variables:
+
+
+b1 b2
+number of buffers forwarded so far on each stream
+
+s1 s2
+number of samples forwarded so far on each stream
+
+t1 t2
+current timestamp of each stream
+
+
+
+The default value is t1-t2
, which means to always forward the stream
+that has a smaller timestamp.
+
+
+
+
+
25.17.1 Examples# TOC
+
+
Stress-test amerge
by randomly sending buffers on the wrong
+input, while avoiding too much of a desynchronization:
+
+
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+
+
+
25.18 asyncts# TOC
+
+
Synchronize audio data with timestamps by squeezing/stretching it and/or
+dropping samples/adding silence when needed.
+
+
This filter is not built by default, please use aresample to do squeezing/stretching.
+
+
It accepts the following parameters:
+
+compensate
+Enable stretching/squeezing the data to make it match the timestamps. Disabled
+by default. When disabled, time gaps are covered with silence.
+
+
+min_delta
+The minimum difference between timestamps and audio data (in seconds) to trigger
+adding/dropping samples. The default value is 0.1. If you get an imperfect
+sync with this filter, try setting this parameter to 0.
+
+
+max_comp
+The maximum compensation in samples per second. Only relevant with compensate=1.
+The default value is 500.
+
+
+first_pts
+Assume that the first PTS should be this value. The time base is 1 / sample
+rate. This allows for padding/trimming at the start of the stream. By default,
+no assumption is made about the first frame’s expected PTS, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative PTS due to encoder delay.
+
+
+
+
+
+
25.19 atempo# TOC
+
+
Adjust audio tempo.
+
+
The filter accepts exactly one parameter, the audio tempo. If not
+specified then the filter will assume nominal 1.0 tempo. Tempo must
+be in the [0.5, 2.0] range.
+
+
+
25.19.1 Examples# TOC
+
+
+ Slow down audio to 80% tempo:
+
+
+ To speed up audio to 125% tempo:
+
+
+
+
+
25.20 atrim# TOC
+
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Timestamp (in seconds) of the start of the section to keep. I.e. the audio
+sample with the timestamp start will be the first sample in the output.
+
+
+end
+Specify time of the first audio sample that will be dropped, i.e. the
+audio sample immediately preceding the one with the timestamp end will be
+the last sample in the output.
+
+
+start_pts
+Same as start , except this option sets the start timestamp in samples
+instead of seconds.
+
+
+end_pts
+Same as end , except this option sets the end timestamp in samples instead
+of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_sample
+The number of the first sample that should be output.
+
+
+end_sample
+The number of the first sample that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _sample options simply count the
+samples that pass through the filter. So start/end_pts and start/end_sample will
+give different results when the timestamps are wrong, inexact or do not start at
+zero. Also note that this filter does not modify the timestamps. If you wish
+to have the output timestamps start at zero, insert the asetpts filter after the
+atrim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all samples that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple atrim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -af atrim=60:120
+
+
+ Keep only the first 1000 samples:
+
+
ffmpeg -i INPUT -af atrim=end_sample=1000
+
+
+
+
+
+
25.21 bandpass# TOC
+
+
Apply a two-pole Butterworth band-pass filter with central
+frequency frequency , and (3dB-point) band-width width.
+The csg option selects a constant skirt gain (peak gain = Q)
+instead of the default: constant 0dB peak gain.
+The filter rolls off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+csg
+Constant skirt gain if set to 1. Defaults to 0.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
25.22 bandreject# TOC
+
+
Apply a two-pole Butterworth band-reject filter with central
+frequency frequency , and (3dB-point) band-width width .
+The filter rolls off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
25.23 bass# TOC
+
+
Boost or cut the bass (lower) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at 0 Hz. Its useful range is about -20
+(for a large cut) to +20 (for a large boost).
+Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 100
Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep the filter’s shelf transition is.
+
+
+
+
+
25.24 biquad# TOC
+
+
Apply a biquad IIR filter with the given coefficients.
+Where b0 , b1 , b2 and a0 , a1 , a2
+are the numerator and denominator coefficients respectively.
+
+
+
25.25 bs2b# TOC
+
Bauer stereo to binaural transformation, which improves headphone listening of
+stereo audio records.
+
+
It accepts the following parameters:
+
+profile
+Pre-defined crossfeed level.
+
+default
+Default level (fcut=700, feed=50).
+
+
+cmoy
+Chu Moy circuit (fcut=700, feed=60).
+
+
+jmeier
+Jan Meier circuit (fcut=650, feed=95).
+
+
+
+
+
+fcut
+Cut frequency (in Hz).
+
+
+feed
+Feed level (in Hz).
+
+
+
+
+
+
25.26 channelmap# TOC
+
+
Remap input channels to new locations.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the output stream.
+
+
+map
+Map channels from input to output. The argument is a ’|’-separated list of
+mappings, each in the in_channel -out_channel
or
+in_channel form. in_channel can be either the name of the input
+channel (e.g. FL for front left) or its index in the input channel layout.
+out_channel is the name of the output channel or its index in the output
+channel layout. If out_channel is not given then it is implicitly an
+index, starting with zero and increasing by one for each mapping.
+
+
+
+
If no mapping is present, the filter will implicitly map input channels to
+output channels, preserving indices.
+
+
For example, assuming a 5.1+downmix input MOV file,
+
+
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+
will create an output WAV file tagged as stereo from the downmix channels of
+the input.
+
+
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
+
+
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+
+
+
25.27 channelsplit# TOC
+
+
Split each channel from an input audio stream into a separate output stream.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the input stream. The default is "stereo".
+
+
+
+
For example, assuming a stereo input MP3 file,
+
+
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+
will create an output Matroska file with two audio streams, one containing only
+the left channel and the other the right channel.
+
+
Split a 5.1 WAV file into per-channel files:
+
+
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+
+
+
25.28 compand# TOC
+
Compress or expand the audio’s dynamic range.
+
+
It accepts the following parameters:
+
+
+attacks
+decays
+A list of times in seconds for each channel over which the instantaneous level
+of the input signal is averaged to determine its volume. attacks refers to
+increase of volume and decays refers to decrease of volume. For most
+situations, the attack time (response to the audio getting louder) should be
+shorter than the decay time, because the human ear is more sensitive to sudden
+loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
+a typical value for decay is 0.8 seconds.
+
+
+points
+A list of points for the transfer function, specified in dB relative to the
+maximum possible signal amplitude. Each key points list must be defined using
+the following syntax: x0/y0|x1/y1|x2/y2|....
or
+x0/y0 x1/y1 x2/y2 ....
+
+The input values must be in strictly increasing order but the transfer function
+does not have to be monotonically rising. The point 0/0
is assumed but
+may be overridden (by 0/out-dBn
). Typical values for the transfer
+function are -70/-70|-60/-20
.
+
+
+soft-knee
+Set the curve radius in dB for all joints. It defaults to 0.01.
+
+
+gain
+Set the additional gain in dB to be applied at all points on the transfer
+function. This allows for easy adjustment of the overall gain.
+It defaults to 0.
+
+
+volume
+Set an initial volume, in dB, to be assumed for each channel when filtering
+starts. This permits the user to supply a nominal level initially, so that, for
+example, a very large gain is not applied to initial signal levels before the
+companding has begun to operate. A typical value for audio which is initially
+quiet is -90 dB. It defaults to 0.
+
+
+delay
+Set a delay, in seconds. The input audio is analyzed immediately, but audio is
+delayed before being fed to the volume adjuster. Specifying a delay
+approximately equal to the attack/decay times allows the filter to effectively
+operate in predictive rather than reactive mode. It defaults to 0.
+
+
+
+
+
+
25.28.1 Examples# TOC
+
+
+ Make music with both quiet and loud passages suitable for listening to in a
+noisy environment:
+
+
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
+
+
+ A noise gate for when the noise is at a lower level than the signal:
+
+
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
+
+
+ Here is another noise gate, this time for when the noise is at a higher level
+than the signal (making it, in some ways, similar to squelch):
+
+
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
+
+
+
+
+
25.29 earwax# TOC
+
+
Make audio easier to listen to on headphones.
+
+
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
+so that when listened to on headphones the stereo image is moved from
+inside your head (standard for headphones) to outside and in front of
+the listener (standard for speakers).
+
+
Ported from SoX.
+
+
+
25.30 equalizer# TOC
+
+
Apply a two-pole peaking equalisation (EQ) filter. With this
+filter, the signal-level at and around a selected frequency can
+be increased or decreased, whilst (unlike bandpass and bandreject
+filters) that at all other frequencies is unchanged.
+
+
In order to produce complex equalisation curves, this filter can
+be given several times, each with a different central frequency.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+gain, g
+Set the required gain or attenuation in dB.
+Beware of clipping when using a positive gain.
+
+
+
+
+
25.30.1 Examples# TOC
+
+ Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
+
+
equalizer=f=1000:width_type=h:width=200:g=-10
+
+
+ Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
+
+
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
+
+
+
+
+
25.31 flanger# TOC
+
Apply a flanging effect to the audio.
+
+
The filter accepts the following options:
+
+
+delay
+Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
+
+
+depth
+Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
+
+
+regen
+Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
+Default value is 0.
+
+
+width
+Set percentage of delayed signal mixed with original. Range from 0 to 100.
+Default value is 71.
+
+
+speed
+Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
+
+
+shape
+Set swept wave shape, can be triangular or sinusoidal .
+Default value is sinusoidal .
+
+
+phase
+Set swept wave percentage-shift for multi channel. Range from 0 to 100.
+Default value is 25.
+
+
+interp
+Set delay-line interpolation, linear or quadratic .
+Default is linear .
+
+
+
+
+
25.32 highpass# TOC
+
+
Apply a high-pass filter with 3dB point frequency.
+The filter can be either single-pole, or double-pole (the default).
+The filter roll off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 3000.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
25.33 join# TOC
+
+
Join multiple input streams into one multi-channel stream.
+
+
It accepts the following parameters:
+
+inputs
+The number of input streams. It defaults to 2.
+
+
+channel_layout
+The desired output channel layout. It defaults to stereo.
+
+
+map
+Map channels from inputs to output. The argument is a ’|’-separated list of
+mappings, each in the input_idx .in_channel -out_channel
+form. input_idx is the 0-based index of the input stream. in_channel
+can be either the name of the input channel (e.g. FL for front left) or its
+index in the specified input stream. out_channel is the name of the output
+channel.
+
+
+
+
The filter will attempt to guess the mappings when they are not specified
+explicitly. It does so by first trying to find an unused matching input channel
+and if that fails it picks the first unused input channel.
+
+
Join 3 inputs (with properly set channel layouts):
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+
+
Build a 5.1 output from 6 single-channel streams:
+
+
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+
+
+
25.34 ladspa# TOC
+
+
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-ladspa
.
+
+
+file, f
+Specifies the name of LADSPA plugin library to load. If the environment
+variable LADSPA_PATH
is defined, the LADSPA plugin is searched in
+each one of the directories specified by the colon separated list in
+LADSPA_PATH
, otherwise in the standard LADSPA paths, which are in
+this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
+/usr/lib/ladspa/ .
+
+
+plugin, p
+Specifies the plugin within the library. Some libraries contain only
+one plugin, but others contain many of them. If this is not set, the filter
+will list all available plugins within the specified library.
+
+
+controls, c
+Set the ’|’ separated list of controls which are zero or more floating point
+values that determine the behavior of the loaded plugin (for example delay,
+threshold or gain).
+Controls need to be defined using the following syntax:
+c0=value0 |c1=value1 |c2=value2 |..., where
+valuei is the value set on the i -th control.
+If controls is set to help
, all available controls and
+their valid ranges are printed.
+
+
+sample_rate, s
+Specify the sample rate, defaults to 44100. Only used if the plugin has
+zero inputs.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame, default
+is 1024. Only used if the plugin has zero inputs.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified duration,
+as the generated audio is always cut at the end of a complete frame.
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+Only used if the plugin has zero inputs.
+
+
+
+
+
+
25.34.1 Examples# TOC
+
+
+ List all available plugins within amp (LADSPA example plugin) library:
+
+
+ List all available controls and their valid ranges for vcf_notch
+plugin from VCF
library:
+
+
ladspa=f=vcf:p=vcf_notch:c=help
+
+
+ Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
+plugin library:
+
+
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
+
+
+ Add reverberation to the audio using TAP-plugins
+(Tom’s Audio Processing plugins):
+
+
ladspa=file=tap_reverb:tap_reverb
+
+
+ Generate white noise, with 0.2 amplitude:
+
+
ladspa=file=cmt:noise_source_white:c=c0=.2
+
+
+ Generate 20 bpm clicks using plugin C* Click - Metronome
from the
+C* Audio Plugin Suite
(CAPS) library:
+
+
ladspa=file=caps:Click:c=c1=20
+
+
+ Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
+
+
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
+
+
+
+
+
25.34.2 Commands# TOC
+
+
This filter supports the following commands:
+
+cN
+Modify the N -th control value.
+
+If the specified value is not valid, it is ignored and prior one is kept.
+
+
+
+
+
25.35 lowpass# TOC
+
+
Apply a low-pass filter with 3dB point frequency.
+The filter can be either single-pole or double-pole (the default).
+The filter roll off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 500.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
25.36 pan# TOC
+
+
Mix channels with specific gain levels. The filter accepts the output
+channel layout followed by a set of channels definitions.
+
+
This filter is also designed to efficiently remap the channels of an audio
+stream.
+
+
The filter accepts parameters of the form:
+"l |outdef |outdef |..."
+
+
+l
+output channel layout or number of channels
+
+
+outdef
+output channel specification, of the form:
+"out_name =[gain *]in_name [+[gain *]in_name ...]"
+
+
+out_name
+output channel to define, either a channel name (FL, FR, etc.) or a channel
+number (c0, c1, etc.)
+
+
+gain
+multiplicative coefficient for the channel, 1 leaving the volume unchanged
+
+
+in_name
+input channel to use, see out_name for details; it is not possible to mix
+named and numbered input channels
+
+
+
+
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
+that specification will be renormalized so that the total is 1, thus
+avoiding clipping noise.
+
+
+
25.36.1 Mixing examples# TOC
+
+
For example, if you want to down-mix from stereo to mono, but with a bigger
+factor for the left channel:
+
+
pan=1c|c0=0.9*c0+0.1*c1
+
+
+
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
+7-channels surround:
+
+
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+
+
Note that ffmpeg
integrates a default down-mix (and up-mix) system
+that should be preferred (see "-ac" option) unless you have very specific
+needs.
+
+
+
25.36.2 Remapping examples# TOC
+
+
The channel remapping will be effective if, and only if:
+
+
+ gain coefficients are zeroes or ones,
+ only one input per channel output,
+
+
+
If all these conditions are satisfied, the filter will notify the user ("Pure
+channel mapping detected"), and use an optimized and lossless method to do the
+remapping.
+
+
For example, if you have a 5.1 source and want a stereo audio stream by
+dropping the extra channels:
+
+
pan="stereo| c0=FL | c1=FR"
+
+
+
Given the same source, you can also switch front left and front right channels
+and keep the input channel layout:
+
+
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
+
+
+
If the input is a stereo audio stream, you can mute the front left channel (and
+still keep the stereo channel layout) with:
+
+
+
Still with a stereo audio stream input, you can copy the right channel in both
+front left and right:
+
+
pan="stereo| c0=FR | c1=FR"
+
+
+
+
25.37 replaygain# TOC
+
+
ReplayGain scanner filter. This filter takes an audio stream as an input and
+outputs it unchanged.
+At end of filtering it displays track_gain
and track_peak
.
+
+
+
25.38 resample# TOC
+
+
Convert the audio sample format, sample rate and channel layout. It is
+not meant to be used directly.
+
+
+
25.39 silencedetect# TOC
+
+
Detect silence in an audio stream.
+
+
This filter logs a message when it detects that the input audio volume is less
+or equal to a noise tolerance value for a duration greater or equal to the
+minimum detected noise duration.
+
+
The printed times and duration are expressed in seconds.
+
+
The filter accepts the following options:
+
+
+duration, d
+Set silence duration until notification (default is 2 seconds).
+
+
+noise, n
+Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
+specified value) or amplitude ratio. Default is -60dB, or 0.001.
+
+
+
+
+
25.39.1 Examples# TOC
+
+
+ Detect 5 seconds of silence with -50dB noise tolerance:
+
+
silencedetect=n=-50dB:d=5
+
+
+ Complete example with ffmpeg
to detect silence with 0.0001 noise
+tolerance in silence.mp3 :
+
+
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
+
+
+
+
+
25.40 silenceremove# TOC
+
+
Remove silence from the beginning, middle or end of the audio.
+
+
The filter accepts the following options:
+
+
+start_periods
+This value is used to indicate if audio should be trimmed at beginning of
+the audio. A value of zero indicates no silence should be trimmed from the
+beginning. When specifying a non-zero value, it trims audio up until it
+finds non-silence. Normally, when trimming silence from beginning of audio
+the start_periods will be 1
but it can be increased to higher
+values to trim all audio up to specific count of non-silence periods.
+Default value is 0
.
+
+
+start_duration
+Specify the amount of time that non-silence must be detected before it stops
+trimming audio. By increasing the duration, bursts of noises can be treated
+as silence and trimmed off. Default value is 0
.
+
+
+start_threshold
+This indicates what sample value should be treated as silence. For digital
+audio, a value of 0
may be fine but for audio recorded from analog,
+you may wish to increase the value to account for background noise.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+stop_periods
+Set the count for trimming silence from the end of audio.
+To remove silence from the middle of a file, specify a stop_periods
+that is negative. This value is then treated as a positive value and is
+used to indicate the effect should restart processing as specified by
+start_periods , making it suitable for removing periods of silence
+in the middle of the audio.
+Default value is 0
.
+
+
+stop_duration
+Specify a duration of silence that must exist before audio is not copied any
+more. By specifying a higher duration, silence that is wanted can be left in
+the audio.
+Default value is 0
.
+
+
+stop_threshold
+This is the same as start_threshold but for trimming silence from
+the end of audio.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+leave_silence
+This indicates that stop_duration length of audio should be left intact
+at the beginning of each period of silence.
+For example, if you want to remove long pauses between words but do not want
+to remove the pauses completely. Default value is 0
.
+
+
+
+
+
+
25.40.1 Examples# TOC
+
+
+ The following example shows how this filter can be used to start a recording
+that does not contain the delay at the start which usually occurs between
+pressing the record button and the start of the performance:
+
+
silenceremove=1:5:0.02
+
+
+
+
+
25.41 treble# TOC
+
+
Boost or cut treble (upper) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at whichever is the lower of ~22 kHz and the
+Nyquist frequency. Its useful range is about -20 (for a large cut)
+to +20 (for a large boost). Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 3000
Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep is the filter’s shelf transition.
+
+
+
+
+
25.42 volume# TOC
+
+
Adjust the input audio volume.
+
+
It accepts the following parameters:
+
+volume
+Set audio volume expression.
+
+Output values are clipped to the maximum value.
+
+The output audio volume is given by the relation:
+
+
output_volume = volume * input_volume
+
+
+The default value for volume is "1.0".
+
+
+precision
+This parameter represents the mathematical precision.
+
+It determines which input sample formats will be allowed, which affects the
+precision of the volume scaling.
+
+
+fixed
+8-bit fixed-point; this limits input sample format to U8, S16, and S32.
+
+float
+32-bit floating-point; this limits input sample format to FLT. (default)
+
+double
+64-bit floating-point; this limits input sample format to DBL.
+
+
+
+
+replaygain
+Choose the behaviour on encountering ReplayGain side data in input frames.
+
+
+drop
+Remove ReplayGain side data, ignoring its contents (the default).
+
+
+ignore
+Ignore ReplayGain side data, but leave it in the frame.
+
+
+track
+Prefer the track gain, if present.
+
+
+album
+Prefer the album gain, if present.
+
+
+
+
+replaygain_preamp
+Pre-amplification gain in dB to apply to the selected replaygain gain.
+
+Default value for replaygain_preamp is 0.0.
+
+
+eval
+Set when the volume expression is evaluated.
+
+It accepts the following values:
+
+‘once ’
+only evaluate expression once during the filter initialization, or
+when the ‘volume ’ command is sent
+
+
+‘frame ’
+evaluate expression for each incoming frame
+
+
+
+Default value is ‘once ’.
+
+
+
+
The volume expression can contain the following parameters.
+
+
+n
+frame number (starting at zero)
+
+nb_channels
+number of channels
+
+nb_consumed_samples
+number of samples consumed by the filter
+
+nb_samples
+number of samples in the current frame
+
+pos
+original frame position in the file
+
+pts
+frame PTS
+
+sample_rate
+sample rate
+
+startpts
+PTS at start of stream
+
+startt
+time at start of stream
+
+t
+frame time
+
+tb
+timestamp timebase
+
+volume
+last set volume value
+
+
+
+
Note that when eval is set to ‘once ’ only the
+sample_rate and tb variables are available, all other
+variables will evaluate to NAN.
+
+
+
25.42.1 Commands# TOC
+
+
This filter supports the following commands:
+
+volume
+Modify the volume expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+replaygain_noclip
+Prevent clipping by limiting the gain applied.
+
+Default value for replaygain_noclip is 1.
+
+
+
+
+
+
25.42.2 Examples# TOC
+
+
+
+
+
25.43 volumedetect# TOC
+
+
Detect the volume of the input audio.
+
+
The filter has no parameters. The input is not modified. Statistics about
+the volume will be printed in the log when the input stream end is reached.
+
+
In particular it will show the mean volume (root mean square), maximum
+volume (on a per-sample basis), and the beginning of a histogram of the
+registered volume values (from the maximum value to a cumulated 1/1000 of
+the samples).
+
+
All volumes are in decibels relative to the maximum PCM value.
+
+
+
25.43.1 Examples# TOC
+
+
Here is an excerpt of the output:
+
+
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
+
+
+
It means that:
+
+ The mean square energy is approximately -27 dB, or 10^-2.7.
+ The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
+ There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
+
+
+
In other words, raising the volume by +4 dB does not cause any clipping,
+raising it by +5 dB causes clipping for 6 samples, etc.
+
+
+
+
26 Audio Sources# TOC
+
+
Below is a description of the currently available audio sources.
+
+
+
26.1 abuffer# TOC
+
+
Buffer audio frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/asrc_abuffer.h .
+
+
It accepts the following parameters:
+
+time_base
+The timebase which will be used for timestamps of submitted frames. It must be
+either a floating-point number or in numerator /denominator form.
+
+
+sample_rate
+The sample rate of the incoming audio buffers.
+
+
+sample_fmt
+The sample format of the incoming audio buffers.
+Either a sample format name or its corresponding integer representation from
+the enum AVSampleFormat in libavutil/samplefmt.h
+
+
+channel_layout
+The channel layout of the incoming audio buffers.
+Either a channel layout name from channel_layout_map in
+libavutil/channel_layout.c or its corresponding integer representation
+from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
+
+
+channels
+The number of channels of the incoming audio buffers.
+If both channels and channel_layout are specified, then they
+must be consistent.
+
+
+
+
+
+
26.1.1 Examples# TOC
+
+
+
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+
+
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
+Since the sample format with name "s16p" corresponds to the number
+6 and the "stereo" channel layout corresponds to the value 0x3, this is
+equivalent to:
+
+
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+
+
+
26.2 aevalsrc# TOC
+
+
Generate an audio signal specified by an expression.
+
+
This source accepts in input one or more expressions (one for each
+channel), which are evaluated and used to generate a corresponding
+audio signal.
+
+
This source accepts the following options:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. In case the
+channel_layout option is not specified, the selected channel layout
+depends on the number of provided expressions. Otherwise the last
+specified expression is applied to the remaining output channels.
+
+
+channel_layout, c
+Set the channel layout. The number of channels in the specified layout
+must be equal to the number of specified expressions.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified
+duration, as the generated audio is always cut at the end of a
+complete frame.
+
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame,
+default to 1024.
+
+
+sample_rate, s
+Specify the sample rate, default to 44100.
+
+
+
+
Each expression in exprs can contain the following constants:
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+t
+time of the evaluated sample expressed in seconds, starting from 0
+
+
+s
+sample rate
+
+
+
+
+
+
26.2.1 Examples# TOC
+
+
+ Generate silence:
+
+
+ Generate a sin signal with frequency of 440 Hz, set sample rate to
+8000 Hz:
+
+
aevalsrc="sin(440*2*PI*t):s=8000"
+
+
+ Generate a two channels signal, specify the channel layout (Front
+Center + Back Center) explicitly:
+
+
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
+
+
+ Generate white noise:
+
+
aevalsrc="-2+random(0)"
+
+
+ Generate an amplitude modulated signal:
+
+
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
+
+
+ Generate 2.5 Hz binaural beats on a 360 Hz carrier:
+
+
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
+
+
+
+
+
+
26.3 anullsrc# TOC
+
+
The null audio source returns unprocessed audio frames. It is mainly useful
+as a template and to be employed in analysis / debugging tools, or as
+the source for filters which ignore the input data (for example the sox
+synth filter).
+
+
This source accepts the following options:
+
+
+channel_layout, cl
+
+Specifies the channel layout, and can be either an integer or a string
+representing a channel layout. The default value of channel_layout
+is "stereo".
+
+Check the channel_layout_map definition in
+libavutil/channel_layout.c for the mapping between strings and
+channel layout values.
+
+
+sample_rate, r
+Specifies the sample rate, and defaults to 44100.
+
+
+nb_samples, n
+Set the number of samples per requested frames.
+
+
+
+
+
+
26.3.1 Examples# TOC
+
+
+ Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
+
+
+ Do the same operation with a more obvious syntax:
+
+
anullsrc=r=48000:cl=mono
+
+
+
+
All the parameters need to be explicitly defined.
+
+
+
26.4 flite# TOC
+
+
Synthesize a voice utterance using the libflite library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libflite
.
+
+
Note that the flite library is not thread-safe.
+
+
The filter accepts the following options:
+
+
+list_voices
+If set to 1, list the names of the available voices and exit
+immediately. Default value is 0.
+
+
+nb_samples, n
+Set the maximum number of samples per frame. Default value is 512.
+
+
+textfile
+Set the filename containing the text to speak.
+
+
+text
+Set the text to speak.
+
+
+voice, v
+Set the voice to use for the speech synthesis. Default value is
+kal
. See also the list_voices option.
+
+
+
+
+
26.4.1 Examples# TOC
+
+
+ Read from file speech.txt , and synthesize the text using the
+standard flite voice:
+
+
flite=textfile=speech.txt
+
+
+ Read the specified text selecting the slt
voice:
+
+
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Input text to ffmpeg:
+
+
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Make ffplay speak the specified text, using flite
and
+the lavfi
device:
+
+
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
+
+
+
+
For more information about libflite, check:
+http://www.speech.cs.cmu.edu/flite/
+
+
+
26.5 sine# TOC
+
+
Generate an audio signal made of a sine wave with amplitude 1/8.
+
+
The audio signal is bit-exact.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the carrier frequency. Default is 440 Hz.
+
+
+beep_factor, b
+Enable a periodic beep every second with frequency beep_factor times
+the carrier frequency. Default is 0, meaning the beep is disabled.
+
+
+sample_rate, r
+Specify the sample rate, default is 44100.
+
+
+duration, d
+Specify the duration of the generated audio stream.
+
+
+samples_per_frame
+Set the number of samples per output frame, default is 1024.
+
+
+
+
+
26.5.1 Examples# TOC
+
+
+ Generate a simple 440 Hz sine wave:
+
+
+ Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
+
+
sine=220:4:d=5
+sine=f=220:b=4:d=5
+sine=frequency=220:beep_factor=4:duration=5
+
+
+
+
+
+
+
27 Audio Sinks# TOC
+
+
Below is a description of the currently available audio sinks.
+
+
+
27.1 abuffersink# TOC
+
+
Buffer audio frames, and make them available to the end of the filter chain.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVABufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
27.2 anullsink# TOC
+
+
Null audio sink; do absolutely nothing with the input audio. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
28 Video Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the video filters included in your
+build.
+
+
Below is a description of the currently available video filters.
+
+
+
28.1 alphaextract# TOC
+
+
Extract the alpha component from the input as a grayscale video. This
+is especially useful with the alphamerge filter.
+
+
+
28.2 alphamerge# TOC
+
+
Add or replace the alpha component of the primary input with the
+grayscale value of a second input. This is intended for use with
+alphaextract to allow the transmission or storage of frame
+sequences that have alpha in a format that doesn’t support an alpha
+channel.
+
+
For example, to reconstruct full frames from a normal YUV-encoded video
+and a separate video created with alphaextract , you might use:
+
+
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+
+
Since this filter is designed for reconstruction, it operates on frame
+sequences without considering timestamps, and terminates when either
+input reaches end of stream. This will cause problems if your encoding
+pipeline drops frames. If you’re trying to apply an image as an
+overlay to a video stream, consider the overlay filter instead.
+
+
+
28.3 ass# TOC
+
+
Same as the subtitles filter, except that it doesn’t require libavcodec
+and libavformat to work. On the other hand, it is limited to ASS (Advanced
+Substation Alpha) subtitles files.
+
+
This filter accepts the following option in addition to the common options from
+the subtitles filter:
+
+
+shaping
+Set the shaping engine
+
+Available values are:
+
+‘auto ’
+The default libass shaping engine, which is the best available.
+
+‘simple ’
+Fast, font-agnostic shaper that can do only substitutions
+
+‘complex ’
+Slower shaper using OpenType for substitutions and positioning
+
+
+
+The default is auto
.
+
+
+
+
+
28.4 bbox# TOC
+
+
Compute the bounding box for the non-black pixels in the input frame
+luminance plane.
+
+
This filter computes the bounding box containing all the pixels with a
+luminance value greater than the minimum allowed value.
+The parameters describing the bounding box are printed on the filter
+log.
+
+
The filter accepts the following option:
+
+
+min_val
+Set the minimal luminance value. Default is 16
.
+
+
+
+
+
28.5 blackdetect# TOC
+
+
Detect video intervals that are (almost) completely black. Can be
+useful to detect chapter transitions, commercials, or invalid
+recordings. Output lines contains the time for the start, end and
+duration of the detected black interval expressed in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
The filter accepts the following options:
+
+
+black_min_duration, d
+Set the minimum detected black duration expressed in seconds. It must
+be a non-negative floating point number.
+
+Default value is 2.0.
+
+
+picture_black_ratio_th, pic_th
+Set the threshold for considering a picture "black".
+Express the minimum value for the ratio:
+
+
nb_black_pixels / nb_pixels
+
+
+for which a picture is considered black.
+Default value is 0.98.
+
+
+pixel_black_th, pix_th
+Set the threshold for considering a pixel "black".
+
+The threshold expresses the maximum pixel luminance value for which a
+pixel is considered "black". The provided value is scaled according to
+the following equation:
+
+
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+
+luminance_range_size and luminance_minimum_value depend on
+the input video format, the range is [0-255] for YUV full-range
+formats and [16-235] for YUV non full-range formats.
+
+Default value is 0.10.
+
+
+
+
The following example sets the maximum pixel threshold to the minimum
+value, and detects only black intervals of 2 or more seconds:
+
+
blackdetect=d=2:pix_th=0.00
+
+
+
+
28.6 blackframe# TOC
+
+
Detect frames that are (almost) completely black. Can be useful to
+detect chapter transitions or commercials. Output lines consist of
+the frame number of the detected frame, the percentage of blackness,
+the position in the file if known or -1 and the timestamp in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
It accepts the following parameters:
+
+
+amount
+The percentage of the pixels that have to be below the threshold; it defaults to
+98
.
+
+
+threshold, thresh
+The threshold below which a pixel value is considered black; it defaults to
+32
.
+
+
+
+
+
+
28.7 blend, tblend# TOC
+
+
Blend two video frames into each other.
+
+
The blend
filter takes two input streams and outputs one
+stream, the first input is the "top" layer and second input is
+"bottom" layer. Output terminates when shortest input terminates.
+
+
The tblend
(time blend) filter takes two consecutive frames
+from one single stream, and outputs the result obtained by blending
+the new frame on top of the old frame.
+
+
A description of the accepted options follows.
+
+
+c0_mode
+c1_mode
+c2_mode
+c3_mode
+all_mode
+Set blend mode for specific pixel component or all pixel components in case
+of all_mode . Default value is normal
.
+
+Available values for component modes are:
+
+‘addition ’
+‘and ’
+‘average ’
+‘burn ’
+‘darken ’
+‘difference ’
+‘difference128 ’
+‘divide ’
+‘dodge ’
+‘exclusion ’
+‘hardlight ’
+‘lighten ’
+‘multiply ’
+‘negation ’
+‘normal ’
+‘or ’
+‘overlay ’
+‘phoenix ’
+‘pinlight ’
+‘reflect ’
+‘screen ’
+‘softlight ’
+‘subtract ’
+‘vividlight ’
+‘xor ’
+
+
+
+c0_opacity
+c1_opacity
+c2_opacity
+c3_opacity
+all_opacity
+Set blend opacity for specific pixel component or all pixel components in case
+of all_opacity . Only used in combination with pixel component blend modes.
+
+
+c0_expr
+c1_expr
+c2_expr
+c3_expr
+all_expr
+Set blend expression for specific pixel component or all pixel components in case
+of all_expr . Note that related mode options will be ignored if those are set.
+
+The expressions can use the following variables:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+the coordinates of the current sample
+
+
+W
+H
+the width and height of currently filtered plane
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+TOP, A
+Value of pixel component at current location for first video frame (top layer).
+
+
+BOTTOM, B
+Value of pixel component at current location for second video frame (bottom layer).
+
+
+
+
+shortest
+Force termination when the shortest input terminates. Default is
+0
. This option is only defined for the blend
filter.
+
+
+repeatlast
+Continue applying the last bottom frame after the end of the stream. A value of
+0
disables the filter after the last frame of the bottom layer is reached.
+Default is 1
. This option is only defined for the blend
filter.
+
+
+
+
+
28.7.1 Examples# TOC
+
+
+ Apply transition from bottom layer to top layer in first 10 seconds:
+
+
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
+
+
+ Apply 1x1 checkerboard effect:
+
+
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
+
+
+ Apply uncover left effect:
+
+
blend=all_expr='if(gte(N*SW+X,W),A,B)'
+
+
+ Apply uncover down effect:
+
+
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
+
+
+ Apply uncover up-left effect:
+
+
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
+
+
+ Display differences between the current and the previous frame:
+
+
tblend=all_mode=difference128
+
+
+
+
+
28.8 boxblur# TOC
+
+
Apply a boxblur algorithm to the input video.
+
+
It accepts the following parameters:
+
+
+luma_radius, lr
+luma_power, lp
+chroma_radius, cr
+chroma_power, cp
+alpha_radius, ar
+alpha_power, ap
+
+
+
A description of the accepted options follows.
+
+
+luma_radius, lr
+chroma_radius, cr
+alpha_radius, ar
+Set an expression for the box radius in pixels used for blurring the
+corresponding input plane.
+
+The radius value must be a non-negative number, and must not be
+greater than the value of the expression min(w,h)/2
for the
+luma and alpha planes, and of min(cw,ch)/2
for the chroma
+planes.
+
+Default value for luma_radius is "2". If not specified,
+chroma_radius and alpha_radius default to the
+corresponding value set for luma_radius .
+
+The expressions can contain the following constants:
+
+w
+h
+The input width and height in pixels.
+
+
+cw
+ch
+The input chroma image width and height in pixels.
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p", hsub is 2 and vsub is 1.
+
+
+
+
+luma_power, lp
+chroma_power, cp
+alpha_power, ap
+Specify how many times the boxblur filter is applied to the
+corresponding plane.
+
+Default value for luma_power is 2. If not specified,
+chroma_power and alpha_power default to the
+corresponding value set for luma_power .
+
+A value of 0 will disable the effect.
+
+
+
+
+
28.8.1 Examples# TOC
+
+
+ Apply a boxblur filter with the luma, chroma, and alpha radii
+set to 2:
+
+
boxblur=luma_radius=2:luma_power=1
+boxblur=2:1
+
+
+ Set the luma radius to 2, and alpha and chroma radius to 0:
+
+
+ Set the luma and chroma radii to a fraction of the video dimension:
+
+
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
+
+
+
+
+
28.9 codecview# TOC
+
+
Visualize information exported by some codecs.
+
+
Some codecs can export information through frames using side-data or other
+means. For example, some MPEG based codecs export motion vectors through the
+export_mvs flag in the codec flags2 option.
+
+
The filter accepts the following option:
+
+
+mv
+Set motion vectors to visualize.
+
+Available flags for mv are:
+
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+
+
+
28.9.1 Examples# TOC
+
+
+ Visualize multi-directional MVs from P and B-frames using ffplay
:
+
+
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
+
+
+
+
+
28.10 colorbalance# TOC
+
Modify intensity of primary colors (red, green and blue) of input frames.
+
+
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
+regions for the red-cyan, green-magenta or blue-yellow balance.
+
+
A positive adjustment value shifts the balance towards the primary color, a negative
+value towards the complementary color.
+
+
The filter accepts the following options:
+
+
+rs
+gs
+bs
+Adjust red, green and blue shadows (darkest pixels).
+
+
+rm
+gm
+bm
+Adjust red, green and blue midtones (medium pixels).
+
+
+rh
+gh
+bh
+Adjust red, green and blue highlights (brightest pixels).
+
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+
+
+
28.10.1 Examples# TOC
+
+
+ Add red color cast to shadows:
+
+
+
+
+
28.11 colorlevels# TOC
+
+
Adjust video input frames using levels.
+
+
The filter accepts the following options:
+
+
+rimin
+gimin
+bimin
+aimin
+Adjust red, green, blue and alpha input black point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+rimax
+gimax
+bimax
+aimax
+Adjust red, green, blue and alpha input white point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
+
+Input levels are used to lighten highlights (bright tones), darken shadows
+(dark tones), change the balance of bright and dark tones.
+
+
+romin
+gomin
+bomin
+aomin
+Adjust red, green, blue and alpha output black point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
+
+
+romax
+gomax
+bomax
+aomax
+Adjust red, green, blue and alpha output white point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
+
+Output levels allow manual selection of a constrained output level range.
+
+
+
+
+
28.11.1 Examples# TOC
+
+
+ Make video output darker:
+
+
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
+
+
+ Increase contrast:
+
+
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
+
+
+ Make video output lighter:
+
+
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
+
+
+ Increase brightness:
+
+
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
+
+
+
+
+
28.12 colorchannelmixer# TOC
+
+
Adjust video input frames by re-mixing color channels.
+
+
This filter modifies a color channel by adding the values associated to
+the other channels of the same pixels. For example if the value to
+modify is red, the output value will be:
+
+
red =red *rr + blue *rb + green *rg + alpha *ra
+
+
+
The filter accepts the following options:
+
+
+rr
+rg
+rb
+ra
+Adjust contribution of input red, green, blue and alpha channels for output red channel.
+Default is 1
for rr , and 0
for rg , rb and ra .
+
+
+gr
+gg
+gb
+ga
+Adjust contribution of input red, green, blue and alpha channels for output green channel.
+Default is 1
for gg , and 0
for gr , gb and ga .
+
+
+br
+bg
+bb
+ba
+Adjust contribution of input red, green, blue and alpha channels for output blue channel.
+Default is 1
for bb , and 0
for br , bg and ba .
+
+
+ar
+ag
+ab
+aa
+Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
+Default is 1
for aa , and 0
for ar , ag and ab .
+
+Allowed ranges for options are [-2.0, 2.0]
.
+
+
+
+
+
28.12.1 Examples# TOC
+
+
+ Convert source to grayscale:
+
+
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
+
+ Simulate sepia tones:
+
+
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
+
+
+
+
+
28.13 colormatrix# TOC
+
+
Convert color matrix.
+
+
The filter accepts the following options:
+
+
+src
+dst
+Specify the source and destination color matrix. Both values must be
+specified.
+
+The accepted values are:
+
+‘bt709 ’
+BT.709
+
+
+‘bt601 ’
+BT.601
+
+
+‘smpte240m ’
+SMPTE-240M
+
+
+‘fcc ’
+FCC
+
+
+
+
+
+
For example to convert from BT.601 to SMPTE-240M, use the command:
+
+
colormatrix=bt601:smpte240m
+
+
+
+
28.14 copy# TOC
+
+
Copy the input source unchanged to the output. This is mainly useful for
+testing purposes.
+
+
+
28.15 crop# TOC
+
+
Crop the input video to given dimensions.
+
+
It accepts the following parameters:
+
+
+w, out_w
+The width of the output video. It defaults to iw
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+h, out_h
+The height of the output video. It defaults to ih
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+x
+The horizontal position, in the input video, of the left edge of the output
+video. It defaults to (in_w-out_w)/2
.
+This expression is evaluated per-frame.
+
+
+y
+The vertical position, in the input video, of the top edge of the output video.
+It defaults to (in_h-out_h)/2
.
+This expression is evaluated per-frame.
+
+
+keep_aspect
+If set to 1 will force the output display aspect ratio
+to be the same as the input, by changing the output sample aspect
+ratio. It defaults to 0.
+
+
+
+
The out_w , out_h , x , y parameters are
+expressions containing the following constants:
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+in_w
+in_h
+The input width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (cropped) width and height.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+n
+The number of the input frame, starting from 0.
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
The expression for out_w may depend on the value of out_h ,
+and the expression for out_h may depend on out_w , but they
+cannot depend on x and y , as x and y are
+evaluated after out_w and out_h .
+
+
The x and y parameters specify the expressions for the
+position of the top-left corner of the output (non-cropped) area. They
+are evaluated for each frame. If the evaluated value is not valid, it
+is approximated to the nearest valid value.
+
+
The expression for x may depend on y , and the expression
+for y may depend on x .
+
+
+
28.15.1 Examples# TOC
+
+
+
+
+
28.16 cropdetect# TOC
+
+
Auto-detect the crop size.
+
+
It calculates the necessary cropping parameters and prints the
+recommended parameters via the logging system. The detected dimensions
+correspond to the non-black area of the input video.
+
+
It accepts the following parameters:
+
+
+limit
+Set higher black value threshold, which can be optionally specified
+from nothing (0) to everything (255 for 8bit based formats). An intensity
+value greater than the set value is considered non-black. It defaults to 24.
+You can also specify a value between 0.0 and 1.0 which will be scaled depending
+on the bitdepth of the pixel format.
+
+
+round
+The value which the width/height should be divisible by. It defaults to
+16. The offset is automatically adjusted to center the video. Use 2 to
+get only even dimensions (needed for 4:2:2 video). 16 is best when
+encoding to most video codecs.
+
+
+reset_count, reset
+Set the counter that determines after how many frames cropdetect will
+reset the previously detected largest video area and start over to
+detect the current optimal crop area. Default value is 0.
+
+This can be useful when channel logos distort the video area. 0
+indicates ’never reset’, and returns the largest area encountered during
+playback.
+
+
+
+
+
28.17 curves# TOC
+
+
Apply color adjustments using curves.
+
+
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
+component (red, green and blue) has its values defined by N key points
+tied from each other using a smooth curve. The x-axis represents the pixel
+values from the input frame, and the y-axis the new pixel values to be set for
+the output frame.
+
+
By default, a component curve is defined by the two points (0;0) and
+(1;1) . This creates a straight line where each original pixel value is
+"adjusted" to its own value, which means no change to the image.
+
+
The filter allows you to redefine these two points and add some more. A new
+curve (using a natural cubic spline interpolation) will be defined to pass
+smoothly through all these new coordinates. The newly defined points need to be
+strictly increasing over the x-axis, and their x and y values must
+be in the [0;1] interval. If the computed curves happened to go outside
+the vector spaces, the values will be clipped accordingly.
+
+
If there is no key point defined in x=0
, the filter will automatically
+insert a (0;0) point. In the same way, if there is no key point defined
+in x=1
, the filter will automatically insert a (1;1) point.
+
+
The filter accepts the following options:
+
+
+preset
+Select one of the available color presets. This option can be used in addition
+to the r , g , b parameters; in this case, the latter
+options take priority over the preset values.
+Available presets are:
+
+‘none ’
+‘color_negative ’
+‘cross_process ’
+‘darker ’
+‘increase_contrast ’
+‘lighter ’
+‘linear_contrast ’
+‘medium_contrast ’
+‘negative ’
+‘strong_contrast ’
+‘vintage ’
+
+Default is none
.
+
+master, m
+Set the master key points. These points will define a second pass mapping. It
+is sometimes called a "luminance" or "value" mapping. It can be used with
+r , g , b or all since it acts like a
+post-processing LUT.
+
+red, r
+Set the key points for the red component.
+
+green, g
+Set the key points for the green component.
+
+blue, b
+Set the key points for the blue component.
+
+all
+Set the key points for all components (not including master).
+Can be used in addition to the other key points component
+options. In this case, the unset component(s) will fallback on this
+all setting.
+
+psfile
+Specify a Photoshop curves file (.asv
) to import the settings from.
+
+
+
+
To avoid some filtergraph syntax conflicts, each key points list need to be
+defined using the following syntax: x0/y0 x1/y1 x2/y2 ...
.
+
+
+
28.17.1 Examples# TOC
+
+
+
+
+
28.18 dctdnoiz# TOC
+
+
Denoise frames using 2D DCT (frequency domain filtering).
+
+
This filter is not designed for real time.
+
+
The filter accepts the following options:
+
+
+sigma, s
+Set the noise sigma constant.
+
+This sigma defines a hard threshold of 3 * sigma
; every DCT
+coefficient (absolute value) below this threshold will be dropped.
+
+If you need a more advanced filtering, see expr .
+
+Default is 0
.
+
+
+overlap
+Set the number of overlapping pixels for each block. Since the filter can be slow, you
+may want to reduce this value, at the cost of a less effective filter and the
+risk of various artefacts.
+
+If the overlapping value doesn’t allow processing the whole input width or
+height, a warning will be displayed and according borders won’t be denoised.
+
+Default value is blocksize -1, which is the best possible setting.
+
+
+expr, e
+Set the coefficient factor expression.
+
+For each coefficient of a DCT block, this expression will be evaluated as a
+multiplier value for the coefficient.
+
+If this option is set, the sigma option will be ignored.
+
+The absolute value of the coefficient can be accessed through the c
+variable.
+
+
+n
+Set the blocksize using the number of bits. 1<<n
defines the
+blocksize , which is the width and height of the processed blocks.
+
+The default value is 3 (8x8) and can be raised to 4 for a
+blocksize of 16x16. Note that changing this setting has huge consequences
+on the processing speed. Also, a larger block size does not necessarily mean a
+better de-noising.
+
+
+
+
+
28.18.1 Examples# TOC
+
+
Apply a denoise with a sigma of 4.5
:
+
+
+
The same operation can be achieved using the expression system:
+
+
dctdnoiz=e='gte(c, 4.5*3)'
+
+
+
Violent denoise using a block size of 16x16
:
+
+
+
+
28.19 decimate# TOC
+
+
Drop duplicated frames at regular intervals.
+
+
The filter accepts the following options:
+
+
+cycle
+Set the number of frames from which one will be dropped. Setting this to
+N means one frame in every batch of N frames will be dropped.
+Default is 5
.
+
+
+dupthresh
+Set the threshold for duplicate detection. If the difference metric for a frame
+is less than or equal to this value, then it is declared as duplicate. Default
+is 1.1
+
+
+scthresh
+Set scene change threshold. Default is 15
.
+
+
+blockx
+blocky
+Set the size of the x and y-axis blocks used during metric calculations.
+Larger blocks give better noise suppression, but also give worse detection of
+small movements. Must be a power of two. Default is 32
.
+
+
+ppsrc
+Mark main input as a pre-processed input and activate clean source input
+stream. This allows the input to be pre-processed with various filters to help
+the metrics calculation while keeping the frame selection lossless. When set to
+1
, the first stream is for the pre-processed input, and the second
+stream is the clean source from where the kept frames are chosen. Default is
+0
.
+
+
+chroma
+Set whether or not chroma is considered in the metric calculations. Default is
+1
.
+
+
+
+
+
28.20 dejudder# TOC
+
+
Remove judder produced by partially interlaced telecined content.
+
+
Judder can be introduced, for instance, by pullup filter. If the original
+source was partially telecined content then the output of pullup,dejudder
+will have a variable frame rate. May change the recorded frame rate of the
+container. Aside from that change, this filter will not affect constant frame
+rate video.
+
+
The option available in this filter is:
+
+cycle
+Specify the length of the window over which the judder repeats.
+
+Accepts any integer greater than 1. Useful values are:
+
+‘4 ’
+If the original was telecined from 24 to 30 fps (Film to NTSC).
+
+
+‘5 ’
+If the original was telecined from 25 to 30 fps (PAL to NTSC).
+
+
+‘20 ’
+If a mixture of the two.
+
+
+
+The default is ‘4 ’.
+
+
+
+
+
28.21 delogo# TOC
+
+
Suppress a TV station logo by a simple interpolation of the surrounding
+pixels. Just set a rectangle covering the logo and watch it disappear
+(and sometimes something even uglier appear - your mileage may vary).
+
+
It accepts the following parameters:
+
+x
+y
+Specify the top left corner coordinates of the logo. They must be
+specified.
+
+
+w
+h
+Specify the width and height of the logo to clear. They must be
+specified.
+
+
+band, t
+Specify the thickness of the fuzzy edge of the rectangle (added to
+w and h ). The default value is 4.
+
+
+show
+When set to 1, a green rectangle is drawn on the screen to simplify
+finding the right x , y , w , and h parameters.
+The default value is 0.
+
+The rectangle is drawn on the outermost pixels which will be (partly)
+replaced with interpolated values. The values of the next pixels
+immediately outside this rectangle in each direction will be used to
+compute the interpolated pixel values inside the rectangle.
+
+
+
+
+
+
28.21.1 Examples# TOC
+
+
+ Set a rectangle covering the area with top left corner coordinates 0,0
+and size 100x77, and a band of size 10:
+
+
delogo=x=0:y=0:w=100:h=77:band=10
+
+
+
+
+
+
28.22 deshake# TOC
+
+
Attempt to fix small changes in horizontal and/or vertical shift. This
+filter helps remove camera shake from hand-holding a camera, bumping a
+tripod, moving on a vehicle, etc.
+
+
The filter accepts the following options:
+
+
+x
+y
+w
+h
+Specify a rectangular area where to limit the search for motion
+vectors.
+If desired the search for motion vectors can be limited to a
+rectangular area of the frame defined by its top left corner, width
+and height. These parameters have the same meaning as the drawbox
+filter which can be used to visualise the position of the bounding
+box.
+
+This is useful when simultaneous movement of subjects within the frame
+might be confused for camera motion by the motion vector search.
+
+If any or all of x , y , w and h are set to -1
+then the full frame is used. This allows later options to be set
+without specifying the bounding box for the motion vector search.
+
+Default - search the whole frame.
+
+
+rx
+ry
+Specify the maximum extent of movement in x and y directions in the
+range 0-64 pixels. Default 16.
+
+
+edge
+Specify how to generate pixels to fill blanks at the edge of the
+frame. Available values are:
+
+‘blank, 0 ’
+Fill zeroes at blank locations
+
+‘original, 1 ’
+Original image at blank locations
+
+‘clamp, 2 ’
+Extruded edge value at blank locations
+
+‘mirror, 3 ’
+Mirrored edge at blank locations
+
+
+Default value is ‘mirror ’.
+
+
+blocksize
+Specify the blocksize to use for motion search. Range 4-128 pixels,
+default 8.
+
+
+contrast
+Specify the contrast threshold for blocks. Only blocks with more than
+the specified contrast (difference between darkest and lightest
+pixels) will be considered. Range 1-255, default 125.
+
+
+search
+Specify the search strategy. Available values are:
+
+‘exhaustive, 0 ’
+Set exhaustive search
+
+‘less, 1 ’
+Set less exhaustive search.
+
+
+Default value is ‘exhaustive ’.
+
+
+filename
+If set then a detailed log of the motion search is written to the
+specified file.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
+
28.23 drawbox# TOC
+
+
Draw a colored box on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the top left corner coordinates of the box. It defaults to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the box; if 0 they are interpreted as
+the input width and height. It defaults to 0.
+
+
+color, c
+Specify the color of the box to write. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the box edge color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the box edge. Default value is 3
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w and h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y offset coordinates where the box is drawn.
+
+
+w
+h
+The width and height of the drawn box.
+
+
+t
+The thickness of the drawn box.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
28.23.1 Examples# TOC
+
+
+
+
+
28.24 drawgrid# TOC
+
+
Draw a grid on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
+input width and height, respectively, minus thickness
, so image gets
+framed. Default to 0.
+
+
+color, c
+Specify the color of the grid. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the grid color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the grid line. Default value is 1
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w and h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input grid cell width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y coordinates of some point of grid intersection (meant to configure offset).
+
+
+w
+h
+The width and height of the drawn cell.
+
+
+t
+The thickness of the drawn cell.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
28.24.1 Examples# TOC
+
+
+ Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
+
+
drawgrid=width=100:height=100:thickness=2:color=red@0.5
+
+
+ Draw a white 3x3 grid with an opacity of 50%:
+
+
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
+
+
+
+
+
28.25 drawtext# TOC
+
+
Draw a text string or text from a specified file on top of a video, using the
+libfreetype library.
+
+
To enable compilation of this filter, you need to configure FFmpeg with
+--enable-libfreetype
.
+To enable default font fallback and the font option you need to
+configure FFmpeg with --enable-libfontconfig
.
+To enable the text_shaping option, you need to configure FFmpeg with
+--enable-libfribidi
.
+
+
+
28.25.1 Syntax# TOC
+
+
It accepts the following parameters:
+
+
+box
+Used to draw a box around text using the background color.
+The value must be either 1 (enable) or 0 (disable).
+The default value of box is 0.
+
+
+boxcolor
+The color to be used for drawing box around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of boxcolor is "white".
+
+
+borderw
+Set the width of the border to be drawn around the text using bordercolor .
+The default value of borderw is 0.
+
+
+bordercolor
+Set the color to be used for drawing border around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of bordercolor is "black".
+
+
+expansion
+Select how the text is expanded. Can be either none
,
+strftime
(deprecated) or
+normal
(default). See the Text expansion section
+below for details.
+
+
+fix_bounds
+If true, check and fix text coords to avoid clipping.
+
+
+fontcolor
+The color to be used for drawing fonts. For the syntax of this option, check
+the "Color" section in the ffmpeg-utils manual.
+
+The default value of fontcolor is "black".
+
+
+fontcolor_expr
+String which is expanded the same way as text to obtain dynamic
+fontcolor value. By default this option has empty value and is not
+processed. When this option is set, it overrides fontcolor option.
+
+
+font
+The font family to be used for drawing text. By default Sans.
+
+
+fontfile
+The font file to be used for drawing text. The path must be included.
+This parameter is mandatory if the fontconfig support is disabled.
+
+
+fontsize
+The font size to be used for drawing text.
+The default value of fontsize is 16.
+
+
+text_shaping
+If set to 1, attempt to shape the text (for example, reverse the order of
+right-to-left text and join Arabic characters) before drawing it.
+Otherwise, just draw the text exactly as given.
+By default 1 (if supported).
+
+
+ft_load_flags
+The flags to be used for loading the fonts.
+
+The flags map the corresponding flags supported by libfreetype, and are
+a combination of the following values:
+
+default
+no_scale
+no_hinting
+render
+no_bitmap
+vertical_layout
+force_autohint
+crop_bitmap
+pedantic
+ignore_global_advance_width
+no_recurse
+ignore_transform
+monochrome
+linear_design
+no_autohint
+
+
+Default value is "default".
+
+For more information consult the documentation for the FT_LOAD_*
+libfreetype flags.
+
+
+shadowcolor
+The color to be used for drawing a shadow behind the drawn text. For the
+syntax of this option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of shadowcolor is "black".
+
+
+shadowx
+shadowy
+The x and y offsets for the text shadow position with respect to the
+position of the text. They can be either positive or negative
+values. The default value for both is "0".
+
+
+start_number
+The starting frame number for the n/frame_num variable. The default value
+is "0".
+
+
+tabsize
+The size in number of spaces to use for rendering the tab.
+Default value is 4.
+
+
+timecode
+Set the initial timecode representation in "hh:mm:ss[:;.]ff"
+format. It can be used with or without text parameter. timecode_rate
+option must be specified.
+
+
+timecode_rate, rate, r
+Set the timecode frame rate (timecode only).
+
+
+text
+The text string to be drawn. The text must be a sequence of UTF-8
+encoded characters.
+This parameter is mandatory if no file is specified with the parameter
+textfile .
+
+
+textfile
+A text file containing text to be drawn. The text must be a sequence
+of UTF-8 encoded characters.
+
+This parameter is mandatory if no text string is specified with the
+parameter text .
+
+If both text and textfile are specified, an error is thrown.
+
+
+reload
+If set to 1, the textfile will be reloaded before each frame.
+Be sure to update it atomically, or it may be read partially, or even fail.
+
+
+x
+y
+The expressions which specify the offsets where text will be drawn
+within the video frame. They are relative to the top/left border of the
+output image.
+
+The default value of x and y is "0".
+
+See below for the list of accepted constants and functions.
+
+
+
+
The parameters for x and y are expressions containing the
+following constants and functions:
+
+
+dar
+input display aspect ratio, it is the same as (w / h ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+line_h, lh
+the height of each text line
+
+
+main_h, h, H
+the input height
+
+
+main_w, w, W
+the input width
+
+
+max_glyph_a, ascent
+the maximum distance from the baseline to the highest/upper grid
+coordinate used to place a glyph outline point, for all the rendered
+glyphs.
+It is a positive value, due to the grid’s orientation with the Y axis
+upwards.
+
+
+max_glyph_d, descent
+the maximum distance from the baseline to the lowest grid coordinate
+used to place a glyph outline point, for all the rendered glyphs.
+This is a negative value, due to the grid’s orientation, with the Y axis
+upwards.
+
+
+max_glyph_h
+maximum glyph height, that is the maximum height for all the glyphs
+contained in the rendered text, it is equivalent to ascent -
+descent .
+
+
+max_glyph_w
+maximum glyph width, that is the maximum width for all the glyphs
+contained in the rendered text
+
+
+n
+the number of input frame, starting from 0
+
+
+rand(min, max)
+return a random number included between min and max
+
+
+sar
+The input sample aspect ratio.
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+text_h, th
+the height of the rendered text
+
+
+text_w, tw
+the width of the rendered text
+
+
+x
+y
+the x and y offset coordinates where the text is drawn.
+
+These parameters allow the x and y expressions to refer
+each other, so you can for example specify y=x/dar
.
+
+
+
+
+
28.25.2 Text expansion# TOC
+
+
If expansion is set to strftime
,
+the filter recognizes strftime() sequences in the provided text and
+expands them accordingly. Check the documentation of strftime(). This
+feature is deprecated.
+
+
If expansion is set to none
, the text is printed verbatim.
+
+
If expansion is set to normal
(which is the default),
+the following expansion mechanism is used.
+
+
The backslash character ’\’, followed by any character, always expands to
+the second character.
+
+
Sequence of the form %{...}
are expanded. The text between the
+braces is a function name, possibly followed by arguments separated by ’:’.
+If the arguments contain special characters or delimiters (’:’ or ’}’),
+they should be escaped.
+
+
Note that they probably must also be escaped as the value for the
+text option in the filter argument string and as the filter
+argument in the filtergraph description, and possibly also for the shell,
+that makes up to four levels of escaping; using a text file avoids these
+problems.
+
+
The following functions are available:
+
+
+expr, e
+The expression evaluation result.
+
+It must take one argument specifying the expression to be evaluated,
+which accepts the same constants and functions as the x and
+y values. Note that not all constants should be used, for
+example the text size is not known when evaluating the expression, so
+the constants text_w and text_h will have an undefined
+value.
+
+
+expr_int_format, eif
+Evaluate the expression’s value and output as formatted integer.
+
+The first argument is the expression to be evaluated, just as for the expr function.
+The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
+’u’. They are treated exactly as in the printf function.
+The third parameter is optional and sets the number of positions taken by the output.
+It can be used to add padding with zeros from the left.
+
+
+gmtime
+The time at which the filter is running, expressed in UTC.
+It can accept an argument: a strftime() format string.
+
+
+localtime
+The time at which the filter is running, expressed in the local time zone.
+It can accept an argument: a strftime() format string.
+
+
+metadata
+Frame metadata. It must take one argument specifying metadata key.
+
+
+n, frame_num
+The frame number, starting from 0.
+
+
+pict_type
+A 1 character description of the current picture type.
+
+
+pts
+The timestamp of the current frame.
+It can take up to two arguments.
+
+The first argument is the format of the timestamp; it defaults to flt
+for seconds as a decimal number with microsecond accuracy; hms
stands
+for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
+
+The second argument is an offset added to the timestamp.
+
+
+
+
+
+
28.25.3 Examples# TOC
+
+
+
+
For more information about libfreetype, check:
+http://www.freetype.org/ .
+
+
For more information about fontconfig, check:
+http://freedesktop.org/software/fontconfig/fontconfig-user.html .
+
+
For more information about libfribidi, check:
+http://fribidi.org/ .
+
+
+
28.26 edgedetect# TOC
+
+
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
+
+
The filter accepts the following options:
+
+
+low
+high
+Set low and high threshold values used by the Canny thresholding
+algorithm.
+
+The high threshold selects the "strong" edge pixels, which are then
+connected through 8-connectivity with the "weak" edge pixels selected
+by the low threshold.
+
+low and high threshold values must be chosen in the range
+[0,1], and low should be less than or equal to high .
+
+Default value for low is 20/255
, and default value for high
+is 50/255
.
+
+
+mode
+Define the drawing mode.
+
+
+‘wires ’
+Draw white/gray wires on black background.
+
+
+‘colormix ’
+Mix the colors to create a paint/cartoon effect.
+
+
+
+Default value is wires .
+
+
+
+
+
28.26.1 Examples# TOC
+
+
+ Standard edge detection with custom values for the hysteresis thresholding:
+
+
edgedetect=low=0.1:high=0.4
+
+
+ Painting effect without thresholding:
+
+
edgedetect=mode=colormix:high=0
+
+
+
+
+
28.27 extractplanes# TOC
+
+
Extract color channel components from input video stream into
+separate grayscale video streams.
+
+
The filter accepts the following option:
+
+
+planes
+Set plane(s) to extract.
+
+Available values for planes are:
+
+‘y ’
+‘u ’
+‘v ’
+‘a ’
+‘r ’
+‘g ’
+‘b ’
+
+
+Choosing planes not available in the input will result in an error.
+That means you cannot select r
, g
, b
planes
+with y
, u
, v
planes at the same time.
+
+
+
+
+
28.27.1 Examples# TOC
+
+
+ Extract luma, u and v color channel component from input video frame
+into 3 grayscale outputs:
+
+
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
+
+
+
+
+
28.28 elbg# TOC
+
+
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
+
+
For each input image, the filter will compute the optimal mapping from
+the input to the output given the codebook length, that is the number
+of distinct output colors.
+
+
This filter accepts the following options.
+
+
+codebook_length, l
+Set codebook length. The value must be a positive integer, and
+represents the number of distinct output colors. Default value is 256.
+
+
+nb_steps, n
+Set the maximum number of iterations to apply for computing the optimal
+mapping. The higher the value the better the result and the higher the
+computation time. Default value is 1.
+
+
+seed, s
+Set a random seed, must be an integer included between 0 and
+UINT32_MAX. If not specified, or if explicitly set to -1, the filter
+will try to use a good random seed on a best effort basis.
+
+
+
+
+
28.29 fade# TOC
+
+
Apply a fade-in/out effect to the input video.
+
+
It accepts the following parameters:
+
+
+type, t
+The effect type can be either "in" for a fade-in, or "out" for a fade-out
+effect.
+Default is in
.
+
+
+start_frame, s
+Specify the number of the frame to start applying the fade
+effect at. Default is 0.
+
+
+nb_frames, n
+The number of frames that the fade effect lasts. At the end of the
+fade-in effect, the output video will have the same intensity as the input video.
+At the end of the fade-out transition, the output video will be filled with the
+selected color .
+Default is 25.
+
+
+alpha
+If set to 1, fade only alpha channel, if one exists on the input.
+Default value is 0.
+
+
+start_time, st
+Specify the timestamp (in seconds) of the frame to start to apply the fade
+effect. If both start_frame and start_time are specified, the fade will start at
+whichever comes last. Default is 0.
+
+
+duration, d
+The number of seconds for which the fade effect has to last. At the end of the
+fade-in effect the output video will have the same intensity as the input video,
+at the end of the fade-out transition the output video will be filled with the
+selected color .
+If both duration and nb_frames are specified, duration is used. Default is 0.
+
+
+color, c
+Specify the color of the fade. Default is "black".
+
+
+
+
+
28.29.1 Examples# TOC
+
+
+
+
+
28.30 field# TOC
+
+
Extract a single field from an interlaced image using stride
+arithmetic to avoid wasting CPU time. The output frames are marked as
+non-interlaced.
+
+
The filter accepts the following options:
+
+
+type
+Specify whether to extract the top (if the value is 0
or
+top
) or the bottom field (if the value is 1
or
+bottom
).
+
+
+
+
+
28.31 fieldmatch# TOC
+
+
Field matching filter for inverse telecine. It is meant to reconstruct the
+progressive frames from a telecined stream. The filter does not drop duplicated
+frames, so to achieve a complete inverse telecine fieldmatch
needs to be
+followed by a decimation filter such as decimate in the filtergraph.
+
+
The separation of the field matching and the decimation is notably motivated by
+the possibility of inserting a de-interlacing filter fallback between the two.
+If the source has mixed telecined and real interlaced content,
+fieldmatch
will not be able to match fields for the interlaced parts.
+But these remaining combed frames will be marked as interlaced, and thus can be
+de-interlaced by a later filter such as yadif before decimation.
+
+
In addition to the various configuration options, fieldmatch
can take an
+optional second stream, activated through the ppsrc option. If
+enabled, the frames reconstruction will be based on the fields and frames from
+this second stream. This allows the first input to be pre-processed in order to
+help the various algorithms of the filter, while keeping the output lossless
+(assuming the fields are matched properly). Typically, a field-aware denoiser,
+or brightness/contrast adjustments can help.
+
+
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
+and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM, on
+which fieldmatch
 is based. While the semantics and usage are very
+close, some behaviour and option names can differ.
+
+
The decimate filter currently only works for constant frame rate input.
+Do not use fieldmatch
and decimate if your input has mixed
+telecined and progressive content with changing framerate.
+
+
The filter accepts the following options:
+
+
+order
+Specify the assumed field order of the input stream. Available values are:
+
+
+‘auto ’
+Auto detect parity (use FFmpeg’s internal parity value).
+
+‘bff ’
+Assume bottom field first.
+
+‘tff ’
+Assume top field first.
+
+
+
+Note that it is sometimes recommended not to trust the parity announced by the
+stream.
+
+Default value is auto .
+
+
+mode
+Set the matching mode or strategy to use. pc mode is the safest in the
+sense that it won’t risk creating jerkiness due to duplicate frames when
+possible, but if there are bad edits or blended fields it will end up
+outputting combed frames when a good match might actually exist. On the other
+hand, pcn_ub mode is the most risky in terms of creating jerkiness,
+but will almost always find a good frame if there is one. The other values are
+all somewhere in between pc and pcn_ub in terms of risking
+jerkiness and creating duplicate frames versus finding good matches in sections
+with bad edits, orphaned fields, blended fields, etc.
+
+More details about p/c/n/u/b are available in the p/c/n/u/b meaning section.
+
+Available values are:
+
+
+‘pc ’
+2-way matching (p/c)
+
+‘pc_n ’
+2-way matching, and trying 3rd match if still combed (p/c + n)
+
+‘pc_u ’
+2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
+
+‘pc_n_ub ’
+2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
+still combed (p/c + n + u/b)
+
+‘pcn ’
+3-way matching (p/c/n)
+
+‘pcn_ub ’
+3-way matching, and trying 4th/5th matches if all 3 of the original matches are
+detected as combed (p/c/n + u/b)
+
+
+
+The parenthesis at the end indicate the matches that would be used for that
+mode assuming order =tff (and field on auto or
+top ).
+
+In terms of speed pc mode is by far the fastest and pcn_ub is
+the slowest.
+
+Default value is pc_n .
+
+
+ppsrc
+Mark the main input stream as a pre-processed input, and enable the secondary
+input stream as the clean source to pick the fields from. See the filter
+introduction for more details. It is similar to the clip2 feature from
+VFM/TFM.
+
+Default value is 0
(disabled).
+
+
+field
+Set the field to match from. It is recommended to set this to the same value as
+order unless you experience matching failures with that setting. In
+certain circumstances changing the field that is used to match from can have a
+large impact on matching performance. Available values are:
+
+
+‘auto ’
+Automatic (same value as order ).
+
+‘bottom ’
+Match from the bottom field.
+
+‘top ’
+Match from the top field.
+
+
+
+Default value is auto .
+
+
+mchroma
+Set whether or not chroma is included during the match comparisons. In most
+cases it is recommended to leave this enabled. You should set this to 0
+only if your clip has bad chroma problems such as heavy rainbowing or other
+artifacts. Setting this to 0
could also be used to speed things up at
+the cost of some accuracy.
+
+Default value is 1
.
+
+
+y0
+y1
+These define an exclusion band which excludes the lines between y0 and
+y1 from being included in the field matching decision. An exclusion
+band can be used to ignore subtitles, a logo, or other things that may
+interfere with the matching. y0 sets the starting scan line and
+y1 sets the ending line; all lines in between y0 and
+y1 (including y0 and y1 ) will be ignored. Setting
+y0 and y1 to the same value will disable the feature.
+y0 and y1 defaults to 0
.
+
+
+scthresh
+Set the scene change detection threshold as a percentage of maximum change on
+the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
+detection is only relevant in case combmatch =sc . The range for
+scthresh is [0.0, 100.0]
.
+
+Default value is 12.0
.
+
+
+combmatch
+When combmatch is not none , fieldmatch
will take into
+account the combed scores of matches when deciding what match to use as the
+final match. Available values are:
+
+
+‘none ’
+No final matching based on combed scores.
+
+‘sc ’
+Combed scores are only used when a scene change is detected.
+
+‘full ’
+Use combed scores all the time.
+
+
+
+Default is sc .
+
+
+combdbg
+Force fieldmatch
to calculate the combed metrics for certain matches and
+print them. This setting is known as micout in TFM/VFM vocabulary.
+Available values are:
+
+
+‘none ’
+No forced calculation.
+
+‘pcn ’
+Force p/c/n calculations.
+
+‘pcnub ’
+Force p/c/n/u/b calculations.
+
+
+
+Default value is none .
+
+
+cthresh
+This is the area combing threshold used for combed frame detection. This
+essentially controls how "strong" or "visible" combing must be to be detected.
+Larger values mean combing must be more visible and smaller values mean combing
+can be less visible or strong and still be detected. Valid settings are from
+-1
(every pixel will be detected as combed) to 255
(no pixel will
+be detected as combed). This is basically a pixel difference value. A good
+range is [8, 12]
.
+
+Default value is 9
.
+
+
+chroma
+Sets whether or not chroma is considered in the combed frame decision. Only
+disable this if your source has chroma problems (rainbowing, etc.) that are
+causing problems for the combed frame detection with chroma enabled. Actually,
+using chroma =0 is usually more reliable, except for the case
+where there is chroma only combing in the source.
+
+Default value is 0
.
+
+
+blockx
+blocky
+Respectively set the x-axis and y-axis size of the window used during combed
+frame detection. This has to do with the size of the area in which
+combpel pixels are required to be detected as combed for a frame to be
+declared combed. See the combpel parameter description for more info.
+Possible values are any number that is a power of 2 starting at 4 and going up
+to 512.
+
+Default value is 16
.
+
+
+combpel
+The number of combed pixels inside any of the blocky by
+blockx size blocks on the frame for the frame to be detected as
+combed. While cthresh controls how "visible" the combing must be, this
+setting controls "how much" combing there must be in any localized area (a
+window defined by the blockx and blocky settings) on the
+frame. Minimum value is 0
and maximum is blocky x blockx
(at
+which point no frames will ever be detected as combed). This setting is known
+as MI in TFM/VFM vocabulary.
+
+Default value is 80
.
+
+
+
+
+
28.31.1 p/c/n/u/b meaning# TOC
+
+
+
28.31.1.1 p/c/n# TOC
+
+
We assume the following telecined stream:
+
+
+
Top fields: 1 2 2 3 4
+Bottom fields: 1 2 3 4 4
+
+
+
The numbers correspond to the progressive frame the fields relate to. Here, the
+first two frames are progressive, the 3rd and 4th are combed, and so on.
+
+
When fieldmatch
is configured to run a matching from bottom
+(field =bottom ) this is how this input stream get transformed:
+
+
+
Input stream:
+ T 1 2 2 3 4
+ B 1 2 3 4 4 <-- matching reference
+
+Matches: c c n n c
+
+Output stream:
+ T 1 2 3 4 4
+ B 1 2 3 4 4
+
+
+
As a result of the field matching, we can see that some frames get duplicated.
+To perform a complete inverse telecine, you need to rely on a decimation filter
+after this operation. See for instance the decimate filter.
+
+
The same operation now matching from top fields (field =top )
+looks like this:
+
+
+
Input stream:
+ T 1 2 2 3 4 <-- matching reference
+ B 1 2 3 4 4
+
+Matches: c c p p c
+
+Output stream:
+ T 1 2 2 3 4
+ B 1 2 2 3 4
+
+
+
In these examples, we can see what p , c and n mean;
+basically, they refer to the frame and field of the opposite parity:
+
+
+ p matches the field of the opposite parity in the previous frame
+ c matches the field of the opposite parity in the current frame
+ n matches the field of the opposite parity in the next frame
+
+
+
+
28.31.1.2 u/b# TOC
+
+
The u and b matching are a bit special in the sense that they match
+from the opposite parity flag. In the following examples, we assume that we are
+currently matching the 2nd frame (Top:2, bottom:2). According to the match, a
+’x’ is placed above and below each matched fields.
+
+
With bottom matching (field =bottom ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 1 2 2 2
+ 2 2 2 1 3
+
+
+
With top matching (field =top ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 2 2 1 2
+ 2 1 3 2 2
+
+
+
+
28.31.2 Examples# TOC
+
+
Simple IVTC of a top field first telecined stream:
+
+
fieldmatch=order=tff:combmatch=none, decimate
+
+
+
Advanced IVTC, with fallback on yadif for still combed frames:
+
+
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+
+
+
28.32 fieldorder# TOC
+
+
Transform the field order of the input video.
+
+
It accepts the following parameters:
+
+
+order
+The output field order. Valid values are tff for top field first or bff
+for bottom field first.
+
+
+
+
The default value is ‘tff ’.
+
+
The transformation is done by shifting the picture content up or down
+by one line, and filling the remaining line with appropriate picture content.
+This method is consistent with most broadcast field order converters.
+
+
If the input video is not flagged as being interlaced, or it is already
+flagged as being of the required output field order, then this filter does
+not alter the incoming video.
+
+
It is very useful when converting to or from PAL DV material,
+which is bottom field first.
+
+
For example:
+
+
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+
+
+
28.33 fifo# TOC
+
+
Buffer input images and send them when they are requested.
+
+
It is mainly useful when auto-inserted by the libavfilter
+framework.
+
+
It does not take parameters.
+
+
+
28.34 format# TOC
+
+
Convert the input video to one of the specified pixel formats.
+Libavfilter will try to pick one that is suitable as input to
+the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
28.34.1 Examples# TOC
+
+
+
+
+
28.35 fps# TOC
+
+
Convert the video to specified constant frame rate by duplicating or dropping
+frames as necessary.
+
+
It accepts the following parameters:
+
+fps
+The desired output frame rate. The default is 25
.
+
+
+round
+Rounding method.
+
+Possible values are:
+
+zero
+zero round towards 0
+
+inf
+round away from 0
+
+down
+round towards -infinity
+
+up
+round towards +infinity
+
+near
+round to nearest
+
+
+The default is near
.
+
+
+start_time
+Assume the first PTS should be the given value, in seconds. This allows for
+padding/trimming at the start of stream. By default, no assumption is made
+about the first frame’s expected PTS, so no padding or trimming is done.
+For example, this could be set to 0 to pad the beginning with duplicates of
+the first frame if a video stream starts after the audio stream or to trim any
+frames with a negative PTS.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+fps [:round ].
+
+
See also the setpts filter.
+
+
+
28.35.1 Examples# TOC
+
+
+ A typical usage in order to set the fps to 25:
+
+
+ Sets the fps to 24, using abbreviation and rounding method to round to nearest:
+
+
fps=fps=film:round=near
+
+
+
+
+
28.36 framepack# TOC
+
+
Pack two different video streams into a stereoscopic video, setting proper
+metadata on supported codecs. The two views should have the same size and
+framerate and processing will stop when the shorter video ends. Please note
+that you may conveniently adjust view properties with the scale and
+fps filters.
+
+
It accepts the following parameters:
+
+format
+The desired packing format. Supported values are:
+
+
+sbs
+The views are next to each other (default).
+
+
+tab
+The views are on top of each other.
+
+
+lines
+The views are packed by line.
+
+
+columns
+The views are packed by column.
+
+
+frameseq
+The views are temporally interleaved.
+
+
+
+
+
+
+
+
Some examples:
+
+
+
# Convert left and right views into a frame-sequential video
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+
+
+
28.37 framestep# TOC
+
+
Select one frame every N-th frame.
+
+
This filter accepts the following option:
+
+step
+Select frame after every step
frames.
+Allowed values are positive integers higher than 0. Default value is 1
.
+
+
+
+
+
28.38 frei0r# TOC
+
+
Apply a frei0r effect to the input video.
+
+
To enable the compilation of this filter, you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the frei0r effect to load. If the environment variable
+FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
+directories specified by the colon-separated list in FREI0R_PATH
.
+Otherwise, the standard frei0r paths are searched, in this order:
+HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
+/usr/lib/frei0r-1/ .
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r effect.
+
+
+
+
+
A frei0r effect parameter can be a boolean (its value is either
+"y" or "n"), a double, a color (specified as
+R /G /B , where R , G , and B are floating point
+numbers between 0.0 and 1.0, inclusive) or by a color description specified in the "Color"
+section in the ffmpeg-utils manual), a position (specified as X /Y , where
+X and Y are floating point numbers) and/or a string.
+
+
The number and types of parameters depend on the loaded effect. If an
+effect parameter is not specified, the default value is set.
+
+
+
28.38.1 Examples# TOC
+
+
+ Apply the distort0r effect, setting the first two double parameters:
+
+
frei0r=filter_name=distort0r:filter_params=0.5|0.01
+
+
+ Apply the colordistance effect, taking a color as the first parameter:
+
+
frei0r=colordistance:0.2/0.3/0.4
+frei0r=colordistance:violet
+frei0r=colordistance:0x112233
+
+
+ Apply the perspective effect, specifying the top left and top right image
+positions:
+
+
frei0r=perspective:0.2/0.2|0.8/0.2
+
+
+
+
For more information, see
+http://frei0r.dyne.org
+
+
+
28.39 fspp# TOC
+
+
Apply fast and simple postprocessing. It is a faster version of spp .
+
+
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
+processing filter, one of them is performed once per block, not per pixel.
+This allows for much higher speed.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 4-5. Default value is 4
.
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range 0-63.
+If not set, the filter will use the QP from the video stream (if available).
+
+
+strength
+Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
+more details but also more artifacts, while higher values make the image smoother
+but also blurrier. Default value is 0
− PSNR optimal.
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
+
28.40 geq# TOC
+
+
The filter accepts the following options:
+
+
+lum_expr, lum
+Set the luminance expression.
+
+cb_expr, cb
+Set the chrominance blue expression.
+
+cr_expr, cr
+Set the chrominance red expression.
+
+alpha_expr, a
+Set the alpha expression.
+
+red_expr, r
+Set the red expression.
+
+green_expr, g
+Set the green expression.
+
+blue_expr, b
+Set the blue expression.
+
+
+
+
The colorspace is selected according to the specified options. If one
+of the lum_expr , cb_expr , or cr_expr
+options is specified, the filter will automatically select a YCbCr
+colorspace. If one of the red_expr , green_expr , or
+blue_expr options is specified, it will select an RGB
+colorspace.
+
+
If one of the chrominance expression is not defined, it falls back on the other
+one. If no alpha expression is specified it will evaluate to opaque value.
+If none of chrominance expressions are specified, they will evaluate
+to the luminance expression.
+
+
The expressions can use the following variables and functions:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+The coordinates of the current sample.
+
+
+W
+H
+The width and height of the image.
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+p(x, y)
+Return the value of the pixel at location (x ,y ) of the current
+plane.
+
+
+lum(x, y)
+Return the value of the pixel at location (x ,y ) of the luminance
+plane.
+
+
+cb(x, y)
+Return the value of the pixel at location (x ,y ) of the
+blue-difference chroma plane. Return 0 if there is no such plane.
+
+
+cr(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red-difference chroma plane. Return 0 if there is no such plane.
+
+
+r(x, y)
+g(x, y)
+b(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red/green/blue component. Return 0 if there is no such component.
+
+
+alpha(x, y)
+Return the value of the pixel at location (x ,y ) of the alpha
+plane. Return 0 if there is no such plane.
+
+
+
+
For functions, if x and y are outside the area, the value will be
+automatically clipped to the closer edge.
+
+
+
28.40.1 Examples# TOC
+
+
+ Flip the image horizontally:
+
+
+ Generate a bidimensional sine wave, with angle PI/3
and a
+wavelength of 100 pixels:
+
+
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
+
+
+ Generate a fancy enigmatic moving light:
+
+
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
+
+
+ Generate a quick emboss effect:
+
+
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
+
+
+ Modify RGB components depending on pixel position:
+
+
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
+
+
+ Create a radial gradient that is the same size as the input (also see
+the vignette filter):
+
+
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
+
+
+ Create a linear gradient to use as a mask for another filter, then
+compose with overlay . In this example the video will gradually
+become more blurry from the top to the bottom of the y-axis as defined
+by the linear gradient:
+
+
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
+
+
+
+
+
28.41 gradfun# TOC
+
+
Fix the banding artifacts that are sometimes introduced into nearly flat
+regions by truncation to 8bit color depth.
+Interpolate the gradients that should go where the bands are, and
+dither them.
+
+
It is designed for playback only. Do not use it prior to
+lossy compression, because compression tends to lose the dither and
+bring back the bands.
+
+
It accepts the following parameters:
+
+
+strength
+The maximum amount by which the filter will change any one pixel. This is also
+the threshold for detecting nearly flat regions. Acceptable values range from
+.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
+valid range.
+
+
+radius
+The neighborhood to fit the gradient to. A larger radius makes for smoother
+gradients, but also prevents the filter from modifying the pixels near detailed
+regions. Acceptable values are 8-32; the default value is 16. Out-of-range
+values will be clipped to the valid range.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+strength [:radius ]
+
+
+
28.41.1 Examples# TOC
+
+
+ Apply the filter with a 3.5
strength and radius of 8
:
+
+
+ Specify radius, omitting the strength (which will fall-back to the default
+value):
+
+
+
+
+
+
28.42 haldclut# TOC
+
+
Apply a Hald CLUT to a video stream.
+
+
First input is the video stream to process, and second one is the Hald CLUT.
+The Hald CLUT input can be a simple picture or a complete video stream.
+
+
The filter accepts the following options:
+
+
+shortest
+Force termination when the shortest input terminates. Default is 0
.
+
+repeatlast
+Continue applying the last CLUT after the end of the stream. A value of
+0
disables the filter after the last frame of the CLUT is reached.
+Default is 1
.
+
+
+
+
haldclut
also has the same interpolation options as lut3d (both
+filters share the same internals).
+
+
More information about the Hald CLUT can be found on Eskil Steenberg’s website
+(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
+
+
+
28.42.1 Workflow examples# TOC
+
+
+
28.42.1.1 Hald CLUT video stream# TOC
+
+
Generate an identity Hald CLUT stream altered with various effects:
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+
+
Note: make sure you use a lossless codec.
+
+
Then use it with haldclut
to apply it on some random stream:
+
+
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+
+
The Hald CLUT will be applied to the 10 first seconds (duration of
+clut.nut ), then the latest picture of that CLUT stream will be applied
+to the remaining frames of the mandelbrot
stream.
+
+
+
28.42.1.2 Hald CLUT with preview# TOC
+
+
A Hald CLUT is supposed to be a squared image of Level*Level*Level
by
+Level*Level*Level
pixels. For a given Hald CLUT, FFmpeg will select the
+biggest possible square starting at the top left of the picture. The remaining
+padding pixels (bottom or right) will be ignored. This area can be used to add
+a preview of the Hald CLUT.
+
+
Typically, the following generated Hald CLUT will be supported by the
+haldclut
filter:
+
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
+ pad=iw+320 [padded_clut];
+ smptebars=s=320x256, split [a][b];
+ [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+ [main][b] overlay=W-320" -frames:v 1 clut.png
+
+
+
It contains the original and a preview of the effect of the CLUT: SMPTE color
+bars are displayed on the right-top, and below the same color bars processed by
+the color changes.
+
+
Then, the effect of this Hald CLUT can be visualized with:
+
+
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+
+
+
28.43 hflip# TOC
+
+
Flip the input video horizontally.
+
+
For example, to horizontally flip the input video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "hflip" out.avi
+
+
+
+
28.44 histeq# TOC
+
This filter applies a global color histogram equalization on a
+per-frame basis.
+
+
It can be used to correct video that has a compressed range of pixel
+intensities. The filter redistributes the pixel intensities to
+equalize their distribution across the intensity range. It may be
+viewed as an "automatically adjusting contrast filter". This filter is
+useful only for correcting degraded or poorly captured source
+video.
+
+
The filter accepts the following options:
+
+
+strength
+Determine the amount of equalization to be applied. As the strength
+is reduced, the distribution of pixel intensities more-and-more
+approaches that of the input frame. The value must be a float number
+in the range [0,1] and defaults to 0.200.
+
+
+intensity
+Set the maximum intensity that can be generated and scale the output
+values appropriately. The strength should be set as desired and then
+the intensity can be limited if needed to avoid washing-out. The value
+must be a float number in the range [0,1] and defaults to 0.210.
+
+
+antibanding
+Set the antibanding level. If enabled the filter will randomly vary
+the luminance of output pixels by a small amount to avoid banding of
+the histogram. Possible values are none
, weak
or
+strong
. It defaults to none
.
+
+
+
+
+
28.45 histogram# TOC
+
+
Compute and draw a color distribution histogram for the input video.
+
+
The computed histogram is a representation of the color component
+distribution in an image.
+
+
The filter accepts the following options:
+
+
+mode
+Set histogram mode.
+
+It accepts the following values:
+
+‘levels ’
+Standard histogram that displays the color components distribution in an
+image. Displays color graph for each color component. Shows distribution of
+the Y, U, V, A or R, G, B components, depending on input format, in the
+current frame. Below each graph a color component scale meter is shown.
+
+
+‘color ’
+Displays chroma values (U/V color placement) in a two dimensional
+graph (which is called a vectorscope). The brighter a pixel in the
+vectorscope, the more pixels of the input frame correspond to that pixel
+(i.e., more pixels have this chroma value). The V component is displayed on
+the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
+side being V = 255. The U component is displayed on the vertical (Y) axis,
+with the top representing U = 0 and the bottom representing U = 255.
+
+The position of a white pixel in the graph corresponds to the chroma value of
+a pixel of the input clip. The graph can therefore be used to read the hue
+(color flavor) and the saturation (the dominance of the hue in the color). As
+the hue of a color changes, it moves around the square. At the center of the
+square the saturation is zero, which means that the corresponding pixel has no
+color. If the amount of a specific color is increased (while leaving the other
+colors unchanged) the saturation increases, and the indicator moves towards
+the edge of the square.
+
+
+‘color2 ’
+Chroma values in vectorscope, similar as color
but actual chroma values
+are displayed.
+
+
+‘waveform ’
+Per row/column color component graph. In row mode, the graph on the left side
+represents color component value 0 and the right side represents value = 255.
+In column mode, the top side represents color component value = 0 and bottom
+side represents value = 255.
+
+
+Default value is levels
.
+
+
+level_height
+Set height of level in levels
. Default value is 200
.
+Allowed range is [50, 2048].
+
+
+scale_height
+Set height of color scale in levels
. Default value is 12
.
+Allowed range is [0, 40].
+
+
+step
+Set step for waveform
mode. Smaller values are useful to find out how
+many values of the same luminance are distributed across input rows/columns.
+Default value is 10
. Allowed range is [1, 255].
+
+
+waveform_mode
+Set mode for waveform
. Can be either row
, or column
.
+Default is row
.
+
+
+waveform_mirror
+Set mirroring mode for waveform
. 0
means unmirrored, 1
+means mirrored. In mirrored mode, higher values will be represented on the left
+side for row
mode and at the top for column
mode. Default is
+0
(unmirrored).
+
+
+display_mode
+Set display mode for waveform
and levels
.
+It accepts the following values:
+
+‘parade ’
+Display separate graph for the color components side by side in
+row
waveform mode or one below the other in column
waveform mode
+for waveform
histogram mode. For levels
histogram mode,
+per color component graphs are placed below each other.
+
+Using this display mode in waveform
histogram mode makes it easy to
+spot color casts in the highlights and shadows of an image, by comparing the
+contours of the top and the bottom graphs of each waveform. Since whites,
+grays, and blacks are characterized by exactly equal amounts of red, green,
+and blue, neutral areas of the picture should display three waveforms of
+roughly equal width/height. If not, the correction is easy to perform by
+making level adjustments to the three waveforms.
+
+
+‘overlay ’
+Presents information identical to that in the parade
, except
+that the graphs representing color components are superimposed directly
+over one another.
+
+This display mode in waveform
histogram mode makes it easier to spot
+relative differences or similarities in overlapping areas of the color
+components that are supposed to be identical, such as neutral whites, grays,
+or blacks.
+
+
+Default is parade
.
+
+
+levels_mode
+Set mode for levels
. Can be either linear
, or logarithmic
.
+Default is linear
.
+
+
+
+
+
28.45.1 Examples# TOC
+
+
+ Calculate and draw histogram:
+
+
ffplay -i input -vf histogram
+
+
+
+
+
+
28.46 hqdn3d# TOC
+
+
This is a high precision/quality 3d denoise filter. It aims to reduce
+image noise, producing smooth images and making still images really
+still. It should enhance compressibility.
+
+
It accepts the following optional parameters:
+
+
+luma_spatial
+A non-negative floating point number which specifies spatial luma strength.
+It defaults to 4.0.
+
+
+chroma_spatial
+A non-negative floating point number which specifies spatial chroma strength.
+It defaults to 3.0*luma_spatial /4.0.
+
+
+luma_tmp
+A floating point number which specifies luma temporal strength. It defaults to
+6.0*luma_spatial /4.0.
+
+
+chroma_tmp
+A floating point number which specifies chroma temporal strength. It defaults to
+luma_tmp *chroma_spatial /luma_spatial .
+
+
+
+
+
28.47 hqx# TOC
+
+
Apply a high-quality magnification filter designed for pixel art. This filter
+was originally created by Maxim Stepin.
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for hq2x
, 3
for
+hq3x
and 4
for hq4x
.
+Default is 3
.
+
+
+
+
+
28.48 hue# TOC
+
+
Modify the hue and/or the saturation of the input.
+
+
It accepts the following parameters:
+
+
+h
+Specify the hue angle as a number of degrees. It accepts an expression,
+and defaults to "0".
+
+
+s
+Specify the saturation in the [-10,10] range. It accepts an expression and
+defaults to "1".
+
+
+H
+Specify the hue angle as a number of radians. It accepts an
+expression, and defaults to "0".
+
+
+b
+Specify the brightness in the [-10,10] range. It accepts an expression and
+defaults to "0".
+
+
+
+
h and H are mutually exclusive, and can’t be
+specified at the same time.
+
+
The b , h , H and s option values are
+expressions containing the following constants:
+
+
+n
+frame count of the input frame starting from 0
+
+
+pts
+presentation timestamp of the input frame expressed in time base units
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+tb
+time base of the input video
+
+
+
+
+
28.48.1 Examples# TOC
+
+
+
+
+
28.48.2 Commands# TOC
+
+
This filter supports the following commands:
+
+b
+s
+h
+H
+Modify the hue and/or the saturation and/or brightness of the input video.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
28.49 idet# TOC
+
+
Detect video interlacing type.
+
+
+This filter tries to detect if the input frames are interlaced, progressive,
+top or bottom field first. It will also try and detect fields that are
+repeated between adjacent frames (a sign of telecine).
+
+
Single frame detection considers only immediately adjacent frames when classifying each frame.
+Multiple frame detection incorporates the classification history of previous frames.
+
+
The filter will log these metadata values:
+
+
+single.current_frame
+Detected type of current frame using single-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+single.tff
+Cumulative number of frames detected as top field first using single-frame detection.
+
+
+multiple.tff
+Cumulative number of frames detected as top field first using multiple-frame detection.
+
+
+single.bff
+Cumulative number of frames detected as bottom field first using single-frame detection.
+
+
+multiple.current_frame
+Detected type of current frame using multiple-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+multiple.bff
+Cumulative number of frames detected as bottom field first using multiple-frame detection.
+
+
+single.progressive
+Cumulative number of frames detected as progressive using single-frame detection.
+
+
+multiple.progressive
+Cumulative number of frames detected as progressive using multiple-frame detection.
+
+
+single.undetermined
+Cumulative number of frames that could not be classified using single-frame detection.
+
+
+multiple.undetermined
+Cumulative number of frames that could not be classified using multiple-frame detection.
+
+
+repeated.current_frame
+Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
+
+
+repeated.neither
+Cumulative number of frames with no repeated field.
+
+
+repeated.top
+Cumulative number of frames with the top field repeated from the previous frame’s top field.
+
+
+repeated.bottom
+Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
+
+
+
+
The filter accepts the following options:
+
+
+intl_thres
+Set interlacing threshold.
+
+prog_thres
+Set progressive threshold.
+
+repeat_thres
+Threshold for repeated field detection.
+
+half_life
+Number of frames after which a given frame’s contribution to the
+statistics is halved (i.e., it contributes only 0.5 to its
+classification). The default of 0 means that all frames seen are given
+full weight of 1.0 forever.
+
+analyze_interlaced_flag
+When this is not 0 then idet will use the specified number of frames to determine
+if the interlaced flag is accurate, it will not count undetermined frames.
+If the flag is found to be accurate it will be used without any further
+computations, if it is found to be inaccurate it will be cleared without any
+further computations. This allows inserting the idet filter as a low computational
+method to clean up the interlaced flag
+
+
+
+
+
28.50 il# TOC
+
+
Deinterleave or interleave fields.
+
+
+This filter allows one to process interlaced image fields without
+deinterlacing them. Deinterleaving splits the input frame into 2
+fields (so called half pictures). Odd lines are moved to the top
+half of the output image, even lines to the bottom half.
+You can process (filter) them independently and then re-interleave them.
+
+
The filter accepts the following options:
+
+
+luma_mode, l
+chroma_mode, c
+alpha_mode, a
+Available values for luma_mode , chroma_mode and
+alpha_mode are:
+
+
+‘none ’
+Do nothing.
+
+
+‘deinterleave, d ’
+Deinterleave fields, placing one above the other.
+
+
+‘interleave, i ’
+Interleave fields. Reverse the effect of deinterleaving.
+
+
+Default value is none
.
+
+
+luma_swap, ls
+chroma_swap, cs
+alpha_swap, as
+Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
+
+
+
+
+
28.51 interlace# TOC
+
+
Simple interlacing filter from progressive contents. This interleaves upper (or
+lower) lines from odd frames with lower (or upper) lines from even frames,
+halving the frame rate and preserving image height.
+
+
+
Original Original New Frame
+ Frame 'j' Frame 'j+1' (tff)
+ ========== =========== ==================
+ Line 0 --------------------> Frame 'j' Line 0
+ Line 1 Line 1 ----> Frame 'j+1' Line 1
+ Line 2 ---------------------> Frame 'j' Line 2
+ Line 3 Line 3 ----> Frame 'j+1' Line 3
+ ... ... ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+
+
It accepts the following optional parameters:
+
+
+scan
+This determines whether the interlaced frame is taken from the even
+(tff - default) or odd (bff) lines of the progressive frame.
+
+
+lowpass
+Enable (default) or disable the vertical lowpass filter to avoid twitter
+interlacing and reduce moire patterns.
+
+
+
+
+
28.52 kerndeint# TOC
+
+
Deinterlace input video by applying Donald Graft’s adaptive kernel
+deinterlacing. Work on interlaced parts of a video to produce
+progressive frames.
+
+
The description of the accepted parameters follows.
+
+
+thresh
+Set the threshold which affects the filter’s tolerance when
+determining if a pixel line must be processed. It must be an integer
+in the range [0,255] and defaults to 10. A value of 0 will result in
+applying the process on every pixel.
+
+
+map
+Paint pixels exceeding the threshold value to white if set to 1.
+Default is 0.
+
+
+order
+Set the fields order. Swap fields if set to 1, leave fields alone if
+0. Default is 0.
+
+
+sharp
+Enable additional sharpening if set to 1. Default is 0.
+
+
+twoway
+Enable twoway sharpening if set to 1. Default is 0.
+
+
+
+
+
28.52.1 Examples# TOC
+
+
+ Apply default values:
+
+
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
+
+
+ Enable additional sharpening:
+
+
+ Paint processed pixels in white:
+
+
+
+
+
28.53 lenscorrection# TOC
+
+
Correct radial lens distortion
+
+
This filter can be used to correct for radial distortion as can result from the use
+of wide angle lenses, and thereby re-rectify the image. To find the right parameters
+one can use tools available for example as part of opencv or simply trial-and-error.
+To use opencv use the calibration sample (under samples/cpp) from the opencv sources
+and extract the k1 and k2 coefficients from the resulting matrix.
+
+
Note that effectively the same filter is available in the open-source tools Krita and
+Digikam from the KDE project.
+
+
In contrast to the vignette filter, which can also be used to compensate lens errors,
+this filter corrects the distortion of the image, whereas vignette corrects the
+brightness distribution, so you may want to use both filters together in certain
+cases, though you will have to take care of ordering, i.e. whether vignetting should
+be applied before or after lens correction.
+
+
+
28.53.1 Options# TOC
+
+
The filter accepts the following options:
+
+
+cx
+Relative x-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+width.
+
+cy
+Relative y-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+height.
+
+k1
+Coefficient of the quadratic correction term. 0.5 means no correction.
+
+k2
+Coefficient of the double quadratic correction term. 0.5 means no correction.
+
+
+
+
The formula that generates the correction is:
+
+
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
+
+
where r_0 is half of the image diagonal and r_src and r_tgt are the
+distances from the focal point in the source and target images, respectively.
+
+
+
28.54 lut3d# TOC
+
+
Apply a 3D LUT to an input video.
+
+
The filter accepts the following options:
+
+
+file
+Set the 3D LUT file name.
+
+Currently supported formats:
+
+‘3dl ’
+AfterEffects
+
+‘cube ’
+Iridas
+
+‘dat ’
+DaVinci
+
+‘m3d ’
+Pandora
+
+
+
+interp
+Select interpolation mode.
+
+Available values are:
+
+
+‘nearest ’
+Use values from the nearest defined point.
+
+‘trilinear ’
+Interpolate values using the 8 points defining a cube.
+
+‘tetrahedral ’
+Interpolate values using a tetrahedron.
+
+
+
+
+
+
+
28.55 lut, lutrgb, lutyuv# TOC
+
+
Compute a look-up table for binding each pixel component input value
+to an output value, and apply it to the input video.
+
+
lutyuv applies a lookup table to a YUV input video, lutrgb
+to an RGB input video.
+
+
These filters accept the following parameters:
+
+c0
+set first pixel component expression
+
+c1
+set second pixel component expression
+
+c2
+set third pixel component expression
+
+c3
+set fourth pixel component expression, corresponds to the alpha component
+
+
+r
+set red component expression
+
+g
+set green component expression
+
+b
+set blue component expression
+
+a
+alpha component expression
+
+
+y
+set Y/luminance component expression
+
+u
+set U/Cb component expression
+
+v
+set V/Cr component expression
+
+
+
+
Each of them specifies the expression to use for computing the lookup table for
+the corresponding pixel component values.
+
+
The exact component associated to each of the c* options depends on the
+format in input.
+
+
The lut filter requires either YUV or RGB pixel formats in input,
+lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
+
+
The expressions can contain the following constants and functions:
+
+
+w
+h
+The input width and height.
+
+
+val
+The input value for the pixel component.
+
+
+clipval
+The input value, clipped to the minval -maxval range.
+
+
+maxval
+The maximum value for the pixel component.
+
+
+minval
+The minimum value for the pixel component.
+
+
+negval
+The negated value for the pixel component value, clipped to the
+minval -maxval range; it corresponds to the expression
+"maxval-clipval+minval".
+
+
+clip(val)
+The computed value in val , clipped to the
+minval -maxval range.
+
+
+gammaval(gamma)
+The computed gamma correction value of the pixel component value,
+clipped to the minval -maxval range. It corresponds to the
+expression
+"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
+
+
+
+
+
All expressions default to "val".
+
+
+
28.55.1 Examples# TOC
+
+
+
+
+
28.56 mergeplanes# TOC
+
+
Merge color channel components from several video streams.
+
+
+The filter accepts up to 4 input streams, and merges selected input
+planes to the output video.
+
+
This filter accepts the following options:
+
+mapping
+Set input to output plane mapping. Default is 0
.
+
+The mapping is specified as a bitmap. It should be specified as a
+hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
+mapping for the first plane of the output stream. ’A’ sets the number of
+the input stream to use (from 0 to 3), and ’a’ the plane number of the
+corresponding input to use (from 0 to 3). The rest of the mappings is
+similar, ’Bb’ describes the mapping for the output stream second
+plane, ’Cc’ describes the mapping for the output stream third plane and
+’Dd’ describes the mapping for the output stream fourth plane.
+
+
+format
+Set output pixel format. Default is yuva444p
.
+
+
+
+
+
28.56.1 Examples# TOC
+
+
+ Merge three gray video streams of same width and height into single video stream:
+
+
[a0][a1][a2]mergeplanes=0x001020:yuv444p
+
+
+ Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
+
+
[a0][a1]mergeplanes=0x00010210:yuva444p
+
+
+ Swap Y and A plane in yuva444p stream:
+
+
format=yuva444p,mergeplanes=0x03010200:yuva444p
+
+
+ Swap U and V plane in yuv420p stream:
+
+
format=yuv420p,mergeplanes=0x000201:yuv420p
+
+
+ Cast a rgb24 clip to yuv444p:
+
+
format=rgb24,mergeplanes=0x000102:yuv444p
+
+
+
+
+
28.57 mcdeint# TOC
+
+
Apply motion-compensation deinterlacing.
+
+
It needs one field per frame as input and must thus be used together
+with yadif=1/3 or equivalent.
+
+
This filter accepts the following options:
+
+mode
+Set the deinterlacing mode.
+
+It accepts one of the following values:
+
+‘fast ’
+‘medium ’
+‘slow ’
+use iterative motion estimation
+
+‘extra_slow ’
+like ‘slow ’, but use multiple reference frames.
+
+
+Default value is ‘fast ’.
+
+
+parity
+Set the picture field parity assumed for the input video. It must be
+one of the following values:
+
+
+‘0, tff ’
+assume top field first
+
+‘1, bff ’
+assume bottom field first
+
+
+
+Default value is ‘bff ’.
+
+
+qp
+Set per-block quantization parameter (QP) used by the internal
+encoder.
+
+Higher values should result in a smoother motion vector field but less
+optimal individual vectors. Default value is 1.
+
+
+
+
+
28.58 mp# TOC
+
+
Apply an MPlayer filter to the input video.
+
+
This filter provides a wrapper around some of the filters of
+MPlayer/MEncoder.
+
+
This wrapper is considered experimental. Some of the wrapped filters
+may not work properly and we may drop support for them, as they will
+be implemented natively into FFmpeg. Thus you should avoid
+depending on them when writing portable scripts.
+
+
The filter accepts the parameters:
+filter_name [:=]filter_params
+
+
filter_name is the name of a supported MPlayer filter,
+filter_params is a string containing the parameters accepted by
+the named filter.
+
+
The list of the currently supported filters follows:
+
+eq2
+eq
+ilpack
+softpulldown
+
+
+
The parameter syntax and behavior for the listed filters are the same
+of the corresponding MPlayer filters. For detailed instructions check
+the "VIDEO FILTERS" section in the MPlayer manual.
+
+
+
28.58.1 Examples# TOC
+
+
+ Adjust gamma, brightness, contrast:
+
+
+
+
See also mplayer(1), http://www.mplayerhq.hu/ .
+
+
+
28.59 mpdecimate# TOC
+
+
Drop frames that do not differ greatly from the previous frame in
+order to reduce frame rate.
+
+
The main use of this filter is for very-low-bitrate encoding
+(e.g. streaming over dialup modem), but it could in theory be used for
+fixing movies that were inverse-telecined incorrectly.
+
+
A description of the accepted options follows.
+
+
+max
+Set the maximum number of consecutive frames which can be dropped (if
+positive), or the minimum interval between dropped frames (if
+negative). If the value is 0, the frame is dropped regardless of the
+number of previous sequentially dropped frames.
+
+Default value is 0.
+
+
+hi
+lo
+frac
+Set the dropping threshold values.
+
+Values for hi and lo are for 8x8 pixel blocks and
+represent actual pixel value differences, so a threshold of 64
+corresponds to 1 unit of difference for each pixel, or the same spread
+out differently over the block.
+
+A frame is a candidate for dropping if no 8x8 blocks differ by more
+than a threshold of hi , and if no more than frac blocks (1
+meaning the whole image) differ by more than a threshold of lo .
+
+Default value for hi is 64*12, default value for lo is
+64*5, and default value for frac is 0.33.
+
+
+
+
+
+
28.60 negate# TOC
+
+
Negate input video.
+
+
It accepts an integer in input; if non-zero it negates the
+alpha component (if available). The default value in input is 0.
+
+
+
28.61 noformat# TOC
+
+
Force libavfilter not to use any of the specified pixel formats for the
+input to the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
28.61.1 Examples# TOC
+
+
+ Force libavfilter to use a format different from yuv420p for the
+input to the vflip filter:
+
+
noformat=pix_fmts=yuv420p,vflip
+
+
+ Convert the input video to any of the formats not contained in the list:
+
+
noformat=yuv420p|yuv444p|yuv410p
+
+
+
+
+
28.62 noise# TOC
+
+
Add noise on video input frame.
+
+
The filter accepts the following options:
+
+
+all_seed
+c0_seed
+c1_seed
+c2_seed
+c3_seed
+Set noise seed for specific pixel component or all pixel components in case
+of all_seed . Default value is 123457
.
+
+
+all_strength, alls
+c0_strength, c0s
+c1_strength, c1s
+c2_strength, c2s
+c3_strength, c3s
+Set noise strength for specific pixel component or all pixel components in case
+all_strength . Default value is 0
. Allowed range is [0, 100].
+
+
+all_flags, allf
+c0_flags, c0f
+c1_flags, c1f
+c2_flags, c2f
+c3_flags, c3f
+Set pixel component flags or set flags for all components if all_flags .
+Available values for component flags are:
+
+‘a ’
+averaged temporal noise (smoother)
+
+‘p ’
+mix random noise with a (semi)regular pattern
+
+‘t ’
+temporal noise (noise pattern changes between frames)
+
+‘u ’
+uniform noise (gaussian otherwise)
+
+
+
+
+
+
+
28.62.1 Examples# TOC
+
+
Add temporal and uniform noise to input video:
+
+
noise=alls=20:allf=t+u
+
+
+
+
28.63 null# TOC
+
+
Pass the video source unchanged to the output.
+
+
+
28.64 ocv# TOC
+
+
Apply a video transform using libopencv.
+
+
To enable this filter, install the libopencv library and headers and
+configure FFmpeg with --enable-libopencv
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the libopencv filter to apply.
+
+
+filter_params
+The parameters to pass to the libopencv filter. If not specified, the default
+values are assumed.
+
+
+
+
+
Refer to the official libopencv documentation for more precise
+information:
+http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
+
+
Several libopencv filters are supported; see the following subsections.
+
+
+
28.64.1 dilate# TOC
+
+
Dilate an image by using a specific structuring element.
+It corresponds to the libopencv function cvDilate
.
+
+
It accepts the parameters: struct_el |nb_iterations .
+
+
struct_el represents a structuring element, and has the syntax:
+cols xrows +anchor_x xanchor_y /shape
+
+
cols and rows represent the number of columns and rows of
+the structuring element, anchor_x and anchor_y the anchor
+point, and shape the shape for the structuring element. shape
+must be "rect", "cross", "ellipse", or "custom".
+
+
If the value for shape is "custom", it must be followed by a
+string of the form "=filename ". The file with name
+filename is assumed to represent a binary image, with each
+printable character corresponding to a bright pixel. When a custom
+shape is used, cols and rows are ignored, the number
+of columns and rows of the read file are assumed instead.
+
+
The default value for struct_el is "3x3+0x0/rect".
+
+
nb_iterations specifies the number of times the transform is
+applied to the image, and defaults to 1.
+
+
Some examples:
+
+
# Use the default values
+ocv=dilate
+
+# Dilate using a structuring element with a 5x5 cross, iterating two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# Read the shape from the file diamond.shape, iterating two times.
+# The file diamond.shape may contain a pattern of characters like this
+# *
+# ***
+# *****
+# ***
+# *
+# The specified columns and rows are ignored
+# but the anchor point coordinates are not
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+
+
+
28.64.2 erode# TOC
+
+
Erode an image by using a specific structuring element.
+It corresponds to the libopencv function cvErode
.
+
+
It accepts the parameters: struct_el :nb_iterations ,
+with the same syntax and semantics as the dilate filter.
+
+
+
28.64.3 smooth# TOC
+
+
Smooth the input video.
+
+
The filter takes the following parameters:
+type |param1 |param2 |param3 |param4 .
+
+
type is the type of smooth filter to apply, and must be one of
+the following values: "blur", "blur_no_scale", "median", "gaussian",
+or "bilateral". The default value is "gaussian".
+
+
The meaning of param1 , param2 , param3 , and param4
+depend on the smooth type. param1 and
+param2 accept integer positive values or 0. param3 and
+param4 accept floating point values.
+
+
The default value for param1 is 3. The default value for the
+other parameters is 0.
+
+
These parameters correspond to the parameters assigned to the
+libopencv function cvSmooth
.
+
+
+
28.65 overlay# TOC
+
+
Overlay one video on top of another.
+
+
It takes two inputs and has one output. The first input is the "main"
+video on which the second input is overlaid.
+
+
It accepts the following parameters:
+
+
A description of the accepted options follows.
+
+
+x
+y
+Set the expression for the x and y coordinates of the overlaid video
+on the main video. Default value is "0" for both expressions. In case
+the expression is invalid, it is set to a huge value (meaning that the
+overlay will not be displayed within the output visible area).
+
+
+eof_action
+The action to take when EOF is encountered on the secondary input; it accepts
+one of the following values:
+
+
+repeat
+Repeat the last frame (the default).
+
+endall
+End both streams.
+
+pass
+Pass the main input through.
+
+
+
+
+eval
+Set when the expressions for x , and y are evaluated.
+
+It accepts the following values:
+
+‘init ’
+only evaluate expressions once during the filter initialization or
+when a command is processed
+
+
+‘frame ’
+evaluate expressions for each incoming frame
+
+
+
+Default value is ‘frame ’.
+
+
+shortest
+If set to 1, force the output to terminate when the shortest input
+terminates. Default value is 0.
+
+
+format
+Set the format for the output video.
+
+It accepts the following values:
+
+‘yuv420 ’
+force YUV420 output
+
+
+‘yuv422 ’
+force YUV422 output
+
+
+‘yuv444 ’
+force YUV444 output
+
+
+‘rgb ’
+force RGB output
+
+
+
+Default value is ‘yuv420 ’.
+
+
+rgb (deprecated)
+If set to 1, force the filter to accept inputs in the RGB
+color space. Default value is 0. This option is deprecated, use
+format instead.
+
+
+repeatlast
+If set to 1, force the filter to draw the last overlay frame over the
+main input until the end of the stream. A value of 0 disables this
+behavior. Default value is 1.
+
+
+
+
The x , and y expressions can contain the following
+parameters.
+
+
+main_w, W
+main_h, H
+The main input width and height.
+
+
+overlay_w, w
+overlay_h, h
+The overlay input width and height.
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values of the output
+format. For example for the pixel format "yuv422p" hsub is 2 and
+vsub is 1.
+
+
+n
+the number of input frame, starting from 0
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
Note that the n , pos , t variables are available only
+when evaluation is done per frame , and will evaluate to NAN
+when eval is set to ‘init ’.
+
+
Be aware that frames are taken from each input video in timestamp
+order, hence, if their initial timestamps differ, it is a good idea
+to pass the two inputs through a setpts=PTS-STARTPTS filter to
+have them begin in the same zero timestamp, as the example for
+the movie filter does.
+
+
You can chain together more overlays but you should test the
+efficiency of such approach.
+
+
+
28.65.1 Commands# TOC
+
+
This filter supports the following commands:
+
+x
+y
+Modify the x and y of the overlay input.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
28.65.2 Examples# TOC
+
+
+
+
+
28.66 owdenoise# TOC
+
+
Apply Overcomplete Wavelet denoiser.
+
+
The filter accepts the following options:
+
+
+depth
+Set depth.
+
+Larger depth values will denoise lower frequency components more, but
+slow down filtering.
+
+Must be an int in the range 8-16, default is 8
.
+
+
+luma_strength, ls
+Set luma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+chroma_strength, cs
+Set chroma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+
+
+
28.67 pad# TOC
+
+
Add paddings to the input image, and place the original input at the
+provided x , y coordinates.
+
+
It accepts the following parameters:
+
+
+width, w
+height, h
+Specify an expression for the size of the output image with the
+paddings added. If the value for width or height is 0, the
+corresponding input size is used for the output.
+
+The width expression can reference the value set by the
+height expression, and vice versa.
+
+The default value of width and height is 0.
+
+
+x
+y
+Specify the offsets to place the input image at within the padded area,
+with respect to the top/left border of the output image.
+
+The x expression can reference the value set by the y
+expression, and vice versa.
+
+The default value of x and y is 0.
+
+
+color
+Specify the color of the padded area. For the syntax of this option,
+check the "Color" section in the ffmpeg-utils manual.
+
+The default value of color is "black".
+
+
+
+
The value for the width , height , x , and y
+options are expressions containing the following constants:
+
+
+in_w
+in_h
+The input video width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output width and height (the size of the padded area), as
+specified by the width and height expressions.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+x
+y
+The x and y offsets as specified by the x and y
+expressions, or NAN if not yet specified.
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
28.67.1 Examples# TOC
+
+
+
+
+
28.68 perspective# TOC
+
+
Correct perspective of video not recorded perpendicular to the screen.
+
+
A description of the accepted parameters follows.
+
+
+x0
+y0
+x1
+y1
+x2
+y2
+x3
+y3
+Set coordinates expression for top left, top right, bottom left and bottom right corners.
+Default values are 0:0:W:0:0:H:W:H
with which perspective will remain unchanged.
+If the sense
option is set to source
, then the specified points will be sent
+to the corners of the destination. If the sense
option is set to destination
,
+then the corners of the source will be sent to the specified coordinates.
+
+The expressions can use the following variables:
+
+
+W
+H
+the width and height of video frame.
+
+
+
+
+interpolation
+Set interpolation for perspective correction.
+
+It accepts the following values:
+
+‘linear ’
+‘cubic ’
+
+
+Default value is ‘linear ’.
+
+
+sense
+Set interpretation of coordinate options.
+
+It accepts the following values:
+
+‘0, source ’
+
+Send point in the source specified by the given coordinates to
+the corners of the destination.
+
+
+‘1, destination ’
+
+Send the corners of the source to the point in the destination specified
+by the given coordinates.
+
+Default value is ‘source ’.
+
+
+
+
+
+
+
28.69 phase# TOC
+
+
Delay interlaced video by one field time so that the field order changes.
+
+
The intended use is to fix PAL movies that have been captured with the
+opposite field order to the film-to-video transfer.
+
+
A description of the accepted parameters follows.
+
+
+mode
+Set phase mode.
+
+It accepts the following values:
+
+‘t ’
+Capture field order top-first, transfer bottom-first.
+Filter will delay the bottom field.
+
+
+‘b ’
+Capture field order bottom-first, transfer top-first.
+Filter will delay the top field.
+
+
+‘p ’
+Capture and transfer with the same field order. This mode only exists
+for the documentation of the other options to refer to, but if you
+actually select it, the filter will faithfully do nothing.
+
+
+‘a ’
+Capture field order determined automatically by field flags, transfer
+opposite.
+Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
+basis using field flags. If no field information is available,
+then this works just like ‘u ’.
+
+
+‘u ’
+Capture unknown or varying, transfer opposite.
+Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
+analyzing the images and selecting the alternative that produces best
+match between the fields.
+
+
+‘T ’
+Capture top-first, transfer unknown or varying.
+Filter selects among ‘t ’ and ‘p ’ using image analysis.
+
+
+‘B ’
+Capture bottom-first, transfer unknown or varying.
+Filter selects among ‘b ’ and ‘p ’ using image analysis.
+
+
+‘A ’
+Capture determined by field flags, transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
+image analysis. If no field information is available, then this works just
+like ‘U ’. This is the default mode.
+
+
+‘U ’
+Both capture and transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
+
+
+
+
+
+
+
28.70 pixdesctest# TOC
+
+
Pixel format descriptor test filter, mainly useful for internal
+testing. The output video should be equal to the input video.
+
+
For example:
+
+
format=monow, pixdesctest
+
+
+
can be used to test the monowhite pixel format descriptor definition.
+
+
+
28.71 pp# TOC
+
+
Enable the specified chain of postprocessing subfilters using libpostproc. This
+library should be automatically selected with a GPL build (--enable-gpl
).
+Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
+Each subfilter and some options have a short and a long name that can be used
+interchangeably, i.e. dr/dering are the same.
+
+
The filters accept the following options:
+
+
+subfilters
+Set postprocessing subfilters string.
+
+
+
+
All subfilters share common options to determine their scope:
+
+
+a/autoq
+Honor the quality commands for this subfilter.
+
+
+c/chrom
+Do chrominance filtering, too (default).
+
+
+y/nochrom
+Do luminance filtering only (no chrominance).
+
+
+n/noluma
+Do chrominance filtering only (no luminance).
+
+
+
+
These options can be appended after the subfilter name, separated by a ’|’.
+
+
Available subfilters are:
+
+
+hb/hdeblock[|difference[|flatness]]
+Horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+vb/vdeblock[|difference[|flatness]]
+Vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+ha/hadeblock[|difference[|flatness]]
+Accurate horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+va/vadeblock[|difference[|flatness]]
+Accurate vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+
+
The horizontal and vertical deblocking filters share the difference and
+flatness values so you cannot set different horizontal and vertical
+thresholds.
+
+
+h1/x1hdeblock
+Experimental horizontal deblocking filter
+
+
+v1/x1vdeblock
+Experimental vertical deblocking filter
+
+
+dr/dering
+Deringing filter
+
+
+tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+threshold1
+larger -> stronger filtering
+
+threshold2
+larger -> stronger filtering
+
+threshold3
+larger -> stronger filtering
+
+
+
+
+al/autolevels[|f/fullyrange], automatic brightness / contrast correction
+
+f/fullyrange
+Stretch luminance to 0-255
.
+
+
+
+
+lb/linblenddeint
+Linear blend deinterlacing filter that deinterlaces the given block by
+filtering all lines with a (1 2 1)
filter.
+
+
+li/linipoldeint
+Linear interpolating deinterlacing filter that deinterlaces the given block by
+linearly interpolating every second line.
+
+
+ci/cubicipoldeint
+Cubic interpolating deinterlacing filter deinterlaces the given block by
+cubically interpolating every second line.
+
+
+md/mediandeint
+Median deinterlacing filter that deinterlaces the given block by applying a
+median filter to every second line.
+
+
+fd/ffmpegdeint
+FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
+second line with a (-1 4 2 4 -1)
filter.
+
+
+l5/lowpass5
+Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
+block by filtering all lines with a (-1 2 6 2 -1)
filter.
+
+
+fq/forceQuant[|quantizer]
+Overrides the quantizer table from the input with the constant quantizer you
+specify.
+
+quantizer
+Quantizer to use
+
+
+
+
+de/default
+Default pp filter combination (hb|a,vb|a,dr|a
)
+
+
+fa/fast
+Fast pp filter combination (h1|a,v1|a,dr|a
)
+
+
+ac
+High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
+
+
+
+
+
28.71.1 Examples# TOC
+
+
+ Apply horizontal and vertical deblocking, deringing and automatic
+brightness/contrast:
+
+
+ Apply default filters without brightness/contrast correction:
+
+
+ Apply default filters and temporal denoiser:
+
+
pp=default/tmpnoise|1|2|3
+
+
+ Apply deblocking on luminance only, and switch vertical deblocking on or off
+automatically depending on available CPU time:
+
+
+
+
+
28.72 pp7# TOC
+
Apply Postprocessing filter 7. It is a variant of the spp filter,
+similar to spp = 6 with 7 point DCT, where only the center sample is
+used after IDCT.
+
+
The filter accepts the following options:
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range
+0 to 63. If not set, the filter will use the QP from the video stream
+(if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding.
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+‘medium ’
+Set medium thresholding (good results, default).
+
+
+
+
+
+
+
28.73 psnr# TOC
+
+
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
+Ratio) between two input videos.
+
+
This filter takes two input videos; the first input is
+considered the "main" source and is passed unchanged to the
+output. The second input is used as a "reference" video for computing
+the PSNR.
+
+
Both video inputs must have the same resolution and pixel format for
+this filter to work correctly. Also it assumes that both inputs
+have the same number of frames, which are compared one by one.
+
+
The obtained average PSNR is printed through the logging system.
+
+
The filter stores the accumulated MSE (mean squared error) of each
+frame, and at the end of the processing it is averaged across all frames
+equally, and the following formula is applied to obtain the PSNR:
+
+
+
PSNR = 10*log10(MAX^2/MSE)
+
+
+
Where MAX is the average of the maximum values of each component of the
+image.
+
+
The description of the accepted parameters follows.
+
+
+stats_file, f
+If specified the filter will use the named file to save the PSNR of
+each individual frame.
+
+
+
+
If stats_file is selected, the printed file contains a sequence of
+key/value pairs of the form key :value for each compared
+pair of frames.
+
+
A description of each shown parameter follows:
+
+
+n
+sequential number of the input frame, starting from 1
+
+
+mse_avg
+Mean Square Error pixel-by-pixel average difference of the compared
+frames, averaged over all the image components.
+
+
+mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+Mean Square Error pixel-by-pixel average difference of the compared
+frames for the component specified by the suffix.
+
+
+psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+Peak Signal to Noise ratio of the compared frames for the component
+specified by the suffix.
+
+
+
+
For example:
+
+
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+
+
In this example the input file being processed is compared with the
+reference file ref_movie.mpg . The PSNR of each individual frame
+is stored in stats.log .
+
+
+
28.74 pullup# TOC
+
+
Pulldown reversal (inverse telecine) filter, capable of handling mixed
+hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
+content.
+
+
The pullup filter is designed to take advantage of future context in making
+its decisions. This filter is stateless in the sense that it does not lock
+onto a pattern to follow, but it instead looks forward to the following
+fields in order to identify matches and rebuild progressive frames.
+
+
To produce content with an even framerate, insert the fps filter after
+pullup, use fps=24000/1001
if the input frame rate is 29.97fps,
+fps=24
for 30fps and the (rare) telecined 25fps input.
+
+
The filter accepts the following options:
+
+
+jl
+jr
+jt
+jb
+These options set the amount of "junk" to ignore at the left, right, top, and
+bottom of the image, respectively. Left and right are in units of 8 pixels,
+while top and bottom are in units of 2 lines.
+The default is 8 pixels on each side.
+
+
+sb
+Set the strict breaks. Setting this option to 1 will reduce the chances of
+the filter generating an occasional mismatched frame, but it may also cause an
+excessive number of frames to be dropped during high motion sequences.
+Conversely, setting it to -1 will make the filter match fields more easily.
+This may help processing of video where there is slight blurring between
+the fields, but may also cause there to be interlaced frames in the output.
+Default value is 0
.
+
+
+mp
+Set the metric plane to use. It accepts the following values:
+
+‘l ’
+Use luma plane.
+
+
+‘u ’
+Use chroma blue plane.
+
+
+‘v ’
+Use chroma red plane.
+
+
+
+This option may be set to use chroma plane instead of the default luma plane
+for doing filter’s computations. This may improve accuracy on very clean
+source material, but more likely will decrease accuracy, especially if there
+is chroma noise (rainbow effect) or any grayscale video.
+The main purpose of setting mp to a chroma plane is to reduce CPU
+load and make pullup usable in realtime on slow machines.
+
+
+
+
For best results (without duplicated frames in the output file) it is
+necessary to change the output frame rate. For example, to inverse
+telecine NTSC input:
+
+
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+
+
+
28.75 qp# TOC
+
+
Change video quantization parameters (QP).
+
+
The filter accepts the following option:
+
+
+qp
+Set expression for quantization parameter.
+
+
+
+
The expression is evaluated through the eval API and can contain, among others,
+the following constants:
+
+
+known
+1 if index is not 129, 0 otherwise.
+
+
+qp
+Sequential index, ranging from -129 to 128.
+
+
+
+
+
28.75.1 Examples# TOC
+
+
+ Some equation like:
+
+
+
+
+
28.76 removelogo# TOC
+
+
Suppress a TV station logo, using an image file to determine which
+pixels comprise the logo. It works by filling in the pixels that
+comprise the logo with neighboring pixels.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filter bitmap file, which can be any image format supported by
+libavformat. The width and height of the image file must match those of the
+video stream being processed.
+
+
+
+
Pixels in the provided bitmap image with a value of zero are not
+considered part of the logo, non-zero pixels are considered part of
+the logo. If you use white (255) for the logo and black (0) for the
+rest, you will be safe. For making the filter bitmap, it is
+recommended to take a screen capture of a black frame with the logo
+visible, and then using a threshold filter followed by the erode
+filter once or twice.
+
+
If needed, little splotches can be fixed manually. Remember that if
+logo pixels are not covered, the filter quality will be much
+reduced. Marking too many pixels as part of the logo does not hurt as
+much, but it will increase the amount of blurring needed to cover over
+the image and will destroy more information than necessary, and extra
+pixels will slow things down on a large logo.
+
+
+
28.77 rotate# TOC
+
+
Rotate video by an arbitrary angle expressed in radians.
+
+
The filter accepts the following options:
+
+
A description of the optional parameters follows.
+
+angle, a
+Set an expression for the angle by which to rotate the input video
+clockwise, expressed as a number of radians. A negative value will
+result in a counter-clockwise rotation. By default it is set to "0".
+
+This expression is evaluated for each frame.
+
+
+out_w, ow
+Set the output width expression, default value is "iw".
+This expression is evaluated just once during configuration.
+
+
+out_h, oh
+Set the output height expression, default value is "ih".
+This expression is evaluated just once during configuration.
+
+
+bilinear
+Enable bilinear interpolation if set to 1, a value of 0 disables
+it. Default value is 1.
+
+
+fillcolor, c
+Set the color used to fill the output area not covered by the rotated
+image. For the general syntax of this option, check the "Color" section in the
+ffmpeg-utils manual. If the special value "none" is selected then no
+background is printed (useful for example if the background is never shown).
+
+Default value is "black".
+
+
+
+
The expressions for the angle and the output size can contain the
+following constants and functions:
+
+
+n
+sequential number of the input frame, starting from 0. It is always NAN
+before the first frame is filtered.
+
+
+t
+time in seconds of the input frame, it is set to 0 when the filter is
+configured. It is always NAN before the first frame is filtered.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_w, iw
+in_h, ih
+the input video width and height
+
+
+out_w, ow
+out_h, oh
+the output width and height, that is the size of the padded area as
+specified by the width and height expressions
+
+
+rotw(a)
+roth(a)
+the minimal width/height required for completely containing the input
+video rotated by a radians.
+
+These are only available when computing the out_w and
+out_h expressions.
+
+
+
+
+
28.77.1 Examples# TOC
+
+
+ Rotate the input by PI/6 radians clockwise:
+
+
+ Rotate the input by PI/6 radians counter-clockwise:
+
+
+ Rotate the input by 45 degrees clockwise:
+
+
+ Apply a constant rotation with period T, starting from an angle of PI/3:
+
+
+ Make the input video rotation oscillating with a period of T
+seconds and an amplitude of A radians:
+
+
rotate=A*sin(2*PI/T*t)
+
+
+ Rotate the video, output size is chosen so that the whole rotating
+input video is always completely contained in the output:
+
+
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
+
+
+ Rotate the video, reduce the output size so that no background is ever
+shown:
+
+
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
+
+
+
+
+
28.77.2 Commands# TOC
+
+
The filter supports the following commands:
+
+
+a, angle
+Set the angle expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
28.78 sab# TOC
+
+
Apply Shape Adaptive Blur.
+
+
The filter accepts the following options:
+
+
+luma_radius, lr
+Set luma blur filter strength, must be a value in range 0.1-4.0, default
+value is 1.0. A greater value will result in a more blurred image, and
+in slower processing.
+
+
+luma_pre_filter_radius, lpfr
+Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
+value is 1.0.
+
+
+luma_strength, ls
+Set luma maximum difference between pixels to still be considered, must
+be a value in the 0.1-100.0 range, default value is 1.0.
+
+
+chroma_radius, cr
+Set chroma blur filter strength, must be a value in range 0.1-4.0. A
+greater value will result in a more blurred image, and in slower
+processing.
+
+
+chroma_pre_filter_radius, cpfr
+Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
+
+
+chroma_strength, cs
+Set chroma maximum difference between pixels to still be considered,
+must be a value in the 0.1-100.0 range.
+
+
+
+
Each chroma option value, if not explicitly specified, is set to the
+corresponding luma option value.
+
+
+
28.79 scale# TOC
+
+
Scale (resize) the input video, using the libswscale library.
+
+
The scale filter forces the output display aspect ratio to be the same
+of the input, by changing the output sample aspect ratio.
+
+
If the input image format is different from the format requested by
+the next filter, the scale filter will convert the input to the
+requested format.
+
+
+
28.79.1 Options# TOC
+
The filter accepts the following options, or any of the options
+supported by the libswscale scaler.
+
+
See (ffmpeg-scaler)the ffmpeg-scaler manual for
+the complete list of scaler options.
+
+
+width, w
+height, h
+Set the output video dimension expression. Default value is the input
+dimension.
+
+If the value is 0, the input width is used for the output.
+
+If one of the values is -1, the scale filter will use a value that
+maintains the aspect ratio of the input image, calculated from the
+other specified dimension. If both of them are -1, the input size is
+used.
+
+If one of the values is -n with n > 1, the scale filter will also use a value
+that maintains the aspect ratio of the input image, calculated from the other
+specified dimension. After that it will, however, make sure that the calculated
+dimension is divisible by n and adjust the value if necessary.
+
+See below for the list of accepted constants for use in the dimension
+expression.
+
+
+interl
+Set the interlacing mode. It accepts the following values:
+
+
+‘1 ’
+Force interlaced aware scaling.
+
+
+‘0 ’
+Do not apply interlaced scaling.
+
+
+‘-1 ’
+Select interlaced aware scaling depending on whether the source frames
+are flagged as interlaced or not.
+
+
+
+Default value is ‘0 ’.
+
+
+flags
+Set libswscale scaling flags. See
+(ffmpeg-scaler)the ffmpeg-scaler manual for the
+complete list of values. If not explicitly specified the filter applies
+the default flags.
+
+
+size, s
+Set the video size. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+in_color_matrix
+out_color_matrix
+Set in/output YCbCr color space type.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder.
+
+If not specified, the color space type depends on the pixel format.
+
+Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘bt709 ’
+Format conforming to International Telecommunication Union (ITU)
+Recommendation BT.709.
+
+
+‘fcc ’
+Set color space conforming to the United States Federal Communications
+Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
+
+
+‘bt601 ’
+Set color space conforming to:
+
+
+ ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
+
+ ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
+
+ Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
+
+
+
+
+‘smpte240m ’
+Set color space conforming to SMPTE ST 240:1999.
+
+
+
+
+in_range
+out_range
+Set in/output YCbCr sample range.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder. If not specified, the
+range depends on the pixel format. Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘jpeg/full/pc ’
+Set full range (0-255 in case of 8-bit luma).
+
+
+‘mpeg/tv ’
+Set "MPEG" range (16-235 in case of 8-bit luma).
+
+
+
+
+force_original_aspect_ratio
+Enable decreasing or increasing output video width or height if necessary to
+keep the original aspect ratio. Possible values:
+
+
+‘disable ’
+Scale the video as specified and disable this feature.
+
+
+‘decrease ’
+The output video dimensions will automatically be decreased if needed.
+
+
+‘increase ’
+The output video dimensions will automatically be increased if needed.
+
+
+
+
+One useful instance of this option is that when you know a specific device’s
+maximum allowed resolution, you can use this to limit the output video to
+that, while retaining the aspect ratio. For example, device A allows
+1280x720 playback, and your video is 1920x800. Using this option (set it to
+decrease) and specifying 1280x720 to the command line makes the output
+1280x533.
+
+Please note that this is a different thing than specifying -1 for w
+or h , you still need to specify the output resolution for this option
+to work.
+
+
+
+
+
The values of the w and h options are expressions
+containing the following constants:
+
+
+in_w
+in_h
+The input width and height
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (scaled) width and height
+
+
+ow
+oh
+These are the same as out_w and out_h
+
+
+a
+The same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+The input display aspect ratio. Calculated from (iw / ih) * sar
.
+
+
+hsub
+vsub
+horizontal and vertical input chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+ohsub
+ovsub
+horizontal and vertical output chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
28.79.2 Examples# TOC
+
+
+
+
+
28.80 separatefields# TOC
+
+
The separatefields
takes a frame-based video input and splits
+each frame into its components fields, producing a new half height clip
+with twice the frame rate and twice the frame count.
+
+
This filter use field-dominance information in frame to decide which
+of each pair of fields to place first in the output.
+If it gets it wrong use setfield filter before separatefields
filter.
+
+
+
28.81 setdar, setsar# TOC
+
+
The setdar
filter sets the Display Aspect Ratio for the filter
+output video.
+
+
This is done by changing the specified Sample (aka Pixel) Aspect
+Ratio, according to the following equation:
+
+
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+
+
Keep in mind that the setdar
filter does not modify the pixel
+dimensions of the video frame. Also, the display aspect ratio set by
+this filter may be changed by later filters in the filterchain,
+e.g. in case of scaling or if another "setdar" or a "setsar" filter is
+applied.
+
+
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
+the filter output video.
+
+
Note that as a consequence of the application of this filter, the
+output display aspect ratio will change according to the equation
+above.
+
+
Keep in mind that the sample aspect ratio set by the setsar
+filter may be changed by later filters in the filterchain, e.g. if
+another "setsar" or a "setdar" filter is applied.
+
+
It accepts the following parameters:
+
+
+r, ratio, dar (setdar
only), sar (setsar
only)
+Set the aspect ratio used by the filter.
+
+The parameter can be a floating point number string, an expression, or
+a string of the form num :den , where num and
+den are the numerator and denominator of the aspect ratio. If
+the parameter is not specified, it is assumed the value "0".
+In case the form "num :den " is used, the :
character
+should be escaped.
+
+
+max
+Set the maximum integer value to use for expressing numerator and
+denominator when reducing the expressed aspect ratio to a rational.
+Default value is 100
.
+
+
+
+
+
The parameter sar is an expression containing
+the following constants:
+
+
+E, PI, PHI
+These are approximated values for the mathematical constants e
+(Euler’s number), pi (Greek pi), and phi (the golden ratio).
+
+
+w, h
+The input width and height.
+
+
+a
+These are the same as w / h .
+
+
+sar
+The input sample aspect ratio.
+
+
+dar
+The input display aspect ratio. It is the same as
+(w / h ) * sar .
+
+
+hsub, vsub
+Horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
28.81.1 Examples# TOC
+
+
+ To change the display aspect ratio to 16:9, specify one of the following:
+
+
setdar=dar=1.77777
+setdar=dar=16/9
+setdar=dar=1.77777
+
+
+ To change the sample aspect ratio to 10:11, specify:
+
+
+ To set a display aspect ratio of 16:9, and specify a maximum integer value of
+1000 in the aspect ratio reduction, use the command:
+
+
setdar=ratio=16/9:max=1000
+
+
+
+
+
+
28.82 setfield# TOC
+
+
Force field for the output video frame.
+
+
The setfield
filter marks the interlace type field for the
+output frames. It does not change the input frame, but only sets the
+corresponding property, which affects how the frame is treated by
+following filters (e.g. fieldorder
or yadif
).
+
+
The filter accepts the following options:
+
+
+mode
+Available values are:
+
+
+‘auto ’
+Keep the same field property.
+
+
+‘bff ’
+Mark the frame as bottom-field-first.
+
+
+‘tff ’
+Mark the frame as top-field-first.
+
+
+‘prog ’
+Mark the frame as progressive.
+
+
+
+
+
+
+
28.83 showinfo# TOC
+
+
Show a line containing various information for each input video frame.
+The input video is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The Presentation TimeStamp of the input frame, expressed as a number of
+time base units. The time base unit depends on the filter input pad.
+
+
+pts_time
+The Presentation TimeStamp of the input frame, expressed as a number of
+seconds.
+
+
+pos
+The position of the frame in the input stream, or -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic video).
+
+
+fmt
+The pixel format name.
+
+
+sar
+The sample aspect ratio of the input frame, expressed in the form
+num /den .
+
+
+s
+The size of the input frame. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+i
+The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
+for bottom field first).
+
+
+iskey
+This is 1 if the frame is a key frame, 0 otherwise.
+
+
+type
+The picture type of the input frame ("I" for an I-frame, "P" for a
+P-frame, "B" for a B-frame, or "?" for an unknown type).
+Also refer to the documentation of the AVPictureType
enum and of
+the av_get_picture_type_char
function defined in
+libavutil/avutil.h .
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
+
+
+plane_checksum
+The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
+expressed in the form "[c0 c1 c2 c3 ]".
+
+
+
+
+
28.84 shuffleplanes# TOC
+
+
Reorder and/or duplicate video planes.
+
+
It accepts the following parameters:
+
+
+map0
+The index of the input plane to be used as the first output plane.
+
+
+map1
+The index of the input plane to be used as the second output plane.
+
+
+map2
+The index of the input plane to be used as the third output plane.
+
+
+map3
+The index of the input plane to be used as the fourth output plane.
+
+
+
+
+
The first plane has the index 0. The default is to keep the input unchanged.
+
+
Swap the second and third planes of the input:
+
+
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
+
+
+
+
28.85 signalstats# TOC
+
Evaluate various visual metrics that assist in determining issues associated
+with the digitization of analog video media.
+
+
By default the filter will log these metadata values:
+
+
+YMIN
+Display the minimal Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+YLOW
+Display the Y value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YAVG
+Display the average Y value within the input frame. Expressed in range of
+[0-255].
+
+
+YHIGH
+Display the Y value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YMAX
+Display the maximum Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+UMIN
+Display the minimal U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+ULOW
+Display the U value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UAVG
+Display the average U value within the input frame. Expressed in range of
+[0-255].
+
+
+UHIGH
+Display the U value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UMAX
+Display the maximum U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VMIN
+Display the minimal V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VLOW
+Display the V value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VAVG
+Display the average V value within the input frame. Expressed in range of
+[0-255].
+
+
+VHIGH
+Display the V value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VMAX
+Display the maximum V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+SATMIN
+Display the minimal saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATLOW
+Display the saturation value at the 10% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATAVG
+Display the average saturation value within the input frame. Expressed in range
+of [0-~181.02].
+
+
+SATHIGH
+Display the saturation value at the 90% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATMAX
+Display the maximum saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+HUEMED
+Display the median value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+HUEAVG
+Display the average value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+YDIF
+Display the average of sample value difference between all values of the Y
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+UDIF
+Display the average of sample value difference between all values of the U
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+VDIF
+Display the average of sample value difference between all values of the V
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+
+
The filter accepts the following options:
+
+
+stat
+out
+
+stat specify an additional form of image analysis.
+out output video with the specified type of pixel highlighted.
+
+Both options accept the following values:
+
+
+‘tout ’
+Identify temporal outlier pixels. A temporal outlier is a pixel
+unlike the neighboring pixels of the same field. Examples of temporal outliers
+include the results of video dropouts, head clogs, or tape tracking issues.
+
+
+‘vrep ’
+Identify vertical line repetition . Vertical line repetition includes
+similar rows of pixels within a frame. In born-digital video vertical line
+repetition is common, but this pattern is uncommon in video digitized from an
+analog source. When it occurs in video that results from the digitization of an
+analog source it can indicate concealment from a dropout compensator.
+
+
+‘brng ’
+Identify pixels that fall outside of legal broadcast range.
+
+
+
+
+color, c
+Set the highlight color for the out option. The default color is
+yellow.
+
+
+
+
+
28.85.1 Examples# TOC
+
+
+
+
+
28.86 smartblur# TOC
+
+
Blur the input video without impacting the outlines.
+
+
It accepts the following options:
+
+
+luma_radius, lr
+Set the luma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+luma_strength, ls
+Set the luma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+luma_threshold, lt
+Set the luma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+chroma_radius, cr
+Set the chroma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+chroma_strength, cs
+Set the chroma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+chroma_threshold, ct
+Set the chroma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+
+
If a chroma option is not explicitly set, the corresponding luma value
+is set.
+
+
+
28.87 stereo3d# TOC
+
+
Convert between different stereoscopic image formats.
+
+
The filters accept the following options:
+
+
+in
+Set stereoscopic image format of input.
+
+Available values for input image formats are:
+
+‘sbsl ’
+side by side parallel (left eye left, right eye right)
+
+
+‘sbsr ’
+side by side crosseye (right eye left, left eye right)
+
+
+‘sbs2l ’
+side by side parallel with half width resolution
+(left eye left, right eye right)
+
+
+‘sbs2r ’
+side by side crosseye with half width resolution
+(right eye left, left eye right)
+
+
+‘abl ’
+above-below (left eye above, right eye below)
+
+
+‘abr ’
+above-below (right eye above, left eye below)
+
+
+‘ab2l ’
+above-below with half height resolution
+(left eye above, right eye below)
+
+
+‘ab2r ’
+above-below with half height resolution
+(right eye above, left eye below)
+
+
+‘al ’
+alternating frames (left eye first, right eye second)
+
+
+‘ar ’
+alternating frames (right eye first, left eye second)
+
+Default value is ‘sbsl ’.
+
+
+
+
+out
+Set stereoscopic image format of output.
+
+Available values for output image formats are all the input formats as well as:
+
+‘arbg ’
+anaglyph red/blue gray
+(red filter on left eye, blue filter on right eye)
+
+
+‘argg ’
+anaglyph red/green gray
+(red filter on left eye, green filter on right eye)
+
+
+‘arcg ’
+anaglyph red/cyan gray
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arch ’
+anaglyph red/cyan half colored
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcc ’
+anaglyph red/cyan color
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcd ’
+anaglyph red/cyan color optimized with the least squares projection of dubois
+(red filter on left eye, cyan filter on right eye)
+
+
+‘agmg ’
+anaglyph green/magenta gray
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmh ’
+anaglyph green/magenta half colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmc ’
+anaglyph green/magenta colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmd ’
+anaglyph green/magenta color optimized with the least squares projection of dubois
+(green filter on left eye, magenta filter on right eye)
+
+
+‘aybg ’
+anaglyph yellow/blue gray
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybh ’
+anaglyph yellow/blue half colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybc ’
+anaglyph yellow/blue colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybd ’
+anaglyph yellow/blue color optimized with the least squares projection of dubois
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘irl ’
+interleaved rows (left eye has top row, right eye starts on next row)
+
+
+‘irr ’
+interleaved rows (right eye has top row, left eye starts on next row)
+
+
+‘ml ’
+mono output (left eye only)
+
+
+‘mr ’
+mono output (right eye only)
+
+
+
+Default value is ‘arcd ’.
+
+
+
+
+
28.87.1 Examples# TOC
+
+
+ Convert input video from side by side parallel to anaglyph yellow/blue dubois:
+
+
+ Convert input video from above-below (left eye above, right eye below) to side by side crosseye.
+
+
+
+
+
28.88 spp# TOC
+
+
Apply a simple postprocessing filter that compresses and decompresses the image
+at several (or - in the case of quality level 6
- all) shifts
+and averages the results.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-6. If set to 0
, the filter will have no
+effect. A value of 6
+ means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding (default).
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
28.89 subtitles# TOC
+
+
Draw subtitles on top of input video using the libass library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libass
. This filter also requires a build with libavcodec and
+libavformat to convert the passed subtitles file to ASS (Advanced Substation
+Alpha) subtitles format.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filename of the subtitle file to read. It must be specified.
+
+
+original_size
+Specify the size of the original video, the video for which the ASS file
+was composed. For the syntax of this option, check the "Video size" section in
+the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
+this is necessary to correctly scale the fonts if the aspect ratio has been
+changed.
+
+
+charenc
+Set subtitles input character encoding. subtitles
filter only. Only
+useful if not UTF-8.
+
+
+stream_index, si
+Set subtitles stream index. subtitles
filter only.
+
+
+
+
If the first key is not specified, it is assumed that the first value
+specifies the filename .
+
+
For example, to render the file sub.srt on top of the input
+video, use the command:
+
+
+
which is equivalent to:
+
+
subtitles=filename=sub.srt
+
+
+
To render the default subtitles stream from file video.mkv , use:
+
+
+
To render the second subtitles stream from that file, use:
+
+
subtitles=video.mkv:si=1
+
+
+
+
28.90 super2xsai# TOC
+
+
Scale the input by 2x and smooth using the Super2xSaI (Scale and
+Interpolate) pixel art scaling algorithm.
+
+
Useful for enlarging pixel art images without reducing sharpness.
+
+
+
28.91 swapuv# TOC
+
Swap U & V plane.
+
+
+
28.92 telecine# TOC
+
+
Apply telecine process to the video.
+
+
This filter accepts the following options:
+
+
+first_field
+
+‘top, t ’
+top field first
+
+‘bottom, b ’
+bottom field first
+The default value is top
.
+
+
+
+
+pattern
+A string of numbers representing the pulldown pattern you wish to apply.
+The default value is 23
.
+
+
+
+
+
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+
+
+
28.93 thumbnail# TOC
+
Select the most representative frame in a given sequence of consecutive frames.
+
+
The filter accepts the following options:
+
+
+n
+Set the frames batch size to analyze; in a set of n frames, the filter
+will pick one of them, and then handle the next batch of n frames until
+the end. Default is 100
.
+
+
+
+
Since the filter keeps track of the whole frames sequence, a bigger n
+value will result in a higher memory usage, so a high value is not recommended.
+
+
+
28.93.1 Examples# TOC
+
+
+ Extract one picture each 50 frames:
+
+
+ Complete example of a thumbnail creation with ffmpeg
:
+
+
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
+
+
+
+
+
28.94 tile# TOC
+
+
Tile several successive frames together.
+
+
The filter accepts the following options:
+
+
+layout
+Set the grid size (i.e. the number of lines and columns). For the syntax of
+this option, check the "Video size" section in the ffmpeg-utils manual.
+
+
+nb_frames
+Set the maximum number of frames to render in the given area. It must be less
+than or equal to w xh . The default value is 0
, meaning all
+the area will be used.
+
+
+margin
+Set the outer border margin in pixels.
+
+
+padding
+Set the inner border thickness (i.e. the number of pixels between frames). For
+more advanced padding options (such as having different values for the edges),
+refer to the pad video filter.
+
+
+color
+Specify the color of the unused area. For the syntax of this option, check the
+"Color" section in the ffmpeg-utils manual. The default value of color
+is "black".
+
+
+
+
+
28.94.1 Examples# TOC
+
+
+
+
+
28.95 tinterlace# TOC
+
+
Perform various types of temporal field interlacing.
+
+
Frames are counted starting from 1, so the first input frame is
+considered odd.
+
+
The filter accepts the following options:
+
+
+mode
+Specify the mode of the interlacing. This option can also be specified
+as a value alone. See below for a list of values for this option.
+
+Available values are:
+
+
+‘merge, 0 ’
+Move odd frames into the upper field, even into the lower field,
+generating a double height frame at half frame rate.
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+‘drop_odd, 1 ’
+Only output even frames, odd frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+ 22222 44444
+ 22222 44444
+ 22222 44444
+ 22222 44444
+
+
+
+‘drop_even, 2 ’
+Only output odd frames, even frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+11111 33333
+11111 33333
+11111 33333
+
+
+
+‘pad, 3 ’
+Expand each frame to full height, but pad alternate lines with black,
+generating a frame with double height at the same input frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+
+
+
+
+‘interleave_top, 4 ’
+Interleave the upper field from odd frames with the lower field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+
+‘interleave_bottom, 5 ’
+Interleave the lower field from odd frames with the upper field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+
+Output:
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+
+
+
+
+‘interlacex2, 6 ’
+Double frame rate with unchanged height. Frames are inserted each
+containing the second temporal field from the previous input frame and
+the first temporal field from the next input frame. This mode relies on
+the top_field_first flag. Useful for interlaced video displays with no
+field synchronisation.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+
+Output:
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+
+
+
+
+
+
+Numeric values are deprecated but are accepted for backward
+compatibility reasons.
+
+Default mode is merge
.
+
+
+flags
+Specify flags influencing the filter process.
+
+Available value for flags is:
+
+
+low_pass_filter, vlfp
+Enable vertical low-pass filtering in the filter.
+Vertical low-pass filtering is required when creating an interlaced
+destination from a progressive source which contains high-frequency
+vertical detail. Filtering will reduce interlace ’twitter’ and Moire
+patterning.
+
+Vertical low-pass filtering can only be enabled for mode
+interleave_top and interleave_bottom .
+
+
+
+
+
+
+
+
28.96 transpose# TOC
+
+
Transpose rows with columns in the input video and optionally flip it.
+
+
It accepts the following parameters:
+
+
+dir
+Specify the transposition direction.
+
+Can assume the following values:
+
+‘0, 4, cclock_flip ’
+Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
+
+
L.R L.l
+. . -> . .
+l.r R.r
+
+
+
+‘1, 5, clock ’
+Rotate by 90 degrees clockwise, that is:
+
+
L.R l.L
+. . -> . .
+l.r r.R
+
+
+
+‘2, 6, cclock ’
+Rotate by 90 degrees counterclockwise, that is:
+
+
L.R R.r
+. . -> . .
+l.r L.l
+
+
+
+‘3, 7, clock_flip ’
+Rotate by 90 degrees clockwise and vertically flip, that is:
+
+
L.R r.R
+. . -> . .
+l.r l.L
+
+
+
+
+For values between 4-7, the transposition is only done if the input
+video geometry is portrait and not landscape. These values are
+deprecated, the passthrough
option should be used instead.
+
+Numerical values are deprecated, and should be dropped in favor of
+symbolic constants.
+
+
+passthrough
+Do not apply the transposition if the input geometry matches the one
+specified by the specified value. It accepts the following values:
+
+‘none ’
+Always apply transposition.
+
+‘portrait ’
+Preserve portrait geometry (when height >= width ).
+
+‘landscape ’
+Preserve landscape geometry (when width >= height ).
+
+
+
+Default value is none
.
+
+
+
+
For example to rotate by 90 degrees clockwise and preserve portrait
+layout:
+
+
transpose=dir=1:passthrough=portrait
+
+
+
The command above can also be specified as:
+
+
+
+
28.97 trim# TOC
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Specify the time of the start of the kept section, i.e. the frame with the
+timestamp start will be the first frame in the output.
+
+
+end
+Specify the time of the first frame that will be dropped, i.e. the frame
+immediately preceding the one with the timestamp end will be the last
+frame in the output.
+
+
+start_pts
+This is the same as start , except this option sets the start timestamp
+in timebase units instead of seconds.
+
+
+end_pts
+This is the same as end , except this option sets the end timestamp
+in timebase units instead of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_frame
+The number of the first frame that should be passed to the output.
+
+
+end_frame
+The number of the first frame that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _frame variants simply count the
+frames that pass through the filter. Also note that this filter does not modify
+the timestamps. If you wish for the output timestamps to start at zero, insert a
+setpts filter after the trim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all the frames that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple trim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -vf trim=60:120
+
+
+ Keep only the first second:
+
+
ffmpeg -i INPUT -vf trim=duration=1
+
+
+
+
+
+
+
28.98 unsharp# TOC
+
+
Sharpen or blur the input video.
+
+
It accepts the following parameters:
+
+
+luma_msize_x, lx
+Set the luma matrix horizontal size. It must be an odd integer between
+3 and 63. The default value is 5.
+
+
+luma_msize_y, ly
+Set the luma matrix vertical size. It must be an odd integer between 3
+and 63. The default value is 5.
+
+
+luma_amount, la
+Set the luma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 1.0.
+
+
+chroma_msize_x, cx
+Set the chroma matrix horizontal size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_msize_y, cy
+Set the chroma matrix vertical size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_amount, ca
+Set the chroma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 0.0.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
All parameters are optional and default to the equivalent of the
+string ’5:5:1.0:5:5:0.0’.
+
+
+
28.98.1 Examples# TOC
+
+
+ Apply strong luma sharpen effect:
+
+
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
+
+
+ Apply a strong blur of both luma and chroma parameters:
+
+
+
+
+
28.99 uspp# TOC
+
+
Apply ultra slow/simple postprocessing filter that compresses and decompresses
+the image at several (or - in the case of quality level 8
- all)
+shifts and averages the results.
+
+
The way this differs from the behavior of spp is that uspp actually encodes &
+decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
+DCT similar to MJPEG.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-8. If set to 0
, the filter will have no
+effect. A value of 8
+ means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+
+
+
28.100 vidstabdetect# TOC
+
+
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
+vidstabtransform for pass 2.
+
+
This filter generates a file with relative translation and rotation
+transform information about subsequent frames, which is then used by
+the vidstabtransform filter.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
This filter accepts the following options:
+
+
+result
+Set the path to the file used to write the transforms information.
+Default value is transforms.trf .
+
+
+shakiness
+Set how shaky the video is and how quick the camera is. It accepts an
+integer in the range 1-10, a value of 1 means little shakiness, a
+value of 10 means strong shakiness. Default value is 5.
+
+
+accuracy
+Set the accuracy of the detection process. It must be a value in the
+range 1-15. A value of 1 means low accuracy, a value of 15 means high
+accuracy. Default value is 15.
+
+
+stepsize
+Set stepsize of the search process. The region around minimum is
+scanned with 1 pixel resolution. Default value is 6.
+
+
+mincontrast
+Set minimum contrast. Below this value a local measurement field is
+discarded. Must be a floating point value in the range 0-1. Default
+value is 0.3.
+
+
+tripod
+Set reference frame number for tripod mode.
+
+If enabled, the motion of the frames is compared to a reference frame
+in the filtered stream, identified by the specified number. The idea
+is to compensate all movements in a more-or-less static scene and keep
+the camera view absolutely still.
+
+If set to 0, it is disabled. The frames are counted starting from 1.
+
+
+show
+Show fields and transforms in the resulting frames. It accepts an
+integer in the range 0-2. Default value is 0, which disables any
+visualization.
+
+
+
+
+
28.100.1 Examples# TOC
+
+
+ Use default values:
+
+
+ Analyze strongly shaky movie and put the results in file
+mytransforms.trf :
+
+
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
+
+
+ Visualize the result of internal transformations in the resulting
+video:
+
+
+ Analyze a video with medium shakiness using ffmpeg
:
+
+
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
+
+
+
+
+
28.101 vidstabtransform# TOC
+
+
Video stabilization/deshaking: pass 2 of 2,
+see vidstabdetect for pass 1.
+
+
Read a file with transform information for each frame and
+apply/compensate them. Together with the vidstabdetect
+filter this can be used to deshake videos. See also
+http://public.hronopik.de/vid.stab . It is important to also use
+the unsharp filter, see below.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
+
28.101.1 Options# TOC
+
+
+input
+Set path to the file used to read the transforms. Default value is
+transforms.trf .
+
+
+smoothing
+Set the number of frames (value*2 + 1) used for lowpass filtering the
+camera movements. Default value is 10.
+
+For example a number of 10 means that 21 frames are used (10 in the
+past and 10 in the future) to smoothen the motion in the video. A
+larger value leads to a smoother video, but limits the acceleration of
+the camera (pan/tilt movements). 0 is a special case where a static
+camera is simulated.
+
+
+optalgo
+Set the camera path optimization algorithm.
+
+Accepted values are:
+
+‘gauss ’
+gaussian kernel low-pass filter on camera motion (default)
+
+‘avg ’
+averaging on transformations
+
+
+
+
+maxshift
+Set maximal number of pixels to translate frames. Default value is -1,
+meaning no limit.
+
+
+maxangle
+Set maximal angle in radians (degree*PI/180) to rotate frames. Default
+value is -1, meaning no limit.
+
+
+crop
+Specify how to deal with borders that may be visible due to movement
+compensation.
+
+Available values are:
+
+‘keep ’
+keep image information from previous frame (default)
+
+‘black ’
+fill the border black
+
+
+
+
+invert
+Invert transforms if set to 1. Default value is 0.
+
+
+relative
+Consider transforms as relative to previous frame if set to 1,
+absolute if set to 0. Default value is 0.
+
+
+zoom
+Set percentage to zoom. A positive value will result in a zoom-in
+effect, a negative value in a zoom-out effect. Default value is 0 (no
+zoom).
+
+
+optzoom
+Set optimal zooming to avoid borders.
+
+Accepted values are:
+
+‘0 ’
+disabled
+
+‘1 ’
+optimal static zoom value is determined (only very strong movements
+will lead to visible borders) (default)
+
+‘2 ’
+optimal adaptive zoom value is determined (no borders will be
+visible), see zoomspeed
+
+
+
+Note that the value given at zoom is added to the one calculated here.
+
+
+zoomspeed
+Set percent to zoom maximally each frame (enabled when
+optzoom is set to 2). Range is from 0 to 5, default value is
+0.25.
+
+
+interpol
+Specify type of interpolation.
+
+Available values are:
+
+‘no ’
+no interpolation
+
+‘linear ’
+linear only horizontal
+
+‘bilinear ’
+linear in both directions (default)
+
+‘bicubic ’
+cubic in both directions (slow)
+
+
+
+
+tripod
+Enable virtual tripod mode if set to 1, which is equivalent to
+relative=0:smoothing=0
. Default value is 0.
+
+Use also tripod
option of vidstabdetect .
+
+
+debug
+Increase log verbosity if set to 1. Also the detected global motions
+are written to the temporary file global_motions.trf . Default
+value is 0.
+
+
+
+
+
28.101.2 Examples# TOC
+
+
+
+
+
28.102 vflip# TOC
+
+
Flip the input video vertically.
+
+
For example, to vertically flip a video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "vflip" out.avi
+
+
+
+
28.103 vignette# TOC
+
+
Make or reverse a natural vignetting effect.
+
+
The filter accepts the following options:
+
+
+angle, a
+Set lens angle expression as a number of radians.
+
+The value is clipped in the [0,PI/2]
range.
+
+Default value: "PI/5"
+
+
+x0
+y0
+Set center coordinates expressions. Respectively "w/2"
and "h/2"
+by default.
+
+
+mode
+Set forward/backward mode.
+
+Available modes are:
+
+‘forward ’
+The larger the distance from the central point, the darker the image becomes.
+
+
+‘backward ’
+The larger the distance from the central point, the brighter the image becomes.
+This can be used to reverse a vignette effect, though there is no automatic
+detection to extract the lens angle and other settings (yet). It can
+also be used to create a burning effect.
+
+
+
+Default value is ‘forward ’.
+
+
+eval
+Set evaluation mode for the expressions (angle , x0 , y0 ).
+
+It accepts the following values:
+
+‘init ’
+Evaluate expressions only once during the filter initialization.
+
+
+‘frame ’
+Evaluate expressions for each incoming frame. This is way slower than the
+‘init ’ mode since it requires all the scalers to be re-computed, but it
+allows advanced dynamic expressions.
+
+
+
+Default value is ‘init ’.
+
+
+dither
+Set dithering to reduce the circular banding effects. Default is 1
+(enabled).
+
+
+aspect
+Set vignette aspect. This setting allows one to adjust the shape of the vignette.
+Setting this value to the SAR of the input will make a rectangular vignetting
+following the dimensions of the video.
+
+Default is 1/1
.
+
+
+
+
+
28.103.1 Expressions# TOC
+
+
The angle , x0 and y0 expressions can contain the
+following parameters.
+
+
+w
+h
+input width and height
+
+
+n
+the number of input frame, starting from 0
+
+
+pts
+the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
+TB units, NAN if undefined
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+the PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in seconds, NAN if undefined
+
+
+tb
+time base of the input video
+
+
+
+
+
+
28.103.2 Examples# TOC
+
+
+ Apply simple strong vignetting effect:
+
+
+ Make a flickering vignetting:
+
+
vignette='PI/4+random(1)*PI/50':eval=frame
+
+
+
+
+
+
28.104 w3fdif# TOC
+
+
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
+Deinterlacing Filter").
+
+
Based on the process described by Martin Weston for BBC R&D, and
+implemented based on the de-interlace algorithm written by Jim
+Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
+uses filter coefficients calculated by BBC R&D.
+
+
There are two sets of filter coefficients, so called "simple":
+and "complex". Which set of filter coefficients is used can
+be set by passing an optional parameter:
+
+
+filter
+Set the interlacing filter coefficients. Accepts one of the following values:
+
+
+‘simple ’
+Simple filter coefficient set.
+
+‘complex ’
+More-complex filter coefficient set.
+
+
+Default value is ‘complex ’.
+
+
+deint
+Specify which frames to deinterlace. Accepts one of the following values:
+
+
+‘all ’
+Deinterlace all frames.
+
+‘interlaced ’
+Only deinterlace frames marked as interlaced.
+
+
+
+Default value is ‘all ’.
+
+
+
+
+
28.105 xbr# TOC
+
Apply the xBR high-quality magnification filter which is designed for pixel
+art. It follows a set of edge-detection rules, see
+http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for 2xBR
, 3
for
+3xBR
and 4
for 4xBR
.
+Default is 3
.
+
+
+
+
+
28.106 yadif# TOC
+
+
Deinterlace the input video ("yadif" means "yet another deinterlacing
+filter").
+
+
It accepts the following parameters:
+
+
+
+mode
+The interlacing mode to adopt. It accepts one of the following values:
+
+
+0, send_frame
+Output one frame for each frame.
+
+1, send_field
+Output one frame for each field.
+
+2, send_frame_nospatial
+Like send_frame
, but it skips the spatial interlacing check.
+
+3, send_field_nospatial
+Like send_field
, but it skips the spatial interlacing check.
+
+
+
+The default value is send_frame
.
+
+
+parity
+The picture field parity assumed for the input interlaced video. It accepts one
+of the following values:
+
+
+0, tff
+Assume the top field is first.
+
+1, bff
+Assume the bottom field is first.
+
+-1, auto
+Enable automatic detection of field parity.
+
+
+
+The default value is auto
.
+If the interlacing is unknown or the decoder does not export this information,
+top field first will be assumed.
+
+
+deint
+Specify which frames to deinterlace. Accepts one of the following
+values:
+
+
+0, all
+Deinterlace all frames.
+
+1, interlaced
+Only deinterlace frames marked as interlaced.
+
+
+
+The default value is all
.
+
+
+
+
+
28.107 zoompan# TOC
+
+
Apply Zoom & Pan effect.
+
+
This filter accepts the following options:
+
+
+zoom, z
+Set the zoom expression. Default is 1.
+
+
+x
+y
+Set the x and y expression. Default is 0.
+
+
+d
+Set the duration expression in number of frames.
+This sets for how many frames the effect will last for a
+single input image.
+
+
+s
+Set the output image size, default is ’hd720’.
+
+
+
+
Each expression can contain the following constants:
+
+
+in_w, iw
+Input width.
+
+
+in_h, ih
+Input height.
+
+
+out_w, ow
+Output width.
+
+
+out_h, oh
+Output height.
+
+
+in
+Input frame count.
+
+
+on
+Output frame count.
+
+
+x
+y
+Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
+for current input frame.
+
+
+px
+py
+’x’ and ’y’ of last output frame of previous input frame or 0 when there was
+not yet such frame (first input frame).
+
+
+zoom
+Last calculated zoom from ’z’ expression for current input frame.
+
+
+pzoom
+Last calculated zoom of last output frame of previous input frame.
+
+
+duration
+Number of output frames for current input frame. Calculated from ’d’ expression
+for each input frame.
+
+
+pduration
+Number of output frames created for the previous input frame.
+
+
+a
+Rational number: input width / input height
+
+
+sar
+sample aspect ratio
+
+
+dar
+display aspect ratio
+
+
+
+
+
+
28.107.1 Examples# TOC
+
+
+ Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
+
+
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
+
+
+
+
+
+
29 Video Sources# TOC
+
+
Below is a description of the currently available video sources.
+
+
+
29.1 buffer# TOC
+
+
Buffer video frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/vsrc_buffer.h .
+
+
It accepts the following parameters:
+
+
+video_size
+Specify the size (width and height) of the buffered video frames. For the
+syntax of this option, check the "Video size" section in the ffmpeg-utils
+manual.
+
+
+width
+The input video width.
+
+
+height
+The input video height.
+
+
+pix_fmt
+A string representing the pixel format of the buffered video frames.
+It may be a number corresponding to a pixel format, or a pixel format
+name.
+
+
+time_base
+Specify the timebase assumed by the timestamps of the buffered frames.
+
+
+frame_rate
+Specify the frame rate expected for the video stream.
+
+
+pixel_aspect, sar
+The sample (pixel) aspect ratio of the input video.
+
+
+sws_param
+Specify the optional parameters to be used for the scale filter which
+is automatically inserted when an input change is detected in the
+input size or format.
+
+
+
+
For example:
+
+
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+
+
will instruct the source to accept video frames with size 320x240 and
+with format "yuv410p", assuming 1/24 as the timestamps timebase and
+square pixels (1:1 sample aspect ratio).
+Since the pixel format with name "yuv410p" corresponds to the number 6
+(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
+this example corresponds to:
+
+
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+
+
Alternatively, the options can be specified as a flat string, but this
+syntax is deprecated:
+
+
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
+
+
+
29.2 cellauto# TOC
+
+
Create a pattern generated by an elementary cellular automaton.
+
+
The initial state of the cellular automaton can be defined through the
+filename , and pattern options. If such options are
+not specified an initial state is created randomly.
+
+
At each new frame a new row in the video is filled with the result of
+the cellular automaton next generation. The behavior when the whole
+frame is filled is defined by the scroll option.
+
+
This source accepts the following options:
+
+
+filename, f
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified file.
+In the file, each non-whitespace character is considered an alive
+cell, a newline will terminate the row, and further characters in the
+file will be ignored.
+
+
+pattern, p
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified string.
+
+Each non-whitespace character in the string is considered an alive
+cell, a newline will terminate the row, and further characters in the
+string will be ignored.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial cellular automaton row. It
+is a floating point number value ranging from 0 to 1, defaults to
+1/PHI.
+
+This option is ignored when a file or a pattern is specified.
+
+
+random_seed, seed
+Set the seed for filling randomly the initial row, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the cellular automaton rule, it is a number ranging from 0 to 255.
+Default value is 110.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual.
+
+If filename or pattern is specified, the size is set
+by default to the width of the specified initial state row, and the
+height is set to width * PHI.
+
+If size is set, it must contain the width of the specified
+pattern string, and the specified pattern will be centered in the
+larger row.
+
+If a filename or a pattern string is not specified, the size value
+defaults to "320x518" (used for a randomly generated initial state).
+
+
+scroll
+If set to 1, scroll the output upward when all the rows in the output
+have been already filled. If set to 0, the new generated row will be
+written over the top row just after the bottom row is filled.
+Defaults to 1.
+
+
+start_full, full
+If set to 1, completely fill the output with generated rows before
+outputting the first frame.
+This is the default behavior, for disabling set the value to 0.
+
+
+stitch
+If set to 1, stitch the left and right row edges together.
+This is the default behavior, for disabling set the value to 0.
+
+
+
+
+
29.2.1 Examples# TOC
+
+
+ Read the initial state from pattern , and specify an output of
+size 200x400.
+
+
cellauto=f=pattern:s=200x400
+
+
+ Generate a random initial row with a width of 200 cells, with a fill
+ratio of 2/3:
+
+
cellauto=ratio=2/3:s=200x200
+
+
+ Create a pattern generated by rule 18 starting by a single alive cell
+centered on an initial row with width 100:
+
+
cellauto=p=@:s=100x400:full=0:rule=18
+
+
+ Specify a more elaborated initial pattern:
+
+
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
+
+
+
+
+
+
29.3 mandelbrot# TOC
+
+
Generate a Mandelbrot set fractal, and progressively zoom towards the
+point specified with start_x and start_y .
+
+
This source accepts the following options:
+
+
+end_pts
+Set the terminal pts value. Default value is 400.
+
+
+end_scale
+Set the terminal scale value.
+Must be a floating point value. Default value is 0.3.
+
+
+inner
+Set the inner coloring mode, that is the algorithm used to draw the
+Mandelbrot fractal internal region.
+
+It shall assume one of the following values:
+
+black
+Set black mode.
+
+convergence
+Show time until convergence.
+
+mincol
+Set color based on point closest to the origin of the iterations.
+
+period
+Set period mode.
+
+
+
+Default value is mincol .
+
+
+bailout
+Set the bailout value. Default value is 10.0.
+
+
+maxiter
+Set the maximum of iterations performed by the rendering
+algorithm. Default value is 7189.
+
+
+outer
+Set outer coloring mode.
+It shall assume one of following values:
+
+iteration_count
+Set iteration count mode.
+
+normalized_iteration_count
+Set normalized iteration count mode.
+
+
+Default value is normalized_iteration_count .
+
+
+rate, r
+Set frame rate, expressed as number of frames per second. Default
+value is "25".
+
+
+size, s
+Set frame size. For the syntax of this option, check the "Video
+size" section in the ffmpeg-utils manual. Default value is "640x480".
+
+
+start_scale
+Set the initial scale value. Default value is 3.0.
+
+
+start_x
+Set the initial x position. Must be a floating point value between
+-100 and 100. Default value is -0.743643887037158704752191506114774.
+
+
+start_y
+Set the initial y position. Must be a floating point value between
+-100 and 100. Default value is -0.131825904205311970493132056385139.
+
+
+
+
+
29.4 mptestsrc# TOC
+
+
Generate various test patterns, as generated by the MPlayer test filter.
+
+
The size of the generated video is fixed, and is 256x256.
+This source is useful in particular for testing encoding features.
+
+
This source accepts the following options:
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+test, t
+
+Set the number or the name of the test to perform. Supported tests are:
+
+dc_luma
+dc_chroma
+freq_luma
+freq_chroma
+amp_luma
+amp_chroma
+cbp
+mv
+ring1
+ring2
+all
+
+
+Default value is "all", which will cycle through the list of all tests.
+
+
+
+
Some examples:
+
+
+
will generate a "dc_luma" test pattern.
+
+
+
29.5 frei0r_src# TOC
+
+
Provide a frei0r source.
+
+
To enable compilation of this filter you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
This source accepts the following parameters:
+
+
+size
+The size of the video to generate. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+
+framerate
+The framerate of the generated video. It may be a string of the form
+num /den or a frame rate abbreviation.
+
+
+filter_name
+The name to the frei0r source to load. For more information regarding frei0r and
+how to set the parameters, read the frei0r section in the video filters
+documentation.
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r source.
+
+
+
+
+
For example, to generate a frei0r partik0l source with size 200x200
+and frame rate 10 which is overlaid on the overlay filter main input:
+
+
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+
+
+
29.6 life# TOC
+
+
Generate a life pattern.
+
+
This source is based on a generalization of John Conway’s life game.
+
+
The sourced input represents a life grid, each pixel represents a cell
+which can be in one of two possible states, alive or dead. Every cell
+interacts with its eight neighbours, which are the cells that are
+horizontally, vertically, or diagonally adjacent.
+
+
At each interaction the grid evolves according to the adopted rule,
+which specifies the number of neighbor alive cells which will make a
+cell stay alive or born. The rule option allows one to specify
+the rule to adopt.
+
+
This source accepts the following options:
+
+
+filename, f
+Set the file from which to read the initial grid state. In the file,
+each non-whitespace character is considered an alive cell, and newline
+is used to delimit the end of each row.
+
+If this option is not specified, the initial grid is generated
+randomly.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial random grid. It is a
+floating point number value ranging from 0 to 1, defaults to 1/PHI.
+It is ignored when a file is specified.
+
+
+random_seed, seed
+Set the seed for filling the initial random grid, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the life rule.
+
+A rule can be specified with a code of the kind "SNS /BNB ",
+where NS and NB are sequences of numbers in the range 0-8,
+NS specifies the number of alive neighbor cells which make a
+live cell stay alive, and NB the number of alive neighbor cells
+which make a dead cell to become alive (i.e. to "born").
+"s" and "b" can be used in place of "S" and "B", respectively.
+
+Alternatively a rule can be specified by an 18-bits integer. The 9
+high order bits are used to encode the next cell state if it is alive
+for each number of neighbor alive cells, the low order bits specify
+the rule for "borning" new cells. Higher order bits encode for a
+higher number of neighbor cells.
+For example the number 6153 = (12<<9)+9
specifies a stay alive
+rule of 12 and a born rule of 9, which corresponds to "S23/B03".
+
+Default value is "S23/B3", which is the original Conway’s game of life
+rule, and will keep a cell alive if it has 2 or 3 neighbor alive
+cells, and will cause a new cell to be born if there are three alive cells around
+a dead cell.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+If filename is specified, the size is set by default to the
+same size of the input file. If size is set, it must contain
+the size specified in the input file, and the initial grid defined in
+that file is centered in the larger resulting area.
+
+If a filename is not specified, the size value defaults to "320x240"
+(used for a randomly generated initial grid).
+
+
+stitch
+If set to 1, stitch the left and right grid edges together, and the
+top and bottom edges also. Defaults to 1.
+
+
+mold
+Set cell mold speed. If set, a dead cell will go from death_color to
+mold_color with a step of mold . mold can have a
+value from 0 to 255.
+
+
+life_color
+Set the color of living (or new born) cells.
+
+
+death_color
+Set the color of dead cells. If mold is set, this is the first color
+used to represent a dead cell.
+
+
+mold_color
+Set mold color, for definitely dead and moldy cells.
+
+For the syntax of these 3 color options, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+
+
+
29.6.1 Examples# TOC
+
+
+ Read a grid from pattern , and center it on a grid of size
+300x300 pixels:
+
+
life=f=pattern:s=300x300
+
+
+ Generate a random grid of size 200x200, with a fill ratio of 2/3:
+
+
life=ratio=2/3:s=200x200
+
+
+ Specify a custom rule for evolving a randomly generated grid:
+
+
+ Full example with slow death effect (mold) using ffplay
:
+
+
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
+
+
+
+
+
29.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
+
+
The color
+source provides a uniformly colored input.
+
+
The haldclutsrc
source provides an identity Hald CLUT. See also
+haldclut filter.
+
+
The nullsrc
source returns unprocessed video frames. It is
+mainly useful to be employed in analysis / debugging tools, or as the
+source for filters which ignore the input data.
+
+
The rgbtestsrc
source generates an RGB test pattern useful for
+detecting RGB vs BGR issues. You should see a red, green and blue
+stripe from top to bottom.
+
+
The smptebars
source generates a color bars pattern, based on
+the SMPTE Engineering Guideline EG 1-1990.
+
+
The smptehdbars
source generates a color bars pattern, based on
+the SMPTE RP 219-2002.
+
+
The testsrc
source generates a test video pattern, showing a
+color pattern, a scrolling gradient and a timestamp. This is mainly
+intended for testing purposes.
+
+
The sources accept the following parameters:
+
+
+color, c
+Specify the color of the source, only available in the color
+source. For the syntax of this option, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+level
+Specify the level of the Hald CLUT, only available in the haldclutsrc
+source. A level of N
generates a picture of N*N*N
by N*N*N
+pixels to be used as identity matrix for 3D lookup tables. Each component is
+coded on a 1/(N*N)
scale.
+
+
+size, s
+Specify the size of the sourced video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual. The default value is
+"320x240".
+
+This option is not available with the haldclutsrc
filter.
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+sar
+Set the sample aspect ratio of the sourced video.
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+decimals, n
+Set the number of decimals to show in the timestamp, only available in the
+testsrc
source.
+
+The displayed timestamp value will correspond to the original
+timestamp value multiplied by the power of 10 of the specified
+value. Default value is 0.
+
+
+
+
For example the following:
+
+
testsrc=duration=5.3:size=qcif:rate=10
+
+
+
will generate a video with a duration of 5.3 seconds, with size
+176x144 and a frame rate of 10 frames per second.
+
+
The following graph description will generate a red source
+with an opacity of 0.2, with size "qcif" and a frame rate of 10
+frames per second.
+
+
color=c=red@0.2:s=qcif:r=10
+
+
+
If the input content is to be ignored, nullsrc
can be used. The
+following command generates noise in the luminance plane by employing
+the geq
filter:
+
+
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+
+
+
29.7.1 Commands# TOC
+
+
The color
source supports the following commands:
+
+
+c, color
+Set the color of the created image. Accepts the same syntax of the
+corresponding color option.
+
+
+
+
+
+
30 Video Sinks# TOC
+
+
Below is a description of the currently available video sinks.
+
+
+
30.1 buffersink# TOC
+
+
Buffer video frames, and make them available to the end of the filter
+graph.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVBufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
+
30.2 nullsink# TOC
+
+
Null video sink: do absolutely nothing with the input video. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
31 Multimedia Filters# TOC
+
+
Below is a description of the currently available multimedia filters.
+
+
+
31.1 avectorscope# TOC
+
+
Convert input audio to a video output, representing the audio vector
+scope.
+
+
The filter is used to measure the difference between channels of stereo
+audio stream. A monoaural signal, consisting of identical left and right
+signal, results in straight vertical line. Any stereo separation is visible
+as a deviation from this line, creating a Lissajous figure.
+If the line (or the deviation from it) appears horizontal instead, this
+indicates that the left and right channels are out of phase.
+
+
The filter accepts the following options:
+
+
+mode, m
+Set the vectorscope mode.
+
+Available values are:
+
+‘lissajous ’
+Lissajous rotated by 45 degrees.
+
+
+‘lissajous_xy ’
+Same as above but not rotated.
+
+
+
+Default value is ‘lissajous ’.
+
+
+size, s
+Set the video size for the output. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual. Default value is 400x400
.
+
+
+rate, r
+Set the output frame rate. Default value is 25
.
+
+
+rc
+gc
+bc
+Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
+Allowed range is [0, 255]
.
+
+
+rf
+gf
+bf
+Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
+Allowed range is [0, 255]
.
+
+
+zoom
+Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
+
+
+
+
+
31.1.1 Examples# TOC
+
+
+ Complete example using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
+
+
+
+
+
31.2 concat# TOC
+
+
Concatenate audio and video streams, joining them together one after the
+other.
+
+
The filter works on segments of synchronized video and audio streams. All
+segments must have the same number of streams of each type, and that will
+also be the number of streams at output.
+
+
The filter accepts the following options:
+
+
+n
+Set the number of segments. Default is 2.
+
+
+v
+Set the number of output video streams, that is also the number of video
+streams in each segment. Default is 1.
+
+
+a
+Set the number of output audio streams, that is also the number of audio
+streams in each segment. Default is 0.
+
+
+unsafe
+Activate unsafe mode: do not fail if segments have a different format.
+
+
+
+
+
The filter has v +a outputs: first v video outputs, then
+a audio outputs.
+
+
There are n x(v +a ) inputs: first the inputs for the first
+segment, in the same order as the outputs, then the inputs for the second
+segment, etc.
+
+
Related streams do not always have exactly the same duration, for various
+reasons including codec frame size or sloppy authoring. For that reason,
+related synchronized streams (e.g. a video and its audio track) should be
+concatenated at once. The concat filter will use the duration of the longest
+stream in each segment (except the last one), and if necessary pad shorter
+audio streams with silence.
+
+
For this filter to work correctly, all segments must start at timestamp 0.
+
+
All corresponding streams must have the same parameters in all segments; the
+filtering system will automatically select a common pixel format for video
+streams, and a common sample format, sample rate and channel layout for
+audio streams, but other settings, such as resolution, must be converted
+explicitly by the user.
+
+
Different frame rates are acceptable but will result in variable frame rate
+at output; be sure to configure the output file to handle it.
+
+
+
31.2.1 Examples# TOC
+
+
+
+
+
31.3 ebur128# TOC
+
+
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
+it unchanged. By default, it logs a message at a frequency of 10Hz with the
+Momentary loudness (identified by M
), Short-term loudness (S
),
+Integrated loudness (I
) and Loudness Range (LRA
).
+
+
The filter also has a video output (see the video option) with a real
+time graph to observe the loudness evolution. The graphic contains the logged
+message mentioned above, so it is not printed anymore when this option is set,
+unless the verbose logging is set. The main graphing area contains the
+short-term loudness (3 seconds of analysis), and the gauge on the right is for
+the momentary loudness (400 milliseconds).
+
+
More information about the Loudness Recommendation EBU R128 on
+http://tech.ebu.ch/loudness .
+
+
The filter accepts the following options:
+
+
+video
+Activate the video output. The audio stream is passed unchanged whether this
+option is set or not. The video stream will be the first output stream if
+activated. Default is 0
.
+
+
+size
+Set the video size. This option is for video only. For the syntax of this
+option, check the "Video size" section in the ffmpeg-utils manual. Default
+and minimum resolution is 640x480
.
+
+
+meter
+Set the EBU scale meter. Default is 9
. Common values are 9
and
+18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
+other integer value between this range is allowed.
+
+
+metadata
+Set metadata injection. If set to 1
, the audio input will be segmented
+into 100ms output frames, each of them containing various loudness information
+in metadata. All the metadata keys are prefixed with lavfi.r128.
.
+
+Default is 0
.
+
+
+framelog
+Force the frame logging level.
+
+Available values are:
+
+‘info ’
+information logging level
+
+‘verbose ’
+verbose logging level
+
+
+
+By default, the logging level is set to info . If the video or
+the metadata options are set, it switches to verbose .
+
+
+peak
+Set peak mode(s).
+
+Available modes can be cumulated (the option is a flag
type). Possible
+values are:
+
+‘none ’
+Disable any peak mode (default).
+
+‘sample ’
+Enable sample-peak mode.
+
+Simple peak mode looking for the highest sample value. It logs a message
+for sample-peak (identified by SPK
).
+
+‘true ’
+Enable true-peak mode.
+
+If enabled, the peak lookup is done on an over-sampled version of the input
+stream for better peak accuracy. It logs a message for true-peak
+(identified by TPK
) and true-peak per frame (identified by FTPK
).
+This mode requires a build with libswresample
.
+
+
+
+
+
+
+
+
31.3.1 Examples# TOC
+
+
+ Real-time graph using ffplay
, with a EBU scale meter +18:
+
+
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
+
+
+ Run an analysis with ffmpeg
:
+
+
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
+
+
+
+
+
31.4 interleave, ainterleave# TOC
+
+
Temporally interleave frames from several inputs.
+
+
interleave
works with video inputs, ainterleave
with audio.
+
+
These filters read frames from several inputs and send the oldest
+queued frame to the output.
+
+
+Input streams must have well defined, monotonically increasing frame
+timestamp values.
+
+
In order to submit one frame to output, these filters need to enqueue
+at least one frame for each input, so they cannot work in case one
+input is not yet terminated and will not receive incoming frames.
+
+
For example consider the case when one input is a select
filter
+which always drops input frames. The interleave
filter will keep
+reading from that input, but it will never be able to send new frames
+to output until the input sends an end-of-stream signal.
+
+
Also, depending on inputs synchronization, the filters will drop
+frames in case one input receives more frames than the other ones, and
+the queue is already filled.
+
+
These filters accept the following options:
+
+
+nb_inputs, n
+Set the number of different inputs, it is 2 by default.
+
+
+
+
+
31.4.1 Examples# TOC
+
+
+ Interleave frames belonging to different streams using ffmpeg
:
+
+
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
+
+
+ Add flickering blur effect:
+
+
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
+
+
+
+
+
31.5 perms, aperms# TOC
+
+
Set read/write permissions for the output frames.
+
+
These filters are mainly aimed at developers to test direct path in the
+following filter in the filtergraph.
+
+
The filters accept the following options:
+
+
+mode
+Select the permissions mode.
+
+It accepts the following values:
+
+‘none ’
+Do nothing. This is the default.
+
+‘ro ’
+Set all the output frames read-only.
+
+‘rw ’
+Set all the output frames directly writable.
+
+‘toggle ’
+Make the frame read-only if writable, and writable if read-only.
+
+‘random ’
+Set each output frame read-only or writable randomly.
+
+
+
+
+seed
+Set the seed for the random mode, must be an integer included between
+0
and UINT32_MAX
. If not specified, or if explicitly set to
+-1
, the filter will try to use a good random seed on a best effort
+basis.
+
+
+
+
Note: in case of auto-inserted filter between the permission filter and the
+following one, the permission might not be received as expected in that
+following filter. Inserting a format or aformat filter before the
+perms/aperms filter can avoid this problem.
+
+
+
31.6 select, aselect# TOC
+
+
Select frames to pass in output.
+
+
This filter accepts the following options:
+
+
+expr, e
+Set expression, which is evaluated for each input frame.
+
+If the expression is evaluated to zero, the frame is discarded.
+
+If the evaluation result is negative or NaN, the frame is sent to the
+first output; otherwise it is sent to the output with index
+ceil(val)-1
, assuming that the input index starts from 0.
+
+For example a value of 1.2
corresponds to the output with index
+ceil(1.2)-1 = 2-1 = 1
, that is the second output.
+
+
+outputs, n
+Set the number of outputs. The output to which to send the selected
+frame is based on the result of the evaluation. Default value is 1.
+
+
+
+
The expression can contain the following constants:
+
+
+n
+The (sequential) number of the filtered frame, starting from 0.
+
+
+selected_n
+The (sequential) number of the selected frame, starting from 0.
+
+
+prev_selected_n
+The sequential number of the last selected frame. It’s NAN if undefined.
+
+
+TB
+The timebase of the input timestamps.
+
+
+pts
+The PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in TB units. It’s NAN if undefined.
+
+
+t
+The PTS of the filtered video frame,
+expressed in seconds. It’s NAN if undefined.
+
+
+prev_pts
+The PTS of the previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_pts
+The PTS of the last previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_t
+The PTS of the last previously selected video frame. It’s NAN if undefined.
+
+
+start_pts
+The PTS of the first video frame in the video. It’s NAN if undefined.
+
+
+start_t
+The time of the first video frame in the video. It’s NAN if undefined.
+
+
+pict_type (video only)
+The type of the filtered frame. It can assume one of the following
+values:
+
+I
+P
+B
+S
+SI
+SP
+BI
+
+
+
+interlace_type (video only)
+The frame interlace type. It can assume one of the following values:
+
+PROGRESSIVE
+The frame is progressive (not interlaced).
+
+TOPFIRST
+The frame is top-field-first.
+
+BOTTOMFIRST
+The frame is bottom-field-first.
+
+
+
+
+consumed_sample_n (audio only)
+the number of selected samples before the current frame
+
+
+samples_n (audio only)
+the number of samples in the current frame
+
+
+sample_rate (audio only)
+the input sample rate
+
+
+key
+This is 1 if the filtered frame is a key-frame, 0 otherwise.
+
+
+pos
+the position in the file of the filtered frame, -1 if the information
+is not available (e.g. for synthetic video)
+
+
+scene (video only)
+value between 0 and 1 to indicate a new scene; a low value reflects a low
+probability for the current frame to introduce a new scene, while a higher
+value means the current frame is more likely to be one (see the example below)
+
+
+
+
+
The default value of the select expression is "1".
+
+
+
31.6.1 Examples# TOC
+
+
+
+
+
31.7 sendcmd, asendcmd# TOC
+
+
Send commands to filters in the filtergraph.
+
+
These filters read commands to be sent to other filters in the
+filtergraph.
+
+
sendcmd
must be inserted between two video filters,
+asendcmd
must be inserted between two audio filters, but apart
+from that they act the same way.
+
+
The specification of commands can be provided in the filter arguments
+with the commands option, or in a file specified by the
+filename option.
+
+
These filters accept the following options:
+
+commands, c
+Set the commands to be read and sent to the other filters.
+
+filename, f
+Set the filename of the commands to be read and sent to the other
+filters.
+
+
+
+
+
31.7.1 Commands syntax# TOC
+
+
A commands description consists of a sequence of interval
+specifications, comprising a list of commands to be executed when a
+particular event related to that interval occurs. The occurring event
+is typically the current frame time entering or leaving a given time
+interval.
+
+
An interval is specified by the following syntax:
+
+
+
The time interval is specified by the START and END times.
+END is optional and defaults to the maximum time.
+
+
The current frame time is considered within the specified interval if
+it is included in the interval [START , END ), that is when
+the time is greater or equal to START and is lesser than
+END .
+
+
COMMANDS consists of a sequence of one or more command
+specifications, separated by ",", relating to that interval. The
+syntax of a command specification is given by:
+
+
[FLAGS ] TARGET COMMAND ARG
+
+
+
FLAGS is optional and specifies the type of events relating to
+the time interval which enable sending the specified command, and must
+be a non-null sequence of identifier flags separated by "+" or "|" and
+enclosed between "[" and "]".
+
+
The following flags are recognized:
+
+enter
+The command is sent when the current frame timestamp enters the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was not in the given interval, and the
+current is.
+
+
+leave
+The command is sent when the current frame timestamp leaves the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was in the given interval, and the
+current is not.
+
+
+
+
If FLAGS is not specified, a default value of [enter]
is
+assumed.
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional list of argument for
+the given COMMAND .
+
+
Between one interval specification and another, whitespaces, or
+sequences of characters starting with #
until the end of line,
+are ignored and can be used to annotate comments.
+
+
A simplified BNF description of the commands specification syntax
+follows:
+
+
COMMAND_FLAG ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
+COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
+COMMANDS ::= COMMAND [,COMMANDS ]
+INTERVAL ::= START [-END ] COMMANDS
+INTERVALS ::= INTERVAL [;INTERVALS ]
+
+
+
+
31.7.2 Examples# TOC
+
+
+
+
+
31.8 setpts, asetpts# TOC
+
+
Change the PTS (presentation timestamp) of the input frames.
+
+
setpts
works on video frames, asetpts
on audio frames.
+
+
This filter accepts the following options:
+
+
+expr
+The expression which is evaluated for each frame to construct its timestamp.
+
+
+
+
+
The expression is evaluated through the eval API and can contain the following
+constants:
+
+
+FRAME_RATE
+frame rate, only defined for constant frame-rate video
+
+
+PTS
+The presentation timestamp in input
+
+
+N
+The count of the input frame for video or the number of consumed samples,
+not including the current frame for audio, starting from 0.
+
+
+NB_CONSUMED_SAMPLES
+The number of consumed samples, not including the current frame (only
+audio)
+
+
+NB_SAMPLES, S
+The number of samples in the current frame (only audio)
+
+
+SAMPLE_RATE, SR
+The audio sample rate.
+
+
+STARTPTS
+The PTS of the first frame.
+
+
+STARTT
+the time in seconds of the first frame
+
+
+INTERLACED
+State whether the current frame is interlaced.
+
+
+T
+the time in seconds of the current frame
+
+
+POS
+original position in the file of the frame, or undefined if undefined
+for the current frame
+
+
+PREV_INPTS
+The previous input PTS.
+
+
+PREV_INT
+previous input time in seconds
+
+
+PREV_OUTPTS
+The previous output PTS.
+
+
+PREV_OUTT
+previous output time in seconds
+
+
+RTCTIME
+The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
+instead.
+
+
+RTCSTART
+The wallclock (RTC) time at the start of the movie in microseconds.
+
+
+TB
+The timebase of the input timestamps.
+
+
+
+
+
+
31.8.1 Examples# TOC
+
+
+ Start counting PTS from zero
+
+
+ Apply fast motion effect:
+
+
+ Apply slow motion effect:
+
+
+ Set fixed rate of 25 frames per second:
+
+
+ Set fixed rate 25 fps with some jitter:
+
+
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
+
+
+ Apply an offset of 10 seconds to the input PTS:
+
+
+ Generate timestamps from a "live source" and rebase onto the current timebase:
+
+
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
+
+
+ Generate timestamps by counting samples:
+
+
+
+
+
+
31.9 settb, asettb# TOC
+
+
Set the timebase to use for the output frames timestamps.
+It is mainly useful for testing timebase configuration.
+
+
It accepts the following parameters:
+
+
+expr, tb
+The expression which is evaluated into the output timebase.
+
+
+
+
+
The value for tb is an arithmetic expression representing a
+rational. The expression can contain the constants "AVTB" (the default
+timebase), "intb" (the input timebase) and "sr" (the sample rate,
+audio only). Default value is "intb".
+
+
+
31.9.1 Examples# TOC
+
+
+ Set the timebase to 1/25:
+
+
+ Set the timebase to 1/10:
+
+
+ Set the timebase to 1001/1000:
+
+
+ Set the timebase to 2*intb:
+
+
+ Set the default timebase value:
+
+
+
+
+
31.10 showcqt# TOC
+
Convert input audio to a video output representing
+frequency spectrum logarithmically (using constant Q transform with
+Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
+
+
The filter accepts the following options:
+
+
+volume
+Specify transform volume (multiplier) expression. The expression can contain
+variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+a_weighting(f)
+A-weighting of equal loudness
+
+b_weighting(f)
+B-weighting of equal loudness
+
+c_weighting(f)
+C-weighting of equal loudness
+
+
+Default value is 16
.
+
+
+tlength
+Specify transform length expression. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+Default value is 384/f*tc/(384/f+tc)
.
+
+
+timeclamp
+Specify the transform timeclamp. At low frequency, there is trade-off between
+accuracy in time domain and frequency domain. If timeclamp is lower,
+event in time domain is represented more accurately (such as fast bass drum),
+otherwise event in frequency domain is represented more accurately
+(such as bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
+
+
+coeffclamp
+Specify the transform coeffclamp. If coeffclamp is lower, transform is
+more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
+Default value is 1.0
.
+
+
+gamma
+Specify gamma. Lower gamma makes the spectrum more contrasted, higher gamma
+makes the spectrum have more range. Acceptable value is [1.0, 7.0].
+Default value is 3.0
.
+
+
+fontfile
+Specify font file for use with freetype. If not specified, use embedded font.
+
+
+fontcolor
+Specify font color expression. This is arithmetic expression that should return
+integer value 0xRRGGBB. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+midi(f)
+midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
+
+r(x), g(x), b(x)
+red, green, and blue value of intensity x
+
+
+Default value is st(0, (midi(f)-59.5)/12);
+st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
+r(1-ld(1)) + b(ld(1))
+
+
+fullhd
+If set to 1 (the default), the video size is 1920x1080 (full HD),
+if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
+
+
+fps
+Specify video fps. Default value is 25
.
+
+
+count
+Specify number of transform per frame, so there are fps*count transforms
+per second. Note that audio data rate must be divisible by fps*count.
+Default value is 6
.
+
+
+
+
+
+
31.10.1 Examples# TOC
+
+
+ Playing audio while showing the spectrum:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with frame rate 30 fps:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
+
+
+ Playing at 960x540 and lower CPU usage:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
+
+
+ A1 and its harmonics: A1, A2, (near)E3, A3:
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with more accuracy in frequency domain (and slower):
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
+
+
+ B-weighting of equal loudness
+
+
volume=16*b_weighting(f)
+
+
+ Lower Q factor
+
+
tlength=100/f*tc/(100/f+tc)
+
+
+ Custom fontcolor, C-note is colored green, others are colored blue
+
+
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
+
+
+
+
+
+
31.11 showspectrum# TOC
+
+
Convert input audio to a video output, representing the audio frequency
+spectrum.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value is
+640x512
.
+
+
+slide
+Specify how the spectrum should slide along the window.
+
+It accepts the following values:
+
+‘replace ’
+the samples start again on the left when they reach the right
+
+‘scroll ’
+the samples scroll from right to left
+
+‘fullframe ’
+frames are only produced when the samples reach the right
+
+
+
+Default value is replace
.
+
+
+mode
+Specify display mode.
+
+It accepts the following values:
+
+‘combined ’
+all channels are displayed in the same row
+
+‘separate ’
+all channels are displayed in separate rows
+
+
+
+Default value is ‘combined ’.
+
+
+color
+Specify display color mode.
+
+It accepts the following values:
+
+‘channel ’
+each channel is displayed in a separate color
+
+‘intensity ’
+each channel is displayed using the same color scheme
+
+
+
+Default value is ‘channel ’.
+
+
+scale
+Specify scale used for calculating intensity color values.
+
+It accepts the following values:
+
+‘lin ’
+linear
+
+‘sqrt ’
+square root, default
+
+‘cbrt ’
+cubic root
+
+‘log ’
+logarithmic
+
+
+
+Default value is ‘sqrt ’.
+
+
+saturation
+Set saturation modifier for displayed colors. Negative values provide
+alternative color scheme. 0
is no saturation at all.
+Saturation must be in [-10.0, 10.0] range.
+Default value is 1
.
+
+
+win_func
+Set window function.
+
+It accepts the following values:
+
+‘none ’
+No samples pre-processing (do not expect this to be faster)
+
+‘hann ’
+Hann window
+
+‘hamming ’
+Hamming window
+
+‘blackman ’
+Blackman window
+
+
+
+Default value is hann
.
+
+
+
+
The usage is very similar to the showwaves filter; see the examples in that
+section.
+
+
+
31.11.1 Examples# TOC
+
+
+ Large window with logarithmic color scaling:
+
+
showspectrum=s=1280x480:scale=log
+
+
+ Complete example for a colored and sliding spectrum per channel using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
+
+
+
+
+
31.12 showwaves# TOC
+
+
Convert input audio to a video output, representing the samples waves.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value
+is "600x240".
+
+
+mode
+Set display mode.
+
+Available values are:
+
+‘point ’
+Draw a point for each sample.
+
+
+‘line ’
+Draw a vertical line for each sample.
+
+
+‘p2p ’
+Draw a point for each sample and a line between them.
+
+
+‘cline ’
+Draw a centered vertical line for each sample.
+
+
+
+Default value is point
.
+
+
+n
+Set the number of samples which are printed on the same column. A
+larger value will decrease the frame rate. Must be a positive
+integer. This option can be set only if the value for rate
+is not explicitly specified.
+
+
+rate, r
+Set the (approximate) output frame rate. This is done by setting the
+option n . Default value is "25".
+
+
+split_channels
+Set if channels should be drawn separately or overlap. Default value is 0.
+
+
+
+
+
+
31.12.1 Examples# TOC
+
+
+ Output the input file audio and the corresponding video representation
+at the same time:
+
+
amovie=a.mp3,asplit[out0],showwaves[out1]
+
+
+ Create a synthetic signal and show it with showwaves, forcing a
+frame rate of 30 frames per second:
+
+
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
+
+
+
+
+
31.13 split, asplit# TOC
+
+
Split input into several identical outputs.
+
+
asplit
works with audio input, split
with video.
+
+
The filter accepts a single parameter which specifies the number of outputs. If
+unspecified, it defaults to 2.
+
+
+
31.13.1 Examples# TOC
+
+
+ Create two separate outputs from the same input:
+
+
[in] split [out0][out1]
+
+
+ To create 3 or more outputs, you need to specify the number of
+outputs, like in:
+
+
[in] asplit=3 [out0][out1][out2]
+
+
+ Create two separate outputs from the same input, one cropped and
+one padded:
+
+
[in] split [splitout1][splitout2];
+[splitout1] crop=100:100:0:0 [cropout];
+[splitout2] pad=200:200:100:100 [padout];
+
+
+ Create 5 copies of the input audio with ffmpeg
:
+
+
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
+
+
+
+
+
31.14 zmq, azmq# TOC
+
+
Receive commands sent through a libzmq client, and forward them to
+filters in the filtergraph.
+
+
zmq
and azmq
work as a pass-through filters. zmq
+must be inserted between two video filters, azmq
between two
+audio filters.
+
+
To enable these filters you need to install the libzmq library and
+headers and configure FFmpeg with --enable-libzmq
.
+
+
For more information about libzmq see:
+http://www.zeromq.org/
+
+
The zmq
and azmq
filters work as a libzmq server, which
+receives messages sent through a network interface defined by the
+bind_address option.
+
+
The received message must be in the form:
+
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional argument list for the
+given COMMAND .
+
+
Upon reception, the message is processed and the corresponding command
+is injected into the filtergraph. Depending on the result, the filter
+will send a reply to the client, adopting the format:
+
+
ERROR_CODE ERROR_REASON
+MESSAGE
+
+
+
MESSAGE is optional.
+
+
+
31.14.1 Examples# TOC
+
+
Look at tools/zmqsend for an example of a zmq client which can
+be used to send commands processed by these filters.
+
+
Consider the following filtergraph generated by ffplay
+
+
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l] overlay [bg+l];
+[bg+l][r] overlay=x=100 "
+
+
+
To change the color of the left side of the video, the following
+command can be used:
+
+
echo Parsed_color_0 c yellow | tools/zmqsend
+
+
+
To change the right side:
+
+
echo Parsed_color_1 c pink | tools/zmqsend
+
+
+
+
+
32 Multimedia Sources# TOC
+
+
Below is a description of the currently available multimedia sources.
+
+
+
32.1 amovie# TOC
+
+
This is the same as movie source, except it selects an audio
+stream by default.
+
+
+
32.2 movie# TOC
+
+
Read audio and/or video stream(s) from a movie container.
+
+
It accepts the following parameters:
+
+
+filename
+The name of the resource to read (not necessarily a file; it can also be a
+device or a stream accessed through some protocol).
+
+
+format_name, f
+Specifies the format assumed for the movie to read, and can be either
+the name of a container or an input device. If not specified, the
+format is guessed from movie_name or by probing.
+
+
+seek_point, sp
+Specifies the seek point in seconds. The frames will be output
+starting from this seek point. The parameter is evaluated with
+av_strtod
, so the numerical value may be suffixed by an IS
+postfix. The default value is "0".
+
+
+streams, s
+Specifies the streams to read. Several streams can be specified,
+separated by "+". The source will then have as many outputs, in the
+same order. The syntax is explained in the “Stream specifiers”
+section in the ffmpeg manual. Two special names, "dv" and "da" specify
+respectively the default (best suited) video and audio stream. Default
+is "dv", or "da" if the filter is called as "amovie".
+
+
+stream_index, si
+Specifies the index of the video stream to read. If the value is -1,
+the most suitable video stream will be automatically selected. The default
+value is "-1". Deprecated. If the filter is called "amovie", it will select
+audio instead of video.
+
+
+loop
+Specifies how many times to read the stream in sequence.
+If the value is less than 1, the stream will be read again and again.
+Default value is "1".
+
+Note that when the movie is looped the source timestamps are not
+changed, so it will generate non monotonically increasing timestamps.
+
+
+
+
It allows overlaying a second video on top of the main input of
+a filtergraph, as shown in this graph:
+
+
input -----------> deltapts0 --> overlay --> output
+ ^
+ |
+movie --> scale--> deltapts1 -------+
+
+
+
32.2.1 Examples# TOC
+
+
+ Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
+on top of the input labelled "in":
+
+
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read from a video4linux2 device, and overlay it on top of the input
+labelled "in":
+
+
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read the first video stream and the audio stream with id 0x81 from
+dvd.vob; the video is connected to the pad named "video" and the audio is
+connected to the pad named "audio":
+
+
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
+
+
+
+
+
+
33 See Also# TOC
+
+
ffplay ,
+ffmpeg , ffprobe , ffserver ,
+ffmpeg-utils ,
+ffmpeg-scaler ,
+ffmpeg-resampler ,
+ffmpeg-codecs ,
+ffmpeg-bitstream-filters ,
+ffmpeg-formats ,
+ffmpeg-devices ,
+ffmpeg-protocols ,
+ffmpeg-filters
+
+
+
+
34 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffplay.html b/Externals/ffmpeg/dev/doc/ffplay.html
new file mode 100644
index 0000000000..e072758fa8
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffplay.html
@@ -0,0 +1,745 @@
+
+
+
+
+
+
+ ffplay Documentation
+
+
+
+
+
+
+
+
+ ffplay Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Synopsis# TOC
+
+
ffplay [options ] [input_file ]
+
+
+
2 Description# TOC
+
+
FFplay is a very simple and portable media player using the FFmpeg
+libraries and the SDL library. It is mostly used as a testbed for the
+various FFmpeg APIs.
+
+
+
3 Options# TOC
+
+
All the numerical options, if not specified otherwise, accept a string
+representing a number as input, which may be followed by one of the SI
+unit prefixes, for example: ’K’, ’M’, or ’G’.
+
+
If ’i’ is appended to the SI unit prefix, the complete prefix will be
+interpreted as a unit prefix for binary multiples, which are based on
+powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
+prefix multiplies the value by 8. This allows using, for example:
+’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
+
+
Options which do not take arguments are boolean options, and set the
+corresponding value to true. They can be set to false by prefixing
+the option name with "no". For example using "-nofoo"
+will set the boolean option with name "foo" to false.
+
+
+
3.1 Stream specifiers# TOC
+
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
+are used to precisely specify which stream(s) a given option belongs to.
+
+
A stream specifier is a string generally appended to the option name and
+separated from it by a colon. E.g. -codec:a:1 ac3
contains the
+a:1
stream specifier, which matches the second audio stream. Therefore, it
+would select the ac3 codec for the second audio stream.
+
+
A stream specifier can match several streams, so that the option is applied to all
+of them. E.g. the stream specifier in -b:a 128k
matches all audio
+streams.
+
+
An empty stream specifier matches all streams. For example, -codec copy
+or -codec: copy
would copy all the streams without reencoding.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index. E.g. -threads:1 4
would set the
+thread count for the second stream to 4.
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
+’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
+stream number stream_index of this type. Otherwise, it matches all
+streams of this type.
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number stream_index
+in the program with the id program_id . Otherwise, it matches all streams in the
+program.
+
+#stream_id or i:stream_id
+Match the stream by stream id (e.g. PID in MPEG-TS container).
+
+m:key [:value ]
+Matches streams with the metadata tag key having the specified value. If
+value is not given, matches streams that contain the given tag with any
+value.
+
+Note that in ffmpeg
, matching by metadata will only work properly for
+input files.
+
+
+
+
+
3.2 Generic options# TOC
+
+
These options are shared amongst the ff* tools.
+
+
+-L
+Show license.
+
+
+-h, -?, -help, --help [arg ]
+Show help. An optional parameter may be specified to print help about a specific
+item. If no argument is specified, only basic (non advanced) tool
+options are shown.
+
+Possible values of arg are:
+
+long
+Print advanced tool options in addition to the basic tool options.
+
+
+full
+Print complete list of options, including shared and private options
+for encoders, decoders, demuxers, muxers, filters, etc.
+
+
+decoder=decoder_name
+Print detailed information about the decoder named decoder_name . Use the
+-decoders option to get a list of all decoders.
+
+
+encoder=encoder_name
+Print detailed information about the encoder named encoder_name . Use the
+-encoders option to get a list of all encoders.
+
+
+demuxer=demuxer_name
+Print detailed information about the demuxer named demuxer_name . Use the
+-formats option to get a list of all demuxers and muxers.
+
+
+muxer=muxer_name
+Print detailed information about the muxer named muxer_name . Use the
+-formats option to get a list of all muxers and demuxers.
+
+
+filter=filter_name
+Print detailed information about the filter name filter_name . Use the
+-filters option to get a list of all filters.
+
+
+
+
+-version
+Show version.
+
+
+-formats
+Show available formats (including devices).
+
+
+-devices
+Show available devices.
+
+
+-codecs
+Show all codecs known to libavcodec.
+
+Note that the term ’codec’ is used throughout this documentation as a shortcut
+for what is more correctly called a media bitstream format.
+
+
+-decoders
+Show available decoders.
+
+
+-encoders
+Show all available encoders.
+
+
+-bsfs
+Show available bitstream filters.
+
+
+-protocols
+Show available protocols.
+
+
+-filters
+Show available libavfilter filters.
+
+
+-pix_fmts
+Show available pixel formats.
+
+
+-sample_fmts
+Show available sample formats.
+
+
+-layouts
+Show channel names and standard channel layouts.
+
+
+-colors
+Show recognized color names.
+
+
+-sources device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sources of the input device.
+Some devices may provide system-dependent source names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sources pulse,server=192.168.0.4
+
+
+
+-sinks device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sinks of the output device.
+Some devices may provide system-dependent sink names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sinks pulse,server=192.168.0.4
+
+
+
+-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+Set the logging level used by the library.
+Adding "repeat+" indicates that repeated log output should not be compressed
+to the first line and the "Last message repeated n times" line will be
+omitted. "repeat" can also be used alone.
+If "repeat" is used alone, and with no prior loglevel set, the default
+loglevel will be used. If multiple loglevel parameters are given, using
+’repeat’ will not change the loglevel.
+loglevel is a string or a number containing one of the following values:
+
+‘quiet, -8 ’
+Show nothing at all; be silent.
+
+‘panic, 0 ’
+Only show fatal errors which could lead the process to crash, such as
+an assert failure. This is not currently used for anything.
+
+‘fatal, 8 ’
+Only show fatal errors. These are errors after which the process
+absolutely cannot continue.
+
+‘error, 16 ’
+Show all errors, including ones which can be recovered from.
+
+‘warning, 24 ’
+Show all warnings and errors. Any message related to possibly
+incorrect or unexpected events will be shown.
+
+‘info, 32 ’
+Show informative messages during processing. This is in addition to
+warnings and errors. This is the default value.
+
+‘verbose, 40 ’
+Same as info
, except more verbose.
+
+‘debug, 48 ’
+Show everything, including debugging information.
+
+
+
+By default the program logs to stderr, if coloring is supported by the
+terminal, colors are used to mark errors and warnings. Log coloring
+can be disabled setting the environment variable
+AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
+the environment variable AV_LOG_FORCE_COLOR
.
+The use of the environment variable NO_COLOR
is deprecated and
+will be dropped in a following FFmpeg version.
+
+
+-report
+Dump full command line and console output to a file named
+program -YYYYMMDD -HHMMSS .log
in the current
+directory.
+This file can be useful for bug reports.
+It also implies -loglevel verbose
.
+
+Setting the environment variable FFREPORT
to any value has the
+same effect. If the value is a ’:’-separated key=value sequence, these
+options will affect the report; option values must be escaped if they
+contain special characters or the options delimiter ’:’ (see the
+“Quoting and escaping” section in the ffmpeg-utils manual).
+
+The following options are recognized:
+
+file
+set the file name to use for the report; %p
is expanded to the name
+of the program, %t
is expanded to a timestamp, %%
is expanded
+to a plain %
+
+level
+set the log verbosity level using a numerical value (see -loglevel
).
+
+
+
+For example, to output a report to a file named ffreport.log
+using a log level of 32
(alias for log level info
):
+
+
+
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
+
+
+Errors in parsing the environment variable are not fatal, and will not
+appear in the report.
+
+
+-hide_banner
+Suppress printing banner.
+
+All FFmpeg tools will normally show a copyright notice, build options
+and library versions. This option can be used to suppress printing
+this information.
+
+
+-cpuflags flags (global )
+Allows setting and clearing cpu flags. This option is intended
+for testing. Do not use it unless you know what you’re doing.
+
+
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+Possible flags for this option are:
+
+‘x86 ’
+
+‘mmx ’
+‘mmxext ’
+‘sse ’
+‘sse2 ’
+‘sse2slow ’
+‘sse3 ’
+‘sse3slow ’
+‘ssse3 ’
+‘atom ’
+‘sse4.1 ’
+‘sse4.2 ’
+‘avx ’
+‘xop ’
+‘fma4 ’
+‘3dnow ’
+‘3dnowext ’
+‘cmov ’
+
+
+‘ARM ’
+
+‘armv5te ’
+‘armv6 ’
+‘armv6t2 ’
+‘vfp ’
+‘vfpv3 ’
+‘neon ’
+
+
+‘PowerPC ’
+
+‘altivec ’
+
+
+‘Specific Processors ’
+
+‘pentium2 ’
+‘pentium3 ’
+‘pentium4 ’
+‘k6 ’
+‘k62 ’
+‘athlon ’
+‘athlonxp ’
+‘k8 ’
+
+
+
+
+
+-opencl_bench
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with --enable-opencl
.
+
+
+-opencl_options options (global )
+Set OpenCL environment options. This option is only available when
+FFmpeg has been compiled with --enable-opencl
.
+
+options must be a list of key =value option pairs
+separated by ’:’. See the “OpenCL Options” section in the
+ffmpeg-utils manual for the list of supported options.
+
+
+
+
+
3.3 AVOptions# TOC
+
+
These options are provided directly by the libavformat, libavdevice and
+libavcodec libraries. To see the list of available AVOptions, use the
+-help option. They are separated into two categories:
+
+generic
+These options can be set for any container, codec or device. Generic options
+are listed under AVFormatContext options for containers/devices and under
+AVCodecContext options for codecs.
+
+private
+These options are specific to the given container, device or codec. Private
+options are listed under their corresponding containers/devices/codecs.
+
+
+
+
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
+an MP3 file, use the id3v2_version private option of the MP3
+muxer:
+
+
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+
+
All codec AVOptions are per-stream, and thus a stream specifier
+should be attached to them.
+
+
Note: the -nooption syntax cannot be used for boolean
+AVOptions, use -option 0 /-option 1 .
+
+
Note: the old undocumented way of specifying per-stream AVOptions by
+prepending v/a/s to the options name is now obsolete and will be
+removed soon.
+
+
+
3.4 Main options# TOC
+
+
+-x width
+Force displayed width.
+
+-y height
+Force displayed height.
+
+-s size
+Set frame size (WxH or abbreviation), needed for videos which do
+not contain a header with the frame size like raw YUV. This option
+has been deprecated in favor of private options, try -video_size.
+
+-fs
+Start in fullscreen mode.
+
+-an
+Disable audio.
+
+-vn
+Disable video.
+
+-sn
+Disable subtitles.
+
+-ss pos
+Seek to a given position in seconds.
+
+-t duration
+play <duration> seconds of audio/video
+
+-bytes
+Seek by bytes.
+
+-nodisp
+Disable graphical display.
+
+-f fmt
+Force format.
+
+-window_title title
+Set window title (default is the input filename).
+
+-loop number
+Loops movie playback <number> times. 0 means forever.
+
+-showmode mode
+Set the show mode to use.
+Available values for mode are:
+
+‘0, video ’
+show video
+
+‘1, waves ’
+show audio waves
+
+‘2, rdft ’
+show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
+
+
+
+Default value is "video", if video is not present or cannot be played
+"rdft" is automatically selected.
+
+You can interactively cycle through the available show modes by
+pressing the key w .
+
+
+-vf filtergraph
+Create the filtergraph specified by filtergraph and use it to
+filter the video stream.
+
+filtergraph is a description of the filtergraph to apply to
+the stream, and must have a single video input and a single video
+output. In the filtergraph, the input is associated to the label
+in
, and the output to the label out
. See the
+ffmpeg-filters manual for more information about the filtergraph
+syntax.
+
+You can specify this parameter multiple times and cycle through the specified
+filtergraphs along with the show modes by pressing the key w .
+
+
+-af filtergraph
+filtergraph is a description of the filtergraph to apply to
+the input audio.
+Use the option "-filters" to show all the available filters (including
+sources and sinks).
+
+
+-i input_file
+Read input_file .
+
+
+
+
+
3.5 Advanced options# TOC
+
+-pix_fmt format
+Set pixel format.
+This option has been deprecated in favor of private options, try -pixel_format.
+
+
+-stats
+Print several playback statistics, in particular show the stream
+duration, the codec parameters, the current position in the stream and
+the audio/video synchronisation drift. It is on by default, to
+explicitly disable it you need to specify -nostats
.
+
+
+-fast
+Non-spec-compliant optimizations.
+
+-genpts
+Generate pts.
+
+-sync type
+Set the master clock to audio (type=audio
), video
+(type=video
) or external (type=ext
). Default is audio. The
+master clock is used to control audio-video synchronization. Most media
+players use audio as master clock, but in some cases (streaming or high
+quality broadcast) it is necessary to change that. This option is mainly
+used for debugging purposes.
+
+-ast audio_stream_specifier
+Select the desired audio stream using the given stream specifier. The stream
+specifiers are described in the Stream specifiers chapter. If this option
+is not specified, the "best" audio stream is selected in the program of the
+already selected video stream.
+
+-vst video_stream_specifier
+Select the desired video stream using the given stream specifier. The stream
+specifiers are described in the Stream specifiers chapter. If this option
+is not specified, the "best" video stream is selected.
+
+-sst subtitle_stream_specifier
+Select the desired subtitle stream using the given stream specifier. The stream
+specifiers are described in the Stream specifiers chapter. If this option
+is not specified, the "best" subtitle stream is selected in the program of the
+already selected video or audio stream.
+
+-autoexit
+Exit when video is done playing.
+
+-exitonkeydown
+Exit if any key is pressed.
+
+-exitonmousedown
+Exit if any mouse button is pressed.
+
+
+-codec:media_specifier codec_name
+Force a specific decoder implementation for the stream identified by
+media_specifier , which can assume the values a
(audio),
+v
(video), and s
subtitle.
+
+
+-acodec codec_name
+Force a specific audio decoder.
+
+
+-vcodec codec_name
+Force a specific video decoder.
+
+
+-scodec codec_name
+Force a specific subtitle decoder.
+
+
+-autorotate
+Automatically rotate the video according to presentation metadata. Enabled by
+default, use -noautorotate to disable it.
+
+
+-framedrop
+Drop video frames if video is out of sync. Enabled by default if the master
+clock is not set to video. Use this option to enable frame dropping for all
+master clock sources, use -noframedrop to disable it.
+
+
+-infbuf
+Do not limit the input buffer size, read as much data as possible from the
+input as soon as possible. Enabled by default for realtime streams, where data
+may be dropped if not read in time. Use this option to enable infinite buffers
+for all inputs, use -noinfbuf to disable it.
+
+
+
+
+
+
3.6 While playing# TOC
+
+
+q, ESC
+Quit.
+
+
+f
+Toggle full screen.
+
+
+p, SPC
+Pause.
+
+
+a
+Cycle audio channel in the current program.
+
+
+v
+Cycle video channel.
+
+
+t
+Cycle subtitle channel in the current program.
+
+
+c
+Cycle program.
+
+
+w
+Cycle video filters or show modes.
+
+
+s
+Step to the next frame.
+
+Pause if the stream is not already paused, step to the next video
+frame, and pause.
+
+
+left/right
+Seek backward/forward 10 seconds.
+
+
+down/up
+Seek backward/forward 1 minute.
+
+
+page down/page up
+Seek to the previous/next chapter.
+or if there are no chapters
+Seek backward/forward 10 minutes.
+
+
+mouse click
+Seek to percentage in file corresponding to fraction of width.
+
+
+
+
+
+
+
+
4 See Also# TOC
+
+
ffmpeg-all ,
+ffmpeg , ffprobe , ffserver ,
+ffmpeg-utils ,
+ffmpeg-scaler ,
+ffmpeg-resampler ,
+ffmpeg-codecs ,
+ffmpeg-bitstream-filters ,
+ffmpeg-formats ,
+ffmpeg-devices ,
+ffmpeg-protocols ,
+ffmpeg-filters
+
+
+
+
5 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffprobe-all.html b/Externals/ffmpeg/dev/doc/ffprobe-all.html
new file mode 100644
index 0000000000..a52af3304f
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffprobe-all.html
@@ -0,0 +1,21676 @@
+
+
+
+
+
+
+ ffprobe Documentation
+
+
+
+
+
+
+
+
+ ffprobe Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Synopsis# TOC
+
+
ffprobe [options ] [input_file ]
+
+
+
2 Description# TOC
+
+
ffprobe gathers information from multimedia streams and prints it in
+human- and machine-readable fashion.
+
+
For example it can be used to check the format of the container used
+by a multimedia stream and the format and type of each media stream
+contained in it.
+
+
If a filename is specified in input, ffprobe will try to open and
+probe the file content. If the file cannot be opened or recognized as
+a multimedia file, a positive exit code is returned.
+
+
ffprobe may be employed both as a standalone application or in
+combination with a textual filter, which may perform more
+sophisticated processing, e.g. statistical processing or plotting.
+
+
Options are used to list some of the formats supported by ffprobe or
+for specifying which information to display, and for setting how
+ffprobe will show it.
+
+
ffprobe output is designed to be easily parsable by a textual filter,
+and consists of one or more sections of a form defined by the selected
+writer, which is specified by the print_format option.
+
+
Sections may contain other nested sections, and are identified by a
+name (which may be shared by other sections), and a unique
+name. See the output of sections .
+
+
Metadata tags stored in the container or in the streams are recognized
+and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
+section.
+
+
+
+
3 Options# TOC
+
+
All the numerical options, if not specified otherwise, accept a string
+representing a number as input, which may be followed by one of the SI
+unit prefixes, for example: ’K’, ’M’, or ’G’.
+
+
If ’i’ is appended to the SI unit prefix, the complete prefix will be
+interpreted as a unit prefix for binary multiples, which are based on
+powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
+prefix multiplies the value by 8. This allows using, for example:
+’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
+
+
Options which do not take arguments are boolean options, and set the
+corresponding value to true. They can be set to false by prefixing
+the option name with "no". For example using "-nofoo"
+will set the boolean option with name "foo" to false.
+
+
+
3.1 Stream specifiers# TOC
+
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
+are used to precisely specify which stream(s) a given option belongs to.
+
+
A stream specifier is a string generally appended to the option name and
+separated from it by a colon. E.g. -codec:a:1 ac3
contains the
+a:1
stream specifier, which matches the second audio stream. Therefore, it
+would select the ac3 codec for the second audio stream.
+
+
A stream specifier can match several streams, so that the option is applied to all
+of them. E.g. the stream specifier in -b:a 128k
matches all audio
+streams.
+
+
An empty stream specifier matches all streams. For example, -codec copy
+or -codec: copy
would copy all the streams without reencoding.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index. E.g. -threads:1 4
would set the
+thread count for the second stream to 4.
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
+’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
+stream number stream_index of this type. Otherwise, it matches all
+streams of this type.
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number stream_index
+in the program with the id program_id . Otherwise, it matches all streams in the
+program.
+
+#stream_id or i:stream_id
+Match the stream by stream id (e.g. PID in MPEG-TS container).
+
+m:key [:value ]
+Matches streams with the metadata tag key having the specified value. If
+value is not given, matches streams that contain the given tag with any
+value.
+
+Note that in ffmpeg
, matching by metadata will only work properly for
+input files.
+
+
+
+
+
3.2 Generic options# TOC
+
+
These options are shared amongst the ff* tools.
+
+
+-L
+Show license.
+
+
+-h, -?, -help, --help [arg ]
+Show help. An optional parameter may be specified to print help about a specific
+item. If no argument is specified, only basic (non advanced) tool
+options are shown.
+
+Possible values of arg are:
+
+long
+Print advanced tool options in addition to the basic tool options.
+
+
+full
+Print complete list of options, including shared and private options
+for encoders, decoders, demuxers, muxers, filters, etc.
+
+
+decoder=decoder_name
+Print detailed information about the decoder named decoder_name . Use the
+-decoders option to get a list of all decoders.
+
+
+encoder=encoder_name
+Print detailed information about the encoder named encoder_name . Use the
+-encoders option to get a list of all encoders.
+
+
+demuxer=demuxer_name
+Print detailed information about the demuxer named demuxer_name . Use the
+-formats option to get a list of all demuxers and muxers.
+
+
+muxer=muxer_name
+Print detailed information about the muxer named muxer_name . Use the
+-formats option to get a list of all muxers and demuxers.
+
+
+filter=filter_name
+Print detailed information about the filter name filter_name . Use the
+-filters option to get a list of all filters.
+
+
+
+
+-version
+Show version.
+
+
+-formats
+Show available formats (including devices).
+
+
+-devices
+Show available devices.
+
+
+-codecs
+Show all codecs known to libavcodec.
+
+Note that the term ’codec’ is used throughout this documentation as a shortcut
+for what is more correctly called a media bitstream format.
+
+
+-decoders
+Show available decoders.
+
+
+-encoders
+Show all available encoders.
+
+
+-bsfs
+Show available bitstream filters.
+
+
+-protocols
+Show available protocols.
+
+
+-filters
+Show available libavfilter filters.
+
+
+-pix_fmts
+Show available pixel formats.
+
+
+-sample_fmts
+Show available sample formats.
+
+
+-layouts
+Show channel names and standard channel layouts.
+
+
+-colors
+Show recognized color names.
+
+
+-sources device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sources of the input device.
+Some devices may provide system-dependent source names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sources pulse,server=192.168.0.4
+
+
+
+-sinks device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sinks of the output device.
+Some devices may provide system-dependent sink names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sinks pulse,server=192.168.0.4
+
+
+
+-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+Set the logging level used by the library.
+Adding "repeat+" indicates that repeated log output should not be compressed
+to the first line and the "Last message repeated n times" line will be
+omitted. "repeat" can also be used alone.
+If "repeat" is used alone, and with no prior loglevel set, the default
+loglevel will be used. If multiple loglevel parameters are given, using
+’repeat’ will not change the loglevel.
+loglevel is a string or a number containing one of the following values:
+
+‘quiet, -8 ’
+Show nothing at all; be silent.
+
+‘panic, 0 ’
+Only show fatal errors which could lead the process to crash, such as
+an assert failure. This is not currently used for anything.
+
+‘fatal, 8 ’
+Only show fatal errors. These are errors after which the process absolutely
+cannot continue.
+
+‘error, 16 ’
+Show all errors, including ones which can be recovered from.
+
+‘warning, 24 ’
+Show all warnings and errors. Any message related to possibly
+incorrect or unexpected events will be shown.
+
+‘info, 32 ’
+Show informative messages during processing. This is in addition to
+warnings and errors. This is the default value.
+
+‘verbose, 40 ’
+Same as info
, except more verbose.
+
+‘debug, 48 ’
+Show everything, including debugging information.
+
+
+
+By default the program logs to stderr, if coloring is supported by the
+terminal, colors are used to mark errors and warnings. Log coloring
+can be disabled setting the environment variable
+AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
+the environment variable AV_LOG_FORCE_COLOR
.
+The use of the environment variable NO_COLOR
is deprecated and
+will be dropped in a following FFmpeg version.
+
+
+-report
+Dump full command line and console output to a file named
+program -YYYYMMDD -HHMMSS .log
in the current
+directory.
+This file can be useful for bug reports.
+It also implies -loglevel verbose
.
+
+Setting the environment variable FFREPORT
to any value has the
+same effect. If the value is a ’:’-separated key=value sequence, these
+options will affect the report; option values must be escaped if they
+contain special characters or the options delimiter ’:’ (see the
+“Quoting and escaping” section in the ffmpeg-utils manual).
+
+The following options are recognized:
+
+file
+set the file name to use for the report; %p
is expanded to the name
+of the program, %t
is expanded to a timestamp, %%
is expanded
+to a plain %
+
+level
+set the log verbosity level using a numerical value (see -loglevel
).
+
+
+
+For example, to output a report to a file named ffreport.log
+using a log level of 32
(alias for log level info
):
+
+
+
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
+
+
+Errors in parsing the environment variable are not fatal, and will not
+appear in the report.
+
+
+-hide_banner
+Suppress printing banner.
+
+All FFmpeg tools will normally show a copyright notice, build options
+and library versions. This option can be used to suppress printing
+this information.
+
+
+-cpuflags flags (global )
+Allows setting and clearing cpu flags. This option is intended
+for testing. Do not use it unless you know what you’re doing.
+
+
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+Possible flags for this option are:
+
+‘x86 ’
+
+‘mmx ’
+‘mmxext ’
+‘sse ’
+‘sse2 ’
+‘sse2slow ’
+‘sse3 ’
+‘sse3slow ’
+‘ssse3 ’
+‘atom ’
+‘sse4.1 ’
+‘sse4.2 ’
+‘avx ’
+‘xop ’
+‘fma4 ’
+‘3dnow ’
+‘3dnowext ’
+‘cmov ’
+
+
+‘ARM ’
+
+‘armv5te ’
+‘armv6 ’
+‘armv6t2 ’
+‘vfp ’
+‘vfpv3 ’
+‘neon ’
+
+
+‘PowerPC ’
+
+‘altivec ’
+
+
+‘Specific Processors ’
+
+‘pentium2 ’
+‘pentium3 ’
+‘pentium4 ’
+‘k6 ’
+‘k62 ’
+‘athlon ’
+‘athlonxp ’
+‘k8 ’
+
+
+
+
+
+-opencl_bench
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with --enable-opencl
.
+
+
+-opencl_options options (global )
+Set OpenCL environment options. This option is only available when
+FFmpeg has been compiled with --enable-opencl
.
+
+options must be a list of key =value option pairs
+separated by ’:’. See the “OpenCL Options” section in the
+ffmpeg-utils manual for the list of supported options.
+
+
+
+
+
3.3 AVOptions# TOC
+
+
These options are provided directly by the libavformat, libavdevice and
+libavcodec libraries. To see the list of available AVOptions, use the
+-help option. They are separated into two categories:
+
+generic
+These options can be set for any container, codec or device. Generic options
+are listed under AVFormatContext options for containers/devices and under
+AVCodecContext options for codecs.
+
+private
+These options are specific to the given container, device or codec. Private
+options are listed under their corresponding containers/devices/codecs.
+
+
+
+
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
+an MP3 file, use the id3v2_version private option of the MP3
+muxer:
+
+
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+
+
All codec AVOptions are per-stream, and thus a stream specifier
+should be attached to them.
+
+
Note: the -nooption syntax cannot be used for boolean
+AVOptions, use -option 0 /-option 1 .
+
+
Note: the old undocumented way of specifying per-stream AVOptions by
+prepending v/a/s to the options name is now obsolete and will be
+removed soon.
+
+
+
3.4 Main options# TOC
+
+
+-f format
+Force format to use.
+
+
+-unit
+Show the unit of the displayed values.
+
+
+-prefix
+Use SI prefixes for the displayed values.
+Unless the "-byte_binary_prefix" option is used all the prefixes
+are decimal.
+
+
+-byte_binary_prefix
+Force the use of binary prefixes for byte values.
+
+
+-sexagesimal
+Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
+
+
+-pretty
+Prettify the format of the displayed values, it corresponds to the
+options "-unit -prefix -byte_binary_prefix -sexagesimal".
+
+
+-of, -print_format writer_name [=writer_options ]
+Set the output printing format.
+
+writer_name specifies the name of the writer, and
+writer_options specifies the options to be passed to the writer.
+
+For example for printing the output in JSON format, specify:
+
+
+For more details on the available output printing formats, see the
+Writers section below.
+
+
+-sections
+Print sections structure and section information, and exit. The output
+is not meant to be parsed by a machine.
+
+
+-select_streams stream_specifier
+Select only the streams specified by stream_specifier . This
+option affects only the options related to streams
+(e.g. show_streams
, show_packets
, etc.).
+
+For example to show only audio streams, you can use the command:
+
+
ffprobe -show_streams -select_streams a INPUT
+
+
+To show only video packets belonging to the video stream with index 1:
+
+
ffprobe -show_packets -select_streams v:1 INPUT
+
+
+
+-show_data
+Show payload data, as a hexadecimal and ASCII dump. Coupled with
+-show_packets , it will dump the packets’ data. Coupled with
+-show_streams , it will dump the codec extradata.
+
+The dump is printed as the "data" field. It may contain newlines.
+
+
+-show_data_hash algorithm
+Show a hash of payload data, for packets with -show_packets and for
+codec extradata with -show_streams .
+
+
+-show_error
+Show information about the error found when trying to probe the input.
+
+The error information is printed within a section with name "ERROR".
+
+
+-show_format
+Show information about the container format of the input multimedia
+stream.
+
+All the container format information is printed within a section with
+name "FORMAT".
+
+
+-show_format_entry name
+Like -show_format , but only prints the specified entry of the
+container format information, rather than all. This option may be given more
+than once, then all specified entries will be shown.
+
+This option is deprecated, use show_entries
instead.
+
+
+-show_entries section_entries
+Set list of entries to show.
+
+Entries are specified according to the following
+syntax. section_entries contains a list of section entries
+separated by :
. Each section entry is composed by a section
+name (or unique name), optionally followed by a list of entries local
+to that section, separated by ,
.
+
+If section name is specified but is followed by no =
, all
+entries are printed to output, together with all the contained
+sections. Otherwise only the entries specified in the local section
+entries list are printed. In particular, if =
is specified but
+the list of local entries is empty, then no entries will be shown for
+that section.
+
+Note that the order of specification of the local section entries is
+not honored in the output, and the usual display order will be
+retained.
+
+The formal syntax is given by:
+
+
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME [,LOCAL_SECTION_ENTRIES ]
+SECTION_ENTRY ::= SECTION_NAME [=[LOCAL_SECTION_ENTRIES ]]
+SECTION_ENTRIES ::= SECTION_ENTRY [:SECTION_ENTRIES ]
+
+
+For example, to show only the index and type of each stream, and the PTS
+time, duration time, and stream index of the packets, you can specify
+the argument:
+
+
packet=pts_time,duration_time,stream_index : stream=index,codec_type
+
+
+To show all the entries in the section "format", but only the codec
+type in the section "stream", specify the argument:
+
+
format : stream=codec_type
+
+
+To show all the tags in the stream and format sections:
+
+
stream_tags : format_tags
+
+
+To show only the title
tag (if available) in the stream
+sections:
+
+
+
+-show_packets
+Show information about each packet contained in the input multimedia
+stream.
+
+The information for each single packet is printed within a dedicated
+section with name "PACKET".
+
+
+-show_frames
+Show information about each frame and subtitle contained in the input
+multimedia stream.
+
+The information for each single frame is printed within a dedicated
+section with name "FRAME" or "SUBTITLE".
+
+
+-show_streams
+Show information about each media stream contained in the input
+multimedia stream.
+
+Each media stream information is printed within a dedicated section
+with name "STREAM".
+
+
+-show_programs
+Show information about programs and their streams contained in the input
+multimedia stream.
+
+Each media stream information is printed within a dedicated section
+with name "PROGRAM_STREAM".
+
+
+-show_chapters
+Show information about chapters stored in the format.
+
+Each chapter is printed within a dedicated section with name "CHAPTER".
+
+
+-count_frames
+Count the number of frames per stream and report it in the
+corresponding stream section.
+
+
+-count_packets
+Count the number of packets per stream and report it in the
+corresponding stream section.
+
+
+-read_intervals read_intervals
+
+Read only the specified intervals. read_intervals must be a
+sequence of interval specifications separated by ",".
+ffprobe
will seek to the interval starting point, and will
+continue reading from that.
+
+Each interval is specified by two optional parts, separated by "%".
+
+The first part specifies the interval start position. It is
+interpreted as an absolute position, or as a relative offset from the
+current position if it is preceded by the "+" character. If this first
+part is not specified, no seeking will be performed when reading this
+interval.
+
+The second part specifies the interval end position. It is interpreted
+as an absolute position, or as a relative offset from the current
+position if it is preceded by the "+" character. If the offset
+specification starts with "#", it is interpreted as the number of
+packets to read (not including the flushing packets) from the interval
+start. If no second part is specified, the program will read until the
+end of the input.
+
+Note that seeking is not accurate, thus the actual interval start
+point may be different from the specified position. Also, when an
+interval duration is specified, the absolute end time will be computed
+by adding the duration to the interval start point found by seeking
+the file, rather than to the specified start value.
+
+The formal syntax is given by:
+
+
INTERVAL ::= [START |+START_OFFSET ][%[END |+END_OFFSET ]]
+INTERVALS ::= INTERVAL [,INTERVALS ]
+
+
+A few examples follow.
+
+ Seek to time 10, read packets until 20 seconds after the found seek
+point, then seek to position 01:30
(1 minute and thirty
+seconds) and read packets until position 01:45
.
+
+
+ Read only 42 packets after seeking to position 01:23
:
+
+
+ Read only the first 20 seconds from the start:
+
+
+ Read from the start until position 02:30
:
+
+
+
+
+-show_private_data, -private
+Show private data, that is data depending on the format of the
+particular shown element.
+This option is enabled by default, but you may need to disable it
+for specific uses, for example when creating XSD-compliant XML output.
+
+
+-show_program_version
+Show information related to program version.
+
+Version information is printed within a section with name
+"PROGRAM_VERSION".
+
+
+-show_library_versions
+Show information related to library versions.
+
+Version information for each library is printed within a section with
+name "LIBRARY_VERSION".
+
+
+-show_versions
+Show information related to program and library versions. This is the
+equivalent of setting both -show_program_version and
+-show_library_versions options.
+
+
+-show_pixel_formats
+Show information about all pixel formats supported by FFmpeg.
+
+Pixel format information for each format is printed within a section
+with name "PIXEL_FORMAT".
+
+
+-bitexact
+Force bitexact output, useful to produce output which is not dependent
+on the specific build.
+
+
+-i input_file
+Read input_file .
+
+
+
+
+
+
4 Writers# TOC
+
+
A writer defines the output format adopted by ffprobe
, and will be
+used for printing all the parts of the output.
+
+
A writer may accept one or more arguments, which specify the options
+to adopt. The options are specified as a list of key =value
+pairs, separated by ":".
+
+
All writers support the following options:
+
+
+string_validation, sv
+Set string validation mode.
+
+The following values are accepted.
+
+‘fail ’
+The writer will fail immediately in case an invalid string (UTF-8)
+sequence or code point is found in the input. This is especially
+useful to validate input metadata.
+
+
+‘ignore ’
+Any validation error will be ignored. This will result in possibly
+broken output, especially with the json or xml writer.
+
+
+‘replace ’
+The writer will substitute invalid UTF-8 sequences or code points with
+the string specified with the string_validation_replacement .
+
+
+
+Default value is ‘replace ’.
+
+
+string_validation_replacement, svr
+Set replacement string to use in case string_validation is
+set to ‘replace ’.
+
+In case the option is not specified, the writer will assume the empty
+string, that is it will remove the invalid sequences from the input
+strings.
+
+
+
+
A description of the currently available writers follows.
+
+
+
4.1 default# TOC
+
Default format.
+
+
Print each section in the form:
+
+
[SECTION]
+key1=val1
+...
+keyN=valN
+[/SECTION]
+
+
+
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
+PROGRAM_STREAM section, and are prefixed by the string "TAG:".
+
+
A description of the accepted options follows.
+
+
+nokey, nk
+If set to 1 specify not to print the key of each field. Default value
+is 0.
+
+
+noprint_wrappers, nw
+If set to 1 specify not to print the section header and footer.
+Default value is 0.
+
+
+
+
+
4.2 compact, csv# TOC
+
Compact and CSV format.
+
+
The csv
writer is equivalent to compact
, but supports
+different defaults.
+
+
Each section is printed on a single line.
+If no option is specified, the output has the form:
+
+
section|key1=val1| ... |keyN=valN
+
+
+
Metadata tags are printed in the corresponding "format" or "stream"
+section. A metadata tag key, if printed, is prefixed by the string
+"tag:".
+
+
The description of the accepted options follows.
+
+
+item_sep, s
+Specify the character to use for separating fields in the output line.
+It must be a single printable character, it is "|" by default ("," for
+the csv
writer).
+
+
+nokey, nk
+If set to 1 specify not to print the key of each field. Its default
+value is 0 (1 for the csv
writer).
+
+
+escape, e
+Set the escape mode to use, default to "c" ("csv" for the csv
+writer).
+
+It can assume one of the following values:
+
+c
+Perform C-like escaping. Strings containing a newline (’\n’), carriage
+return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping
+character (’\’) or the item separator character SEP are escaped using C-like fashioned
+escaping, so that a newline is converted to the sequence "\n", a
+carriage return to "\r", ’\’ to "\\" and the separator SEP is
+converted to "\SEP ".
+
+
+csv
+Perform CSV-like escaping, as described in RFC4180. Strings
+containing a newline (’\n’), a carriage return (’\r’), a double quote
+(’"’), or SEP are enclosed in double-quotes.
+
+
+none
+Perform no escaping.
+
+
+
+
+print_section, p
+Print the section name at the beginning of each line if the value is
+1
, disable it with value set to 0
. Default value is
+1
.
+
+
+
+
+
+
4.3 flat# TOC
+
Flat format.
+
+
A free-form output where each line contains an explicit key=value, such as
+"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
+directly embedded in sh scripts as long as the separator character is an
+alphanumeric character or an underscore (see sep_char option).
+
+
The description of the accepted options follows.
+
+
+sep_char, s
+Separator character used to separate the chapter, the section name, IDs and
+potential tags in the printed field key.
+
+Default value is ’.’.
+
+
+hierarchical, h
+Specify if the section name specification should be hierarchical. If
+set to 1, and if there is more than one section in the current
+chapter, the section name will be prefixed by the name of the
+chapter. A value of 0 will disable this behavior.
+
+Default value is 1.
+
+
+
+
+
+
4.4 ini# TOC
+
INI format output.
+
+
Print output in an INI based format.
+
+
The following conventions are adopted:
+
+
+ all key and values are UTF-8
+ ’.’ is the subgroup separator
+ newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped
+ ’\’ is the escape character
+ ’#’ is the comment indicator
+ ’=’ is the key/value separator
+ ’:’ is not used but usually parsed as key/value separator
+
+
+
This writer accepts options as a list of key =value pairs,
+separated by ":".
+
+
The description of the accepted options follows.
+
+
+hierarchical, h
+Specify if the section name specification should be hierarchical. If
+set to 1, and if there is more than one section in the current
+chapter, the section name will be prefixed by the name of the
+chapter. A value of 0 will disable this behavior.
+
+Default value is 1.
+
+
+
+
+
4.5 json# TOC
+
JSON based format.
+
+
Each section is printed using JSON notation.
+
+
The description of the accepted options follows.
+
+
+compact, c
+If set to 1 enable compact output, that is each section will be
+printed on a single line. Default value is 0.
+
+
+
+
For more information about JSON, see http://www.json.org/ .
+
+
+
+
4.6 xml# TOC
+
XML based format.
+
+
The XML output is described in the XML schema description file
+ffprobe.xsd installed in the FFmpeg datadir.
+
+
An updated version of the schema can be retrieved at the url
+http://www.ffmpeg.org/schema/ffprobe.xsd , which redirects to the
+latest schema committed into the FFmpeg development source code tree.
+
+
Note that the output issued will be compliant to the
+ffprobe.xsd schema only when no special global output options
+(unit , prefix , byte_binary_prefix ,
+sexagesimal etc.) are specified.
+
+
The description of the accepted options follows.
+
+
+fully_qualified, q
+If set to 1 specify if the output should be fully qualified. Default
+value is 0.
+This is required for generating an XML file which can be validated
+through an XSD file.
+
+
+xsd_compliant, x
+If set to 1 perform more checks for ensuring that the output is XSD
+compliant. Default value is 0.
+This option automatically sets fully_qualified to 1.
+
+
+
+
For more information about the XML format, see
+http://www.w3.org/XML/ .
+
+
+
5 Timecode# TOC
+
+
ffprobe
supports Timecode extraction:
+
+
+ MPEG1/2 timecode is extracted from the GOP, and is available in the video
+stream details (-show_streams , see timecode ).
+
+ MOV timecode is extracted from tmcd track, so is available in the tmcd
+stream metadata (-show_streams , see TAG:timecode ).
+
+ DV, GXF and AVI timecodes are available in format metadata
+(-show_format , see TAG:timecode ).
+
+
+
+
+
6 Syntax# TOC
+
+
This section documents the syntax and formats employed by the FFmpeg
+libraries and tools.
+
+
+
6.1 Quoting and escaping# TOC
+
+
FFmpeg adopts the following quoting and escaping mechanism, unless
+explicitly specified. The following rules are applied:
+
+
+ '
and \
are special characters (respectively used for
+quoting and escaping). In addition to them, there might be other
+special characters depending on the specific syntax where the escaping
+and quoting are employed.
+
+ A special character is escaped by prefixing it with a ’\’.
+
+ All characters enclosed between '' are included literally in the
+parsed string. The quote character '
itself cannot be quoted,
+so you may need to close the quote and escape it.
+
+ Leading and trailing whitespaces, unless escaped or quoted, are
+removed from the parsed string.
+
+
+
Note that you may need to add a second level of escaping when using
+the command line or a script, which depends on the syntax of the
+adopted shell language.
+
+
The function av_get_token
defined in
+libavutil/avstring.h can be used to parse a token quoted or
+escaped according to the rules defined above.
+
+
The tool tools/ffescape in the FFmpeg source tree can be used
+to automatically quote or escape a string in a script.
+
+
+
6.1.1 Examples# TOC
+
+
+ Escape the string Crime d'Amour
containing the '
special
+character:
+
+
+ The string above contains a quote, so the '
needs to be escaped
+when quoting it:
+
+
+ Include leading or trailing whitespaces using quoting:
+
+
' this string starts and ends with whitespaces '
+
+
+ Escaping and quoting can be mixed together:
+
+
' The string '\'string\'' is a string '
+
+
+ To include a literal \
you can use either escaping or quoting:
+
+
'c:\foo' can be written as c:\\foo
+
+
+
+
+
6.2 Date# TOC
+
+
The accepted syntax is:
+
+
[(YYYY-MM-DD|YYYYMMDD)[T|t| ]]((HH:MM:SS[.m...]]])|(HHMMSS[.m...]]]))[Z]
+now
+
+
+
If the value is "now" it takes the current time.
+
+
Time is local time unless Z is appended, in which case it is
+interpreted as UTC.
+If the year-month-day part is not specified it takes the current
+year-month-day.
+
+
+
6.3 Time duration# TOC
+
+
There are two accepted syntaxes for expressing time duration.
+
+
+
[-]HH:MM:SS[.m...]
HH expresses the number of hours, MM the number of minutes
+for a maximum of 2 digits, and SS the number of seconds for a
+maximum of 2 digits. The m at the end expresses decimal value for
+SS .
+
+
or
+
+
+
[-]S+[.m...]
S expresses the number of seconds, with the optional decimal part
+m .
+
+
In both expressions, the optional ‘- ’ indicates negative duration.
+
+
+
6.3.1 Examples# TOC
+
+
The following examples are all valid time duration:
+
+
+‘55 ’
+55 seconds
+
+
+‘12:03:45 ’
+12 hours, 03 minutes and 45 seconds
+
+
+‘23.189 ’
+23.189 seconds
+
+
+
+
+
6.4 Video size# TOC
+
Specify the size of the sourced video, it may be a string of the form
+width xheight , or the name of a size abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+720x480
+
+‘pal ’
+720x576
+
+‘qntsc ’
+352x240
+
+‘qpal ’
+352x288
+
+‘sntsc ’
+640x480
+
+‘spal ’
+768x576
+
+‘film ’
+352x240
+
+‘ntsc-film ’
+352x240
+
+‘sqcif ’
+128x96
+
+‘qcif ’
+176x144
+
+‘cif ’
+352x288
+
+‘4cif ’
+704x576
+
+‘16cif ’
+1408x1152
+
+‘qqvga ’
+160x120
+
+‘qvga ’
+320x240
+
+‘vga ’
+640x480
+
+‘svga ’
+800x600
+
+‘xga ’
+1024x768
+
+‘uxga ’
+1600x1200
+
+‘qxga ’
+2048x1536
+
+‘sxga ’
+1280x1024
+
+‘qsxga ’
+2560x2048
+
+‘hsxga ’
+5120x4096
+
+‘wvga ’
+852x480
+
+‘wxga ’
+1366x768
+
+‘wsxga ’
+1600x1024
+
+‘wuxga ’
+1920x1200
+
+‘woxga ’
+2560x1600
+
+‘wqsxga ’
+3200x2048
+
+‘wquxga ’
+3840x2400
+
+‘whsxga ’
+6400x4096
+
+‘whuxga ’
+7680x4800
+
+‘cga ’
+320x200
+
+‘ega ’
+640x350
+
+‘hd480 ’
+852x480
+
+‘hd720 ’
+1280x720
+
+‘hd1080 ’
+1920x1080
+
+‘2k ’
+2048x1080
+
+‘2kflat ’
+1998x1080
+
+‘2kscope ’
+2048x858
+
+‘4k ’
+4096x2160
+
+‘4kflat ’
+3996x2160
+
+‘4kscope ’
+4096x1716
+
+‘nhd ’
+640x360
+
+‘hqvga ’
+240x160
+
+‘wqvga ’
+400x240
+
+‘fwqvga ’
+432x240
+
+‘hvga ’
+480x320
+
+‘qhd ’
+960x540
+
+
+
+
+
6.5 Video rate# TOC
+
+
Specify the frame rate of a video, expressed as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a float
+number or a valid video frame rate abbreviation.
+
+
The following abbreviations are recognized:
+
+‘ntsc ’
+30000/1001
+
+‘pal ’
+25/1
+
+‘qntsc ’
+30000/1001
+
+‘qpal ’
+25/1
+
+‘sntsc ’
+30000/1001
+
+‘spal ’
+25/1
+
+‘film ’
+24/1
+
+‘ntsc-film ’
+24000/1001
+
+
+
+
+
6.6 Ratio# TOC
+
+
A ratio can be expressed as an expression, or in the form
+numerator :denominator .
+
+
Note that a ratio with infinite (1/0) or negative value is
+considered valid, so you should check on the returned value if you
+want to exclude those values.
+
+
The undefined value can be expressed using the "0:0" string.
+
+
+
6.7 Color# TOC
+
+
It can be the name of a color as defined below (case insensitive match) or a
+[0x|#]RRGGBB[AA]
sequence, possibly followed by @ and a string
+representing the alpha component.
+
+
The alpha component may be a string composed by "0x" followed by an
+hexadecimal number or a decimal number between 0.0 and 1.0, which
+represents the opacity value (‘0x00 ’ or ‘0.0 ’ means completely
+transparent, ‘0xff ’ or ‘1.0 ’ completely opaque). If the alpha
+component is not specified then ‘0xff ’ is assumed.
+
+
The string ‘random ’ will result in a random color.
+
+
The following names of colors are recognized:
+
+‘AliceBlue ’
+0xF0F8FF
+
+‘AntiqueWhite ’
+0xFAEBD7
+
+‘Aqua ’
+0x00FFFF
+
+‘Aquamarine ’
+0x7FFFD4
+
+‘Azure ’
+0xF0FFFF
+
+‘Beige ’
+0xF5F5DC
+
+‘Bisque ’
+0xFFE4C4
+
+‘Black ’
+0x000000
+
+‘BlanchedAlmond ’
+0xFFEBCD
+
+‘Blue ’
+0x0000FF
+
+‘BlueViolet ’
+0x8A2BE2
+
+‘Brown ’
+0xA52A2A
+
+‘BurlyWood ’
+0xDEB887
+
+‘CadetBlue ’
+0x5F9EA0
+
+‘Chartreuse ’
+0x7FFF00
+
+‘Chocolate ’
+0xD2691E
+
+‘Coral ’
+0xFF7F50
+
+‘CornflowerBlue ’
+0x6495ED
+
+‘Cornsilk ’
+0xFFF8DC
+
+‘Crimson ’
+0xDC143C
+
+‘Cyan ’
+0x00FFFF
+
+‘DarkBlue ’
+0x00008B
+
+‘DarkCyan ’
+0x008B8B
+
+‘DarkGoldenRod ’
+0xB8860B
+
+‘DarkGray ’
+0xA9A9A9
+
+‘DarkGreen ’
+0x006400
+
+‘DarkKhaki ’
+0xBDB76B
+
+‘DarkMagenta ’
+0x8B008B
+
+‘DarkOliveGreen ’
+0x556B2F
+
+‘Darkorange ’
+0xFF8C00
+
+‘DarkOrchid ’
+0x9932CC
+
+‘DarkRed ’
+0x8B0000
+
+‘DarkSalmon ’
+0xE9967A
+
+‘DarkSeaGreen ’
+0x8FBC8F
+
+‘DarkSlateBlue ’
+0x483D8B
+
+‘DarkSlateGray ’
+0x2F4F4F
+
+‘DarkTurquoise ’
+0x00CED1
+
+‘DarkViolet ’
+0x9400D3
+
+‘DeepPink ’
+0xFF1493
+
+‘DeepSkyBlue ’
+0x00BFFF
+
+‘DimGray ’
+0x696969
+
+‘DodgerBlue ’
+0x1E90FF
+
+‘FireBrick ’
+0xB22222
+
+‘FloralWhite ’
+0xFFFAF0
+
+‘ForestGreen ’
+0x228B22
+
+‘Fuchsia ’
+0xFF00FF
+
+‘Gainsboro ’
+0xDCDCDC
+
+‘GhostWhite ’
+0xF8F8FF
+
+‘Gold ’
+0xFFD700
+
+‘GoldenRod ’
+0xDAA520
+
+‘Gray ’
+0x808080
+
+‘Green ’
+0x008000
+
+‘GreenYellow ’
+0xADFF2F
+
+‘HoneyDew ’
+0xF0FFF0
+
+‘HotPink ’
+0xFF69B4
+
+‘IndianRed ’
+0xCD5C5C
+
+‘Indigo ’
+0x4B0082
+
+‘Ivory ’
+0xFFFFF0
+
+‘Khaki ’
+0xF0E68C
+
+‘Lavender ’
+0xE6E6FA
+
+‘LavenderBlush ’
+0xFFF0F5
+
+‘LawnGreen ’
+0x7CFC00
+
+‘LemonChiffon ’
+0xFFFACD
+
+‘LightBlue ’
+0xADD8E6
+
+‘LightCoral ’
+0xF08080
+
+‘LightCyan ’
+0xE0FFFF
+
+‘LightGoldenRodYellow ’
+0xFAFAD2
+
+‘LightGreen ’
+0x90EE90
+
+‘LightGrey ’
+0xD3D3D3
+
+‘LightPink ’
+0xFFB6C1
+
+‘LightSalmon ’
+0xFFA07A
+
+‘LightSeaGreen ’
+0x20B2AA
+
+‘LightSkyBlue ’
+0x87CEFA
+
+‘LightSlateGray ’
+0x778899
+
+‘LightSteelBlue ’
+0xB0C4DE
+
+‘LightYellow ’
+0xFFFFE0
+
+‘Lime ’
+0x00FF00
+
+‘LimeGreen ’
+0x32CD32
+
+‘Linen ’
+0xFAF0E6
+
+‘Magenta ’
+0xFF00FF
+
+‘Maroon ’
+0x800000
+
+‘MediumAquaMarine ’
+0x66CDAA
+
+‘MediumBlue ’
+0x0000CD
+
+‘MediumOrchid ’
+0xBA55D3
+
+‘MediumPurple ’
+0x9370D8
+
+‘MediumSeaGreen ’
+0x3CB371
+
+‘MediumSlateBlue ’
+0x7B68EE
+
+‘MediumSpringGreen ’
+0x00FA9A
+
+‘MediumTurquoise ’
+0x48D1CC
+
+‘MediumVioletRed ’
+0xC71585
+
+‘MidnightBlue ’
+0x191970
+
+‘MintCream ’
+0xF5FFFA
+
+‘MistyRose ’
+0xFFE4E1
+
+‘Moccasin ’
+0xFFE4B5
+
+‘NavajoWhite ’
+0xFFDEAD
+
+‘Navy ’
+0x000080
+
+‘OldLace ’
+0xFDF5E6
+
+‘Olive ’
+0x808000
+
+‘OliveDrab ’
+0x6B8E23
+
+‘Orange ’
+0xFFA500
+
+‘OrangeRed ’
+0xFF4500
+
+‘Orchid ’
+0xDA70D6
+
+‘PaleGoldenRod ’
+0xEEE8AA
+
+‘PaleGreen ’
+0x98FB98
+
+‘PaleTurquoise ’
+0xAFEEEE
+
+‘PaleVioletRed ’
+0xD87093
+
+‘PapayaWhip ’
+0xFFEFD5
+
+‘PeachPuff ’
+0xFFDAB9
+
+‘Peru ’
+0xCD853F
+
+‘Pink ’
+0xFFC0CB
+
+‘Plum ’
+0xDDA0DD
+
+‘PowderBlue ’
+0xB0E0E6
+
+‘Purple ’
+0x800080
+
+‘Red ’
+0xFF0000
+
+‘RosyBrown ’
+0xBC8F8F
+
+‘RoyalBlue ’
+0x4169E1
+
+‘SaddleBrown ’
+0x8B4513
+
+‘Salmon ’
+0xFA8072
+
+‘SandyBrown ’
+0xF4A460
+
+‘SeaGreen ’
+0x2E8B57
+
+‘SeaShell ’
+0xFFF5EE
+
+‘Sienna ’
+0xA0522D
+
+‘Silver ’
+0xC0C0C0
+
+‘SkyBlue ’
+0x87CEEB
+
+‘SlateBlue ’
+0x6A5ACD
+
+‘SlateGray ’
+0x708090
+
+‘Snow ’
+0xFFFAFA
+
+‘SpringGreen ’
+0x00FF7F
+
+‘SteelBlue ’
+0x4682B4
+
+‘Tan ’
+0xD2B48C
+
+‘Teal ’
+0x008080
+
+‘Thistle ’
+0xD8BFD8
+
+‘Tomato ’
+0xFF6347
+
+‘Turquoise ’
+0x40E0D0
+
+‘Violet ’
+0xEE82EE
+
+‘Wheat ’
+0xF5DEB3
+
+‘White ’
+0xFFFFFF
+
+‘WhiteSmoke ’
+0xF5F5F5
+
+‘Yellow ’
+0xFFFF00
+
+‘YellowGreen ’
+0x9ACD32
+
+
+
+
+
6.8 Channel Layout# TOC
+
+
A channel layout specifies the spatial disposition of the channels in
+a multi-channel audio stream. To specify a channel layout, FFmpeg
+makes use of a special syntax.
+
+
Individual channels are identified by an id, as given by the table
+below:
+
+‘FL ’
+front left
+
+‘FR ’
+front right
+
+‘FC ’
+front center
+
+‘LFE ’
+low frequency
+
+‘BL ’
+back left
+
+‘BR ’
+back right
+
+‘FLC ’
+front left-of-center
+
+‘FRC ’
+front right-of-center
+
+‘BC ’
+back center
+
+‘SL ’
+side left
+
+‘SR ’
+side right
+
+‘TC ’
+top center
+
+‘TFL ’
+top front left
+
+‘TFC ’
+top front center
+
+‘TFR ’
+top front right
+
+‘TBL ’
+top back left
+
+‘TBC ’
+top back center
+
+‘TBR ’
+top back right
+
+‘DL ’
+downmix left
+
+‘DR ’
+downmix right
+
+‘WL ’
+wide left
+
+‘WR ’
+wide right
+
+‘SDL ’
+surround direct left
+
+‘SDR ’
+surround direct right
+
+‘LFE2 ’
+low frequency 2
+
+
+
+
Standard channel layout compositions can be specified by using the
+following identifiers:
+
+‘mono ’
+FC
+
+‘stereo ’
+FL+FR
+
+‘2.1 ’
+FL+FR+LFE
+
+‘3.0 ’
+FL+FR+FC
+
+‘3.0(back) ’
+FL+FR+BC
+
+‘4.0 ’
+FL+FR+FC+BC
+
+‘quad ’
+FL+FR+BL+BR
+
+‘quad(side) ’
+FL+FR+SL+SR
+
+‘3.1 ’
+FL+FR+FC+LFE
+
+‘5.0 ’
+FL+FR+FC+BL+BR
+
+‘5.0(side) ’
+FL+FR+FC+SL+SR
+
+‘4.1 ’
+FL+FR+FC+LFE+BC
+
+‘5.1 ’
+FL+FR+FC+LFE+BL+BR
+
+‘5.1(side) ’
+FL+FR+FC+LFE+SL+SR
+
+‘6.0 ’
+FL+FR+FC+BC+SL+SR
+
+‘6.0(front) ’
+FL+FR+FLC+FRC+SL+SR
+
+‘hexagonal ’
+FL+FR+FC+BL+BR+BC
+
+‘6.1 ’
+FL+FR+FC+LFE+BC+SL+SR
+
+‘6.1(back) ’
+FL+FR+FC+LFE+BL+BR+BC
+
+‘6.1(front) ’
+FL+FR+LFE+FLC+FRC+SL+SR
+
+‘7.0 ’
+FL+FR+FC+BL+BR+SL+SR
+
+‘7.0(front) ’
+FL+FR+FC+FLC+FRC+SL+SR
+
+‘7.1 ’
+FL+FR+FC+LFE+BL+BR+SL+SR
+
+‘7.1(wide) ’
+FL+FR+FC+LFE+BL+BR+FLC+FRC
+
+‘7.1(wide-side) ’
+FL+FR+FC+LFE+FLC+FRC+SL+SR
+
+‘octagonal ’
+FL+FR+FC+BL+BR+BC+SL+SR
+
+‘downmix ’
+DL+DR
+
+
+
+
A custom channel layout can be specified as a sequence of terms, separated by
+’+’ or ’|’. Each term can be:
+
+ the name of a standard channel layout (e.g. ‘mono ’,
+‘stereo ’, ‘4.0 ’, ‘quad ’, ‘5.0 ’, etc.)
+
+ the name of a single channel (e.g. ‘FL ’, ‘FR ’, ‘FC ’, ‘LFE ’, etc.)
+
+ a number of channels, in decimal, optionally followed by ’c’, yielding
+the default channel layout for that number of channels (see the
+function av_get_default_channel_layout
)
+
+ a channel layout mask, in hexadecimal starting with "0x" (see the
+AV_CH_*
macros in libavutil/channel_layout.h ).
+
+
+
Starting from libavutil version 53 the trailing character "c" to
+specify a number of channels will be required, while a channel layout
+mask could also be specified as a decimal number (if and only if not
+followed by "c").
+
+
See also the function av_get_channel_layout
defined in
+libavutil/channel_layout.h .
+
+
+
7 Expression Evaluation# TOC
+
+
When evaluating an arithmetic expression, FFmpeg uses an internal
+formula evaluator, implemented through the libavutil/eval.h
+interface.
+
+
An expression may contain unary, binary operators, constants, and
+functions.
+
+
Two expressions expr1 and expr2 can be combined to form
+another expression "expr1 ;expr2 ".
+expr1 and expr2 are evaluated in turn, and the new
+expression evaluates to the value of expr2 .
+
+
The following binary operators are available: +
, -
,
+*
, /
, ^
.
+
+
The following unary operators are available: +
, -
.
+
+
The following functions are available:
+
+abs(x)
+Compute absolute value of x .
+
+
+acos(x)
+Compute arccosine of x .
+
+
+asin(x)
+Compute arcsine of x .
+
+
+atan(x)
+Compute arctangent of x .
+
+
+between(x, min, max)
+Return 1 if x is greater than or equal to min and lesser than or
+equal to max , 0 otherwise.
+
+
+bitand(x, y)
+bitor(x, y)
+Compute bitwise and/or operation on x and y .
+
+The results of the evaluation of x and y are converted to
+integers before executing the bitwise operation.
+
+Note that both the conversion to integer and the conversion back to
+floating point can lose precision. Beware of unexpected results for
+large numbers (usually 2^53 and larger).
+
+
+ceil(expr)
+Round the value of expression expr upwards to the nearest
+integer. For example, "ceil(1.5)" is "2.0".
+
+
+clip(x, min, max)
+Return the value of x clipped between min and max .
+
+
+cos(x)
+Compute cosine of x .
+
+
+cosh(x)
+Compute hyperbolic cosine of x .
+
+
+eq(x, y)
+Return 1 if x and y are equivalent, 0 otherwise.
+
+
+exp(x)
+Compute exponential of x (with base e
, the Euler’s number).
+
+
+floor(expr)
+Round the value of expression expr downwards to the nearest
+integer. For example, "floor(-1.5)" is "-2.0".
+
+
+gauss(x)
+Compute Gauss function of x , corresponding to
+exp(-x*x/2) / sqrt(2*PI)
.
+
+
+gcd(x, y)
+Return the greatest common divisor of x and y . If both x and
+y are 0 or either or both are less than zero then behavior is undefined.
+
+
+gt(x, y)
+Return 1 if x is greater than y , 0 otherwise.
+
+
+gte(x, y)
+Return 1 if x is greater than or equal to y , 0 otherwise.
+
+
+hypot(x, y)
+This function is similar to the C function with the same name; it returns
+"sqrt(x *x + y *y )", the length of the hypotenuse of a
+right triangle with sides of length x and y , or the distance of the
+point (x , y ) from the origin.
+
+
+if(x, y)
+Evaluate x , and if the result is non-zero return the result of
+the evaluation of y , return 0 otherwise.
+
+
+if(x, y, z)
+Evaluate x , and if the result is non-zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+ifnot(x, y)
+Evaluate x , and if the result is zero return the result of the
+evaluation of y , return 0 otherwise.
+
+
+ifnot(x, y, z)
+Evaluate x , and if the result is zero return the evaluation
+result of y , otherwise the evaluation result of z .
+
+
+isinf(x)
+Return 1.0 if x is +/-INFINITY, 0.0 otherwise.
+
+
+isnan(x)
+Return 1.0 if x is NAN, 0.0 otherwise.
+
+
+ld(var)
+Load the value of the internal variable with number
+var , which was previously stored with st(var , expr ).
+The function returns the loaded value.
+
+
+log(x)
+Compute natural logarithm of x .
+
+
+lt(x, y)
+Return 1 if x is lesser than y , 0 otherwise.
+
+
+lte(x, y)
+Return 1 if x is lesser than or equal to y , 0 otherwise.
+
+
+max(x, y)
+Return the maximum between x and y .
+
+
+min(x, y)
+Return the minimum between x and y .
+
+
+mod(x, y)
+Compute the remainder of division of x by y .
+
+
+not(expr)
+Return 1.0 if expr is zero, 0.0 otherwise.
+
+
+pow(x, y)
+Compute the power of x elevated to y , it is equivalent to
+"(x )^(y )".
+
+
+print(t)
+print(t, l)
+Print the value of expression t with loglevel l . If
+l is not specified then a default log level is used.
+Returns the value of the expression printed.
+
+Prints t with loglevel l
+
+
+random(x)
+Return a pseudo random value between 0.0 and 1.0. x is the index of the
+internal variable which will be used to save the seed/state.
+
+
+root(expr, max)
+Find an input value for which the function represented by expr
+with argument ld(0) is 0 in the interval 0..max .
+
+The expression in expr must denote a continuous function or the
+result is undefined.
+
+ld(0) is used to represent the function input value, which means
+that the given expression will be evaluated multiple times with
+various input values that the expression can access through
+ld(0)
. When the expression evaluates to 0 then the
+corresponding input value will be returned.
+
+
+sin(x)
+Compute sine of x .
+
+
+sinh(x)
+Compute hyperbolic sine of x .
+
+
+sqrt(expr)
+Compute the square root of expr . This is equivalent to
+"(expr )^.5".
+
+
+squish(x)
+Compute expression 1/(1 + exp(4*x))
.
+
+
+st(var, expr)
+Store the value of the expression expr in an internal
+variable. var specifies the number of the variable where to
+store the value, and it is a value ranging from 0 to 9. The function
+returns the value stored in the internal variable.
+Note, Variables are currently not shared between expressions.
+
+
+tan(x)
+Compute tangent of x .
+
+
+tanh(x)
+Compute hyperbolic tangent of x .
+
+
+taylor(expr, x)
+taylor(expr, x, id)
+Evaluate a Taylor series at x , given an expression representing
+the ld(id)
-th derivative of a function at 0.
+
+When the series does not converge the result is undefined.
+
+ld(id) is used to represent the derivative order in expr ,
+which means that the given expression will be evaluated multiple times
+with various input values that the expression can access through
+ld(id)
. If id is not specified then 0 is assumed.
+
+Note, when you have the derivatives at y instead of 0,
+taylor(expr, x-y)
can be used.
+
+
+time(0)
+Return the current (wallclock) time in seconds.
+
+
+trunc(expr)
+Round the value of expression expr towards zero to the nearest
+integer. For example, "trunc(-1.5)" is "-1.0".
+
+
+while(cond, expr)
+Evaluate expression expr while the expression cond is
+non-zero, and returns the value of the last expr evaluation, or
+NAN if cond was always false.
+
+
+
+
The following constants are available:
+
+PI
+area of the unit disc, approximately 3.14
+
+E
+exp(1) (Euler’s number), approximately 2.718
+
+PHI
+golden ratio (1+sqrt(5))/2, approximately 1.618
+
+
+
+
Assuming that an expression is considered "true" if it has a non-zero
+value, note that:
+
+
*
works like AND
+
+
+
works like OR
+
+
For example the construct:
+
if (A AND B) then C
is equivalent to:
+
if(A*B, C)
+
In your C code, you can extend the list of unary and binary functions,
+and define recognized constants, so that they are available for your
+expressions.
+
+
The evaluator also recognizes the International System unit prefixes.
+If ’i’ is appended after the prefix, binary prefixes are used, which
+are based on powers of 1024 instead of powers of 1000.
+The ’B’ postfix multiplies the value by 8, and can be appended after a
+unit prefix or used alone. This allows using for example ’KB’, ’MiB’,
+’G’ and ’B’ as number postfix.
+
+
The list of available International System prefixes follows, with
+indication of the corresponding powers of 10 and of 2.
+
+y
+10^-24 / 2^-80
+
+z
+10^-21 / 2^-70
+
+a
+10^-18 / 2^-60
+
+f
+10^-15 / 2^-50
+
+p
+10^-12 / 2^-40
+
+n
+10^-9 / 2^-30
+
+u
+10^-6 / 2^-20
+
+m
+10^-3 / 2^-10
+
+c
+10^-2
+
+d
+10^-1
+
+h
+10^2
+
+k
+10^3 / 2^10
+
+K
+10^3 / 2^10
+
+M
+10^6 / 2^20
+
+G
+10^9 / 2^30
+
+T
+10^12 / 2^40
+
+P
+10^15 / 2^50
+
+E
+10^18 / 2^60
+
+Z
+10^21 / 2^70
+
+Y
+10^24 / 2^80
+
+
+
+
+
+
8 OpenCL Options# TOC
+
+
When FFmpeg is configured with --enable-opencl
, it is possible
+to set the options for the global OpenCL context.
+
+
The list of supported options follows:
+
+
+build_options
+Set build options used to compile the registered kernels.
+
+See reference "OpenCL Specification Version: 1.2 chapter 5.6.4".
+
+
+platform_idx
+Select the index of the platform to run OpenCL code.
+
+The specified index must be one of the indexes in the device list
+which can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+device_idx
+Select the index of the device used to run OpenCL code.
+
+The specified index must be one of the indexes in the device list which
+can be obtained with ffmpeg -opencl_bench
or av_opencl_get_device_list()
.
+
+
+
+
+
+
9 Codec Options# TOC
+
+
libavcodec provides some generic global options, which can be set on
+all the encoders and decoders. In addition each codec may support
+so-called private options, which are specific for a given codec.
+
+
Sometimes, a global option may only affect a specific kind of codec,
+and may be nonsensical or ignored by another, so you need to be aware
+of the meaning of the specified options. Also some options are
+meant only for decoding or encoding.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVCodecContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
The list of supported options follow:
+
+
+b integer (encoding,audio,video )
+Set bitrate in bits/s. Default value is 200K.
+
+
+ab integer (encoding,audio )
+Set audio bitrate (in bits/s). Default value is 128K.
+
+
+bt integer (encoding,video )
+Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate
+tolerance specifies how far ratecontrol is willing to deviate from the
+target average bitrate value. This is not related to min/max
+bitrate. Lowering tolerance too much has an adverse effect on quality.
+
+
+flags flags (decoding/encoding,audio,video,subtitles )
+Set generic flags.
+
+Possible values:
+
+‘mv4 ’
+Use four motion vector by macroblock (mpeg4).
+
+‘qpel ’
+Use 1/4 pel motion compensation.
+
+‘loop ’
+Use loop filter.
+
+‘qscale ’
+Use fixed qscale.
+
+‘gmc ’
+Use gmc.
+
+‘mv0 ’
+Always try a mb with mv=<0,0>.
+
+‘input_preserved ’
+‘pass1 ’
+Use internal 2pass ratecontrol in first pass mode.
+
+‘pass2 ’
+Use internal 2pass ratecontrol in second pass mode.
+
+‘gray ’
+Only decode/encode grayscale.
+
+‘emu_edge ’
+Do not draw edges.
+
+‘psnr ’
+Set error[?] variables during encoding.
+
+‘truncated ’
+‘naq ’
+Normalize adaptive quantization.
+
+‘ildct ’
+Use interlaced DCT.
+
+‘low_delay ’
+Force low delay.
+
+‘global_header ’
+Place global headers in extradata instead of every keyframe.
+
+‘bitexact ’
+Only write platform-, build- and time-independent data. (except (I)DCT).
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+‘aic ’
+Apply H263 advanced intra coding / mpeg4 ac prediction.
+
+‘cbp ’
+Deprecated, use mpegvideo private options instead.
+
+‘qprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘ilme ’
+Apply interlaced motion estimation.
+
+‘cgop ’
+Use closed gop.
+
+
+
+
+me_method integer (encoding,video )
+Set motion estimation method.
+
+Possible values:
+
+‘zero ’
+zero motion estimation (fastest)
+
+‘full ’
+full motion estimation (slowest)
+
+‘epzs ’
+EPZS motion estimation (default)
+
+‘esa ’
+esa motion estimation (alias for full)
+
+‘tesa ’
+tesa motion estimation
+
+‘dia ’
+dia motion estimation (alias for epzs)
+
+‘log ’
+log motion estimation
+
+‘phods ’
+phods motion estimation
+
+‘x1 ’
+X1 motion estimation
+
+‘hex ’
+hex motion estimation
+
+‘umh ’
+umh motion estimation
+
+‘iter ’
+iter motion estimation
+
+
+
+
+extradata_size integer
+Set extradata size.
+
+
+time_base rational number
+Set codec time base.
+
+It is the fundamental unit of time (in seconds) in terms of which
+frame timestamps are represented. For fixed-fps content, timebase
+should be 1 / frame_rate
and timestamp increments should be
+identically 1.
+
+
+g integer (encoding,video )
+Set the group of picture size. Default value is 12.
+
+
+ar integer (decoding/encoding,audio )
+Set audio sampling rate (in Hz).
+
+
+ac integer (decoding/encoding,audio )
+Set number of audio channels.
+
+
+cutoff integer (encoding,audio )
+Set cutoff bandwidth.
+
+
+frame_size integer (encoding,audio )
+Set audio frame size.
+
+Each submitted frame except the last must contain exactly frame_size
+samples per channel. May be 0 when the codec has
+CODEC_CAP_VARIABLE_FRAME_SIZE set, in that case the frame size is not
+restricted. It is set by some decoders to indicate constant frame
+size.
+
+
+frame_number integer
+Set the frame number.
+
+
+delay integer
+qcomp float (encoding,video )
+Set video quantizer scale compression (VBR). It is used as a constant
+in the ratecontrol equation. Recommended range for default rc_eq:
+0.0-1.0.
+
+
+qblur float (encoding,video )
+Set video quantizer scale blur (VBR).
+
+
+qmin integer (encoding,video )
+Set min video quantizer scale (VBR). Must be included between -1 and
+69, default value is 2.
+
+
+qmax integer (encoding,video )
+Set max video quantizer scale (VBR). Must be included between -1 and
+1024, default value is 31.
+
+
+qdiff integer (encoding,video )
+Set max difference between the quantizer scale (VBR).
+
+
+bf integer (encoding,video )
+Set max number of B frames between non-B-frames.
+
+Must be an integer between -1 and 16. 0 means that B-frames are
+disabled. If a value of -1 is used, it will choose an automatic value
+depending on the encoder.
+
+Default value is 0.
+
+
+b_qfactor float (encoding,video )
+Set qp factor between P and B frames.
+
+
+rc_strategy integer (encoding,video )
+Set ratecontrol method.
+
+
+b_strategy integer (encoding,video )
+Set strategy to choose between I/P/B-frames.
+
+
+ps integer (encoding,video )
+Set RTP payload size in bytes.
+
+
+mv_bits integer
+header_bits integer
+i_tex_bits integer
+p_tex_bits integer
+i_count integer
+p_count integer
+skip_count integer
+misc_bits integer
+frame_bits integer
+codec_tag integer
+bug flags (decoding,video )
+Workaround not auto detected encoder bugs.
+
+Possible values:
+
+‘autodetect ’
+‘old_msmpeg4 ’
+some old lavc generated msmpeg4v3 files (no autodetection)
+
+‘xvid_ilace ’
+Xvid interlacing bug (autodetected if fourcc==XVIX)
+
+‘ump4 ’
+(autodetected if fourcc==UMP4)
+
+‘no_padding ’
+padding bug (autodetected)
+
+‘amv ’
+‘ac_vlc ’
+illegal vlc bug (autodetected per fourcc)
+
+‘qpel_chroma ’
+‘std_qpel ’
+old standard qpel (autodetected per fourcc/version)
+
+‘qpel_chroma2 ’
+‘direct_blocksize ’
+direct-qpel-blocksize bug (autodetected per fourcc/version)
+
+‘edge ’
+edge padding bug (autodetected per fourcc/version)
+
+‘hpel_chroma ’
+‘dc_clip ’
+‘ms ’
+Workaround various bugs in microsoft broken decoders.
+
+‘trunc ’
+truncated frames
+
+
+
+
+lelim integer (encoding,video )
+Set single coefficient elimination threshold for luminance (negative
+values also consider DC coefficient).
+
+
+celim integer (encoding,video )
+Set single coefficient elimination threshold for chrominance (negative
+values also consider dc coefficient)
+
+
+strict integer (decoding/encoding,audio,video )
+Specify how strictly to follow the standards.
+
+Possible values:
+
+‘very ’
+strictly conform to an older more strict version of the spec or reference software
+
+‘strict ’
+strictly conform to all the things in the spec no matter what consequences
+
+‘normal ’
+‘unofficial ’
+allow unofficial extensions
+
+‘experimental ’
+allow non standardized experimental things, experimental
+(unfinished/work in progress/not well tested) decoders and encoders.
+Note: experimental decoders can pose a security risk, do not use this for
+decoding untrusted input.
+
+
+
+
+b_qoffset float (encoding,video )
+Set QP offset between P and B frames.
+
+
+err_detect flags (decoding,audio,video )
+Set error detection flags.
+
+Possible values:
+
+‘crccheck ’
+verify embedded CRCs
+
+‘bitstream ’
+detect bitstream specification deviations
+
+‘buffer ’
+detect improper bitstream length
+
+‘explode ’
+abort decoding on minor error detection
+
+‘ignore_err ’
+ignore decoding errors, and continue decoding.
+This is useful if you want to analyze the content of a video and thus want
+everything to be decoded no matter what. This option will not result in a video
+that is pleasing to watch in case of errors.
+
+‘careful ’
+consider things that violate the spec and have not been seen in the wild as errors
+
+‘compliant ’
+consider all spec non compliancies as errors
+
+‘aggressive ’
+consider things that a sane encoder should not do as an error
+
+
+
+
+has_b_frames integer
+block_align integer
+mpeg_quant integer (encoding,video )
+Use MPEG quantizers instead of H.263.
+
+
+qsquish float (encoding,video )
+How to keep quantizer between qmin and qmax (0 = clip, 1 = use
+differentiable function).
+
+
+rc_qmod_amp float (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_qmod_freq integer (encoding,video )
+Set experimental quantizer modulation.
+
+
+rc_override_count integer
+rc_eq string (encoding,video )
+Set rate control equation. When computing the expression, besides the
+standard functions defined in the section ’Expression Evaluation’, the
+following functions are available: bits2qp(bits), qp2bits(qp). Also
+the following constants are available: iTex pTex tex mv fCode iCount
+mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex
+avgTex.
+
+
+maxrate integer (encoding,audio,video )
+Set max bitrate tolerance (in bits/s). Requires bufsize to be set.
+
+
+minrate integer (encoding,audio,video )
+Set min bitrate tolerance (in bits/s). Most useful in setting up a CBR
+encode. It is of little use elsewise.
+
+
+bufsize integer (encoding,audio,video )
+Set ratecontrol buffer size (in bits).
+
+
+rc_buf_aggressivity float (encoding,video )
+Currently useless.
+
+
+i_qfactor float (encoding,video )
+Set QP factor between P and I frames.
+
+
+i_qoffset float (encoding,video )
+Set QP offset between P and I frames.
+
+
+rc_init_cplx float (encoding,video )
+Set initial complexity for 1-pass encoding.
+
+
+dct integer (encoding,video )
+Set DCT algorithm.
+
+Possible values:
+
+‘auto ’
+autoselect a good one (default)
+
+‘fastint ’
+fast integer
+
+‘int ’
+accurate integer
+
+‘mmx ’
+‘altivec ’
+‘faan ’
+floating point AAN DCT
+
+
+
+
+lumi_mask float (encoding,video )
+Compress bright areas stronger than medium ones.
+
+
+tcplx_mask float (encoding,video )
+Set temporal complexity masking.
+
+
+scplx_mask float (encoding,video )
+Set spatial complexity masking.
+
+
+p_mask float (encoding,video )
+Set inter masking.
+
+
+dark_mask float (encoding,video )
+Compress dark areas stronger than medium ones.
+
+
+idct integer (decoding/encoding,video )
+Select IDCT implementation.
+
+Possible values:
+
+‘auto ’
+‘int ’
+‘simple ’
+‘simplemmx ’
+‘simpleauto ’
+Automatically pick a IDCT compatible with the simple one
+
+
+‘arm ’
+‘altivec ’
+‘sh4 ’
+‘simplearm ’
+‘simplearmv5te ’
+‘simplearmv6 ’
+‘simpleneon ’
+‘simplealpha ’
+‘ipp ’
+‘xvidmmx ’
+‘faani ’
+floating point AAN IDCT
+
+
+
+
+slice_count integer
+ec flags (decoding,video )
+Set error concealment strategy.
+
+Possible values:
+
+‘guess_mvs ’
+iterative motion vector (MV) search (slow)
+
+‘deblock ’
+use strong deblock filter for damaged MBs
+
+‘favor_inter ’
+favor predicting from the previous frame instead of the current
+
+
+
+
+bits_per_coded_sample integer
+pred integer (encoding,video )
+Set prediction method.
+
+Possible values:
+
+‘left ’
+‘plane ’
+‘median ’
+
+
+
+aspect rational number (encoding,video )
+Set sample aspect ratio.
+
+
+debug flags (decoding/encoding,audio,video,subtitles )
+Print specific debug info.
+
+Possible values:
+
+‘pict ’
+picture info
+
+‘rc ’
+rate control
+
+‘bitstream ’
+‘mb_type ’
+macroblock (MB) type
+
+‘qp ’
+per-block quantization parameter (QP)
+
+‘mv ’
+motion vector
+
+‘dct_coeff ’
+‘skip ’
+‘startcode ’
+‘pts ’
+‘er ’
+error recognition
+
+‘mmco ’
+memory management control operations (H.264)
+
+‘bugs ’
+‘vis_qp ’
+visualize quantization parameter (QP), lower QP are tinted greener
+
+‘vis_mb_type ’
+visualize block types
+
+‘buffers ’
+picture buffer allocations
+
+‘thread_ops ’
+threading operations
+
+‘nomc ’
+skip motion compensation
+
+
+
+
+vismv integer (decoding,video )
+Visualize motion vectors (MVs).
+
+This option is deprecated, see the codecview filter instead.
+
+Possible values:
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+cmp integer (encoding,video )
+Set full pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+subcmp integer (encoding,video )
+Set sub pel me compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+mbcmp integer (encoding,video )
+Set macroblock compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+ildctcmp integer (encoding,video )
+Set interlaced dct compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+dia_size integer (encoding,video )
+Set diamond type & size for motion estimation.
+
+
+last_pred integer (encoding,video )
+Set amount of motion predictors from the previous frame.
+
+
+preme integer (encoding,video )
+Set pre motion estimation.
+
+
+precmp integer (encoding,video )
+Set pre motion estimation compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+pre_dia_size integer (encoding,video )
+Set diamond type & size for motion estimation pre-pass.
+
+
+subq integer (encoding,video )
+Set sub pel motion estimation quality.
+
+
+dtg_active_format integer
+me_range integer (encoding,video )
+Set limit motion vectors range (1023 for DivX player).
+
+
+ibias integer (encoding,video )
+Set intra quant bias.
+
+
+pbias integer (encoding,video )
+Set inter quant bias.
+
+
+color_table_id integer
+global_quality integer (encoding,audio,video )
+coder integer (encoding,video )
+
+Possible values:
+
+‘vlc ’
+variable length coder / huffman coder
+
+‘ac ’
+arithmetic coder
+
+‘raw ’
+raw (no encoding)
+
+‘rle ’
+run-length coder
+
+‘deflate ’
+deflate-based coder
+
+
+
+
+context integer (encoding,video )
+Set context model.
+
+
+slice_flags integer
+xvmc_acceleration integer
+mbd integer (encoding,video )
+Set macroblock decision algorithm (high quality mode).
+
+Possible values:
+
+‘simple ’
+use mbcmp (default)
+
+‘bits ’
+use fewest bits
+
+‘rd ’
+use best rate distortion
+
+
+
+
+stream_codec_tag integer
+sc_threshold integer (encoding,video )
+Set scene change threshold.
+
+
+lmin integer (encoding,video )
+Set min lagrange factor (VBR).
+
+
+lmax integer (encoding,video )
+Set max lagrange factor (VBR).
+
+
+nr integer (encoding,video )
+Set noise reduction.
+
+
+rc_init_occupancy integer (encoding,video )
+Set number of bits which should be loaded into the rc buffer before
+decoding starts.
+
+
+flags2 flags (decoding/encoding,audio,video )
+
+Possible values:
+
+‘fast ’
+Allow non spec compliant speedup tricks.
+
+‘sgop ’
+Deprecated, use mpegvideo private options instead.
+
+‘noout ’
+Skip bitstream encoding.
+
+‘ignorecrop ’
+Ignore cropping information from sps.
+
+‘local_header ’
+Place global headers at every keyframe instead of in extradata.
+
+‘chunks ’
+Frame data might be split into multiple chunks.
+
+‘showall ’
+Show all frames before the first keyframe.
+
+‘skiprd ’
+Deprecated, use mpegvideo private options instead.
+
+‘export_mvs ’
+Export motion vectors into frame side-data (see AV_FRAME_DATA_MOTION_VECTORS
)
+for codecs that support it. See also doc/examples/export_mvs.c .
+
+
+
+
+error integer (encoding,video )
+qns integer (encoding,video )
+Deprecated, use mpegvideo private options instead.
+
+
+threads integer (decoding/encoding,video )
+
+Possible values:
+
+‘auto ’
+detect a good number of threads
+
+
+
+
+me_threshold integer (encoding,video )
+Set motion estimation threshold.
+
+
+mb_threshold integer (encoding,video )
+Set macroblock threshold.
+
+
+dc integer (encoding,video )
+Set intra_dc_precision.
+
+
+nssew integer (encoding,video )
+Set nsse weight.
+
+
+skip_top integer (decoding,video )
+Set number of macroblock rows at the top which are skipped.
+
+
+skip_bottom integer (decoding,video )
+Set number of macroblock rows at the bottom which are skipped.
+
+
+profile integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+‘aac_main ’
+‘aac_low ’
+‘aac_ssr ’
+‘aac_ltp ’
+‘aac_he ’
+‘aac_he_v2 ’
+‘aac_ld ’
+‘aac_eld ’
+‘mpeg2_aac_low ’
+‘mpeg2_aac_he ’
+‘mpeg4_sp ’
+‘mpeg4_core ’
+‘mpeg4_main ’
+‘mpeg4_asp ’
+‘dts ’
+‘dts_es ’
+‘dts_96_24 ’
+‘dts_hd_hra ’
+‘dts_hd_ma ’
+
+
+
+level integer (encoding,audio,video )
+
+Possible values:
+
+‘unknown ’
+
+
+
+lowres integer (decoding,audio,video )
+Decode at 1=1/2, 2=1/4, 3=1/8 resolutions.
+
+
+skip_threshold integer (encoding,video )
+Set frame skip threshold.
+
+
+skip_factor integer (encoding,video )
+Set frame skip factor.
+
+
+skip_exp integer (encoding,video )
+Set frame skip exponent.
+Negative values behave identical to the corresponding positive ones, except
+that the score is normalized.
+Positive values exist primarily for compatibility reasons and are not so useful.
+
+
+skipcmp integer (encoding,video )
+Set frame skip compare function.
+
+Possible values:
+
+‘sad ’
+sum of absolute differences, fast (default)
+
+‘sse ’
+sum of squared errors
+
+‘satd ’
+sum of absolute Hadamard transformed differences
+
+‘dct ’
+sum of absolute DCT transformed differences
+
+‘psnr ’
+sum of squared quantization errors (avoid, low quality)
+
+‘bit ’
+number of bits needed for the block
+
+‘rd ’
+rate distortion optimal, slow
+
+‘zero ’
+0
+
+‘vsad ’
+sum of absolute vertical differences
+
+‘vsse ’
+sum of squared vertical differences
+
+‘nsse ’
+noise preserving sum of squared differences
+
+‘w53 ’
+5/3 wavelet, only used in snow
+
+‘w97 ’
+9/7 wavelet, only used in snow
+
+‘dctmax ’
+‘chroma ’
+
+
+
+border_mask float (encoding,video )
+Increase the quantizer for macroblocks close to borders.
+
+
+mblmin integer (encoding,video )
+Set min macroblock lagrange factor (VBR).
+
+
+mblmax integer (encoding,video )
+Set max macroblock lagrange factor (VBR).
+
+
+mepc integer (encoding,video )
+Set motion estimation bitrate penalty compensation (1.0 = 256).
+
+
+skip_loop_filter integer (decoding,video )
+skip_idct integer (decoding,video )
+skip_frame integer (decoding,video )
+
+Make decoder discard processing depending on the frame type selected
+by the option value.
+
+skip_loop_filter skips frame loop filtering, skip_idct
+skips frame IDCT/dequantization, skip_frame skips decoding.
+
+Possible values:
+
+‘none ’
+Discard no frame.
+
+
+‘default ’
+Discard useless frames like 0-sized frames.
+
+
+‘noref ’
+Discard all non-reference frames.
+
+
+‘bidir ’
+Discard all bidirectional frames.
+
+
+‘nokey ’
+Discard all frames except keyframes.
+
+
+‘all ’
+Discard all frames.
+
+
+
+Default value is ‘default ’.
+
+
+bidir_refine integer (encoding,video )
+Refine the two motion vectors used in bidirectional macroblocks.
+
+
+brd_scale integer (encoding,video )
+Downscale frames for dynamic B-frame decision.
+
+
+keyint_min integer (encoding,video )
+Set minimum interval between IDR-frames.
+
+
+refs integer (encoding,video )
+Set reference frames to consider for motion compensation.
+
+
+chromaoffset integer (encoding,video )
+Set chroma qp offset from luma.
+
+
+trellis integer (encoding,audio,video )
+Set rate-distortion optimal quantization.
+
+
+sc_factor integer (encoding,video )
+Set value multiplied by qscale for each frame and added to
+scene_change_score.
+
+
+mv0_threshold integer (encoding,video )
+b_sensitivity integer (encoding,video )
+Adjust sensitivity of b_frame_strategy 1.
+
+
+compression_level integer (encoding,audio,video )
+min_prediction_order integer (encoding,audio )
+max_prediction_order integer (encoding,audio )
+timecode_frame_start integer (encoding,video )
+Set GOP timecode frame start number, in non drop frame format.
+
+
+request_channels integer (decoding,audio )
+Set desired number of audio channels.
+
+
+bits_per_raw_sample integer
+channel_layout integer (decoding/encoding,audio )
+
+Possible values:
+
+request_channel_layout integer (decoding,audio )
+
+Possible values:
+
+rc_max_vbv_use float (encoding,video )
+rc_min_vbv_use float (encoding,video )
+ticks_per_frame integer (decoding/encoding,audio,video )
+color_primaries integer (decoding/encoding,video )
+color_trc integer (decoding/encoding,video )
+colorspace integer (decoding/encoding,video )
+color_range integer (decoding/encoding,video )
+chroma_sample_location integer (decoding/encoding,video )
+log_level_offset integer
+Set the log level offset.
+
+
+slices integer (encoding,video )
+Number of slices, used in parallelized encoding.
+
+
+thread_type flags (decoding/encoding,video )
+Select which multithreading methods to use.
+
+Use of ‘frame ’ will increase decoding delay by one frame per
+thread, so clients which cannot provide future frames should not use
+it.
+
+Possible values:
+
+‘slice ’
+Decode more than one part of a single frame at once.
+
+Multithreading using slices works only when the video was encoded with
+slices.
+
+
+‘frame ’
+Decode more than one frame at once.
+
+
+
+Default value is ‘slice+frame ’.
+
+
+audio_service_type integer (encoding,audio )
+Set audio service type.
+
+Possible values:
+
+‘ma ’
+Main Audio Service
+
+‘ef ’
+Effects
+
+‘vi ’
+Visually Impaired
+
+‘hi ’
+Hearing Impaired
+
+‘di ’
+Dialogue
+
+‘co ’
+Commentary
+
+‘em ’
+Emergency
+
+‘vo ’
+Voice Over
+
+‘ka ’
+Karaoke
+
+
+
+
+request_sample_fmt sample_fmt (decoding,audio )
+Set sample format audio decoders should prefer. Default value is
+none
.
+
+
+pkt_timebase rational number
+sub_charenc encoding (decoding,subtitles )
+Set the input subtitles character encoding.
+
+
+field_order field_order (video )
+Set/override the field order of the video.
+Possible values:
+
+‘progressive ’
+Progressive video
+
+‘tt ’
+Interlaced video, top field coded and displayed first
+
+‘bb ’
+Interlaced video, bottom field coded and displayed first
+
+‘tb ’
+Interlaced video, top coded first, bottom displayed first
+
+‘bt ’
+Interlaced video, bottom coded first, top displayed first
+
+
+
+
+skip_alpha integer (decoding,video )
+Set to 1 to disable processing alpha (transparency). This works like the
+‘gray ’ flag in the flags option which skips chroma information
+instead of alpha. Default is 0.
+
+
+codec_whitelist list (input )
+"," separated List of allowed decoders. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+Stream parameters.
+For example, to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
+
10 Decoders# TOC
+
+
Decoders are configured elements in FFmpeg which allow the decoding of
+multimedia streams.
+
+
When you configure your FFmpeg build, all the supported native decoders
+are enabled by default. Decoders requiring an external library must be enabled
+manually via the corresponding --enable-lib
option. You can list all
+available decoders using the configure option --list-decoders
.
+
+
You can disable all the decoders with the configure option
+--disable-decoders
and selectively enable / disable single decoders
+with the options --enable-decoder=DECODER
/
+--disable-decoder=DECODER
.
+
+
The option -decoders
of the ff* tools will display the list of
+enabled decoders.
+
+
+
+
11 Video Decoders# TOC
+
+
A description of some of the currently available video decoders
+follows.
+
+
+
11.1 rawvideo# TOC
+
+
Raw video decoder.
+
+
This decoder decodes rawvideo streams.
+
+
+
11.1.1 Options# TOC
+
+
+top top_field_first
+Specify the assumed field type of the input video.
+
+-1
+the video is assumed to be progressive (default)
+
+0
+bottom-field-first is assumed
+
+1
+top-field-first is assumed
+
+
+
+
+
+
+
+
+
12 Audio Decoders# TOC
+
+
A description of some of the currently available audio decoders
+follows.
+
+
+
12.1 ac3# TOC
+
+
AC-3 audio decoder.
+
+
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
+the undocumented RealAudio 3 (a.k.a. dnet).
+
+
+
12.1.1 AC-3 Decoder Options# TOC
+
+
+-drc_scale value
+Dynamic Range Scale Factor. The factor to apply to dynamic range values
+from the AC-3 stream. This factor is applied exponentially.
+There are 3 notable scale factor ranges:
+
+drc_scale == 0
+DRC disabled. Produces full range audio.
+
+0 < drc_scale <= 1
+DRC enabled. Applies a fraction of the stream DRC value.
+Audio reproduction is between full range and full compression.
+
+drc_scale > 1
+DRC enabled. Applies drc_scale asymmetrically.
+Loud sounds are fully compressed. Soft sounds are enhanced.
+
+
+
+
+
+
+
+
12.2 ffwavesynth# TOC
+
+
Internal wave synthesizer.
+
+
This decoder generates wave patterns according to predefined sequences. Its
+use is purely internal and the format of the data it accepts is not publicly
+documented.
+
+
+
12.3 libcelt# TOC
+
+
libcelt decoder wrapper.
+
+
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
+Requires the presence of the libcelt headers and library during configuration.
+You need to explicitly configure the build with --enable-libcelt
.
+
+
+
12.4 libgsm# TOC
+
+
libgsm decoder wrapper.
+
+
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
+the presence of the libgsm headers and library during configuration. You need
+to explicitly configure the build with --enable-libgsm
.
+
+
This decoder supports both the ordinary GSM and the Microsoft variant.
+
+
+
12.5 libilbc# TOC
+
+
libilbc decoder wrapper.
+
+
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
+audio codec. Requires the presence of the libilbc headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libilbc
.
+
+
+
12.5.1 Options# TOC
+
+
The following option is supported by the libilbc wrapper.
+
+
+enhance
+
+Enable the enhancement of the decoded audio when set to 1. The default
+value is 0 (disabled).
+
+
+
+
+
+
12.6 libopencore-amrnb# TOC
+
+
libopencore-amrnb decoder wrapper.
+
+
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
+Narrowband audio codec. Using it requires the presence of the
+libopencore-amrnb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrnb
.
+
+
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
+without this library.
+
+
+
12.7 libopencore-amrwb# TOC
+
+
libopencore-amrwb decoder wrapper.
+
+
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
+Wideband audio codec. Using it requires the presence of the
+libopencore-amrwb headers and library during configuration. You need to
+explicitly configure the build with --enable-libopencore-amrwb
.
+
+
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
+without this library.
+
+
+
12.8 libopus# TOC
+
+
libopus decoder wrapper.
+
+
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
+Requires the presence of the libopus headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libopus
.
+
+
An FFmpeg native decoder for Opus exists, so users can decode Opus
+without this library.
+
+
+
+
13 Subtitles Decoders# TOC
+
+
+
13.1 dvdsub# TOC
+
+
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
+also be found in VobSub file pairs and in some Matroska files.
+
+
+
13.1.1 Options# TOC
+
+
+palette
+Specify the global palette used by the bitmaps. When stored in VobSub, the
+palette is normally specified in the index file; in Matroska, the palette is
+stored in the codec extra-data in the same format as in VobSub. In DVDs, the
+palette is stored in the IFO file, and therefore not available when reading
+from dumped VOB files.
+
+The format for this option is a string containing 16 24-bits hexadecimal
+numbers (without 0x prefix) separated by commas, for example 0d00ee,
+ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
+7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b
.
+
+
+ifo_palette
+Specify the IFO file from which the global palette is obtained.
+(experimental)
+
+
+forced_subs_only
+Only decode subtitle entries marked as forced. Some titles have forced
+and non-forced subtitles in the same track. Setting this flag to 1
+will only keep the forced subtitles. Default value is 0
.
+
+
+
+
+
13.2 libzvbi-teletext# TOC
+
+
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
+subtitles. Requires the presence of the libzvbi headers and library during
+configuration. You need to explicitly configure the build with
+--enable-libzvbi
.
+
+
+
13.2.1 Options# TOC
+
+
+txt_page
+List of teletext page numbers to decode. You may use the special * string to
+match all pages. Pages that do not match the specified list are dropped.
+Default value is *.
+
+txt_chop_top
+Discards the top teletext line. Default value is 1.
+
+txt_format
+Specifies the format of the decoded subtitles. The teletext decoder is capable
+of decoding the teletext pages to bitmaps or to simple text, you should use
+"bitmap" for teletext pages, because certain graphics and colors cannot be
+expressed in simple text. You might use "text" for teletext based subtitles if
+your application can handle simple text based subtitles. Default value is
+bitmap.
+
+txt_left
+X offset of generated bitmaps, default is 0.
+
+txt_top
+Y offset of generated bitmaps, default is 0.
+
+txt_chop_spaces
+Chops leading and trailing spaces and removes empty lines from the generated
+text. This option is useful for teletext based subtitles where empty spaces may
+be present at the start or at the end of the lines or empty lines may be
+present between the subtitle lines because of double-sized teletext characters.
+Default value is 1.
+
+txt_duration
+Sets the display duration of the decoded teletext pages or subtitles in
+milliseconds. Default value is 30000, which is 30 seconds.
+
+txt_transparent
+Force transparent background of the generated teletext bitmaps. Default value
+is 0 which means an opaque (black) background.
+
+
+
+
+
14 Bitstream Filters# TOC
+
+
When you configure your FFmpeg build, all the supported bitstream
+filters are enabled by default. You can list all available ones using
+the configure option --list-bsfs
.
+
+
You can disable all the bitstream filters using the configure option
+--disable-bsfs
, and selectively enable any bitstream filter using
+the option --enable-bsf=BSF
, or you can disable a particular
+bitstream filter using the option --disable-bsf=BSF
.
+
+
The option -bsfs
of the ff* tools will display the list of
+all the supported bitstream filters included in your build.
+
+
The ff* tools have a -bsf option applied per stream, taking a
+comma-separated list of filters, whose parameters follow the filter
+name after a ’=’.
+
+
+
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
+
+
+
Below is a description of the currently available bitstream filters,
+with their parameters, if any.
+
+
+
14.1 aac_adtstoasc# TOC
+
+
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
+bitstream filter.
+
+
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
+ADTS header and removes the ADTS header.
+
+
This is required for example when copying an AAC stream from a raw
+ADTS AAC container to a FLV or a MOV/MP4 file.
+
+
+
14.2 chomp# TOC
+
+
Remove zero padding at the end of a packet.
+
+
+
14.3 dump_extra# TOC
+
+
Add extradata to the beginning of the filtered packets.
+
+
The additional argument specifies which packets should be filtered.
+It accepts the values:
+
+‘a ’
+add extradata to all key packets, but only if local_header is
+set in the flags2 codec context field
+
+
+‘k ’
+add extradata to all key packets
+
+
+‘e ’
+add extradata to all packets
+
+
+
+
If not specified it is assumed ‘k ’.
+
+
For example the following ffmpeg
command forces a global
+header (thus disabling individual packet headers) in the H.264 packets
+generated by the libx264
encoder, but corrects them by adding
+the header stored in extradata to the key packets:
+
+
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
+
+
+
+
14.4 h264_mp4toannexb# TOC
+
+
Convert an H.264 bitstream from length prefixed mode to start code
+prefixed mode (as defined in the Annex B of the ITU-T H.264
+specification).
+
+
This is required by some streaming formats, typically the MPEG-2
+transport stream format ("mpegts").
+
+
For example to remux an MP4 file containing an H.264 stream to mpegts
+format with ffmpeg
, you can use the command:
+
+
+
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
+
+
+
+
14.5 imxdump# TOC
+
+
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
+Pro decoder. This filter only applies to the mpeg2video codec, and is
+likely not needed for Final Cut Pro 7 and newer with the appropriate
+-tag:v .
+
+
For example, to remux 30 MB/sec NTSC IMX to MOV:
+
+
+
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
+
+
+
+
14.6 mjpeg2jpeg# TOC
+
+
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
+
+
MJPEG is a video codec wherein each video frame is essentially a
+JPEG image. The individual frames can be extracted without loss,
+e.g. by
+
+
+
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
+
+
+
Unfortunately, these chunks are incomplete JPEG images, because
+they lack the DHT segment required for decoding. Quoting from
+http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml :
+
+
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
+commented that "MJPEG, or at least the MJPEG in AVIs having the
+MJPG fourcc, is restricted JPEG with a fixed – and *omitted* –
+Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
+and it must use basic Huffman encoding, not arithmetic or
+progressive. . . . You can indeed extract the MJPEG frames and
+decode them with a regular JPEG decoder, but you have to prepend
+the DHT segment to them, or else the decoder won’t have any idea
+how to decompress the data. The exact table necessary is given in
+the OpenDML spec."
+
+
This bitstream filter patches the header of frames extracted from an MJPEG
+stream (carrying the AVI1 header ID and lacking a DHT segment) to
+produce fully qualified JPEG images.
+
+
+
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
+exiftran -i -9 frame*.jpg
+ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
+
+
+
+
14.7 mjpega_dump_header# TOC
+
+
+
14.8 movsub# TOC
+
+
+
14.9 mp3_header_decompress# TOC
+
+
+
14.10 noise# TOC
+
+
Damages the contents of packets without damaging the container. Can be
+used for fuzzing or testing error resilience/concealment.
+
+
Parameters:
+A numeral string, whose value is related to how often output bytes will
+be modified. Therefore, values below or equal to 0 are forbidden, and
+the lower the more frequent bytes will be modified, with 1 meaning
+every byte is modified.
+
+
+
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
+
+
applies the modification to every byte.
+
+
+
14.11 remove_extra# TOC
+
+
+
15 Format Options# TOC
+
+
The libavformat library provides some generic global options, which
+can be set on all the muxers and demuxers. In addition each muxer or
+demuxer may support so-called private options, which are specific for
+that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
The list of supported options follows:
+
+
+avioflags flags (input/output )
+Possible values:
+
+‘direct ’
+Reduce buffering.
+
+
+
+
+probesize integer (input )
+Set probing size in bytes, i.e. the size of the data to analyze to get
+stream information. A higher value allows more information to be
+detected in case it is dispersed into the stream, but will increase
+latency. Must be an integer not lesser than 32. It is 5000000 by default.
+
+
+packetsize integer (output )
+Set packet size.
+
+
+fflags flags (input/output )
+Set format flags.
+
+Possible values:
+
+‘ignidx ’
+Ignore index.
+
+‘genpts ’
+Generate PTS.
+
+‘nofillin ’
+Do not fill in missing values that can be exactly calculated.
+
+‘noparse ’
+Disable AVParsers, this needs +nofillin
too.
+
+‘igndts ’
+Ignore DTS.
+
+‘discardcorrupt ’
+Discard corrupted frames.
+
+‘sortdts ’
+Try to interleave output packets by DTS.
+
+‘keepside ’
+Do not merge side data.
+
+‘latm ’
+Enable RTP MP4A-LATM payload.
+
+‘nobuffer ’
+Reduce the latency introduced by optional buffering
+
+‘bitexact ’
+Only write platform-, build- and time-independent data.
+This ensures that file and data checksums are reproducible and match between
+platforms. Its primary use is for regression testing.
+
+
+
+
+seek2any integer (input )
+Allow seeking to non-keyframes on demuxer level when supported if set to 1.
+Default is 0.
+
+
+analyzeduration integer (input )
+Specify how many microseconds are analyzed to probe the input. A
+higher value allows more accurate information to be detected, but will
+increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
+
+
+cryptokey hexadecimal string (input )
+Set decryption key.
+
+
+indexmem integer (input )
+Set max memory used for timestamp index (per stream).
+
+
+rtbufsize integer (input )
+Set max memory used for buffering real-time frames.
+
+
+fdebug flags (input/output )
+Print specific debug info.
+
+Possible values:
+
+‘ts ’
+
+
+
+max_delay integer (input/output )
+Set maximum muxing or demuxing delay in microseconds.
+
+
+fpsprobesize integer (input )
+Set number of frames used to probe fps.
+
+
+audio_preload integer (output )
+Set microseconds by which audio packets should be interleaved earlier.
+
+
+chunk_duration integer (output )
+Set microseconds for each chunk.
+
+
+chunk_size integer (output )
+Set size in bytes for each chunk.
+
+
+err_detect, f_err_detect flags (input )
+Set error detection flags. f_err_detect
is deprecated and
+should be used only via the ffmpeg
tool.
+
+Possible values:
+
+‘crccheck ’
+Verify embedded CRCs.
+
+‘bitstream ’
+Detect bitstream specification deviations.
+
+‘buffer ’
+Detect improper bitstream length.
+
+‘explode ’
+Abort decoding on minor error detection.
+
+‘careful ’
+Consider things that violate the spec and have not been seen in the
+wild as errors.
+
+‘compliant ’
+Consider all spec non compliancies as errors.
+
+‘aggressive ’
+Consider things that a sane encoder should not do as an error.
+
+
+
+
+use_wallclock_as_timestamps integer (input )
+Use wallclock as timestamps.
+
+
+avoid_negative_ts integer (output )
+
+Possible values:
+
+‘make_non_negative ’
+Shift timestamps to make them non-negative.
+Also note that this affects only leading negative timestamps, and not
+non-monotonic negative timestamps.
+
+‘make_zero ’
+Shift timestamps so that the first timestamp is 0.
+
+‘auto (default) ’
+Enables shifting when required by the target format.
+
+‘disabled ’
+Disables shifting of timestamps.
+
+
+
+When shifting is enabled, all output timestamps are shifted by the
+same amount. Audio, video, and subtitles desynching and relative
+timestamp differences are preserved compared to how they would have
+been without shifting.
+
+
+skip_initial_bytes integer (input )
+Set number of bytes to skip before reading header and frames.
+Default is 0.
+
+
+correct_ts_overflow integer (input )
+Correct single timestamp overflows if set to 1. Default is 1.
+
+
+flush_packets integer (output )
+Flush the underlying I/O stream after each packet. Default 1 enables it, and
+has the effect of reducing the latency; 0 disables it and may slightly
+increase performance in some cases.
+
+
+output_ts_offset offset (output )
+Set the output time offset.
+
+offset must be a time duration specification,
+see (ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+The offset is added by the muxer to the output timestamps.
+
+Specifying a positive offset means that the corresponding streams are
+delayed by the time duration specified in offset . Default value
+is 0
(meaning that no offset is applied).
+
+
+format_whitelist list (input )
+"," separated list of allowed demuxers. By default all are allowed.
+
+
+dump_separator string (input )
+Separator used to separate the fields printed on the command line about the
+stream parameters.
+For example to separate the fields with newlines and indentation:
+
+
ffprobe -dump_separator "
+ " -i ~/videos/matrixbench_mpeg2.mpg
+
+
+
+
+
+
+
15.1 Format stream specifiers# TOC
+
+
Format stream specifiers allow selection of one or more streams that
+match specific properties.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index.
+
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio,
+’s’ for subtitle, ’d’ for data, and ’t’ for attachments. If
+stream_index is given, then it matches the stream number
+stream_index of this type. Otherwise, it matches all streams of
+this type.
+
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number
+stream_index in the program with the id
+program_id . Otherwise, it matches all streams in the program.
+
+
+#stream_id
+Matches the stream by a format-specific ID.
+
+
+
+
The exact semantics of stream specifiers is defined by the
+avformat_match_stream_specifier()
function declared in the
+libavformat/avformat.h header.
+
+
+
16 Demuxers# TOC
+
+
Demuxers are configured elements in FFmpeg that can read the
+multimedia streams from a particular type of file.
+
+
When you configure your FFmpeg build, all the supported demuxers
+are enabled by default. You can list all available ones using the
+configure option --list-demuxers
.
+
+
You can disable all the demuxers using the configure option
+--disable-demuxers
, and selectively enable a single demuxer with
+the option --enable-demuxer=DEMUXER
, or disable it
+with the option --disable-demuxer=DEMUXER
.
+
+
The option -formats
of the ff* tools will display the list of
+enabled demuxers.
+
+
The description of some of the currently available demuxers follows.
+
+
+
16.1 applehttp# TOC
+
+
Apple HTTP Live Streaming demuxer.
+
+
This demuxer presents all AVStreams from all variant streams.
+The id field is set to the bitrate variant index number. By setting
+the discard flags on AVStreams (by pressing ’a’ or ’v’ in ffplay),
+the caller can decide which variant streams to actually receive.
+The total bitrate of the variant that the stream belongs to is
+available in a metadata key named "variant_bitrate".
+
+
+
16.2 apng# TOC
+
+
Animated Portable Network Graphics demuxer.
+
+
This demuxer is used to demux APNG files.
+All headers, but the PNG signature, up to (but not including) the first
+fcTL chunk are transmitted as extradata.
+Frames are then split as being all the chunks between two fcTL ones, or
+between the last fcTL and IEND chunks.
+
+
+-ignore_loop bool
+Ignore the loop variable in the file if set.
+
+-max_fps int
+Maximum framerate in frames per second (0 for no limit).
+
+-default_fps int
+Default framerate in frames per second when none is specified in the file
+(0 meaning as fast as possible).
+
+
+
+
+
16.3 asf# TOC
+
+
Advanced Systems Format demuxer.
+
+
This demuxer is used to demux ASF files and MMS network streams.
+
+
+-no_resync_search bool
+Do not try to resynchronize by looking for a certain optional start code.
+
+
+
+
+
16.4 concat# TOC
+
+
Virtual concatenation script demuxer.
+
+
This demuxer reads a list of files and other directives from a text file and
+demuxes them one after the other, as if all their packets had been muxed
+together.
+
+
The timestamps in the files are adjusted so that the first file starts at 0
+and each next file starts where the previous one finishes. Note that it is
+done globally and may cause gaps if all streams do not have exactly the same
+length.
+
+
All files must have the same streams (same codecs, same time base, etc.).
+
+
The duration of each file is used to adjust the timestamps of the next file:
+if the duration is incorrect (because it was computed using the bit-rate or
+because the file is truncated, for example), it can cause artifacts. The
+duration
directive can be used to override the duration stored in
+each file.
+
+
+
16.4.1 Syntax# TOC
+
+
The script is a text file in extended-ASCII, with one directive per line.
+Empty lines, leading spaces and lines starting with ’#’ are ignored. The
+following directive is recognized:
+
+
+file path
+Path to a file to read; special characters and spaces must be escaped with
+backslash or single quotes.
+
+All subsequent file-related directives apply to that file.
+
+
+ffconcat version 1.0
+Identify the script type and version. It also sets the safe option
+to 1 if it was set to its default -1.
+
+To make FFmpeg recognize the format automatically, this directive must
+appear exactly as is (no extra space or byte-order-mark) on the very first
+line of the script.
+
+
+duration dur
+Duration of the file. This information can be specified from the file;
+specifying it here may be more efficient or help if the information from the
+file is not available or accurate.
+
+If the duration is set for all files, then it is possible to seek in the
+whole concatenated video.
+
+
+stream
+Introduce a stream in the virtual file.
+All subsequent stream-related directives apply to the last introduced
+stream.
+Some streams properties must be set in order to allow identifying the
+matching streams in the subfiles.
+If no streams are defined in the script, the streams from the first file are
+copied.
+
+
+exact_stream_id id
+Set the id of the stream.
+If this directive is given, the stream with the corresponding id in the
+subfiles will be used.
+This is especially useful for MPEG-PS (VOB) files, where the order of the
+streams is not reliable.
+
+
+
+
+
+
16.4.2 Options# TOC
+
+
This demuxer accepts the following option:
+
+
+safe
+If set to 1, reject unsafe file paths. A file path is considered safe if it
+does not contain a protocol specification and is relative and all components
+only contain characters from the portable character set (letters, digits,
+period, underscore and hyphen) and have no period at the beginning of a
+component.
+
+If set to 0, any file name is accepted.
+
+The default is -1, it is equivalent to 1 if the format was automatically
+probed and 0 otherwise.
+
+
+auto_convert
+If set to 1, try to perform automatic conversions on packet data to make the
+streams concatenable.
+
+Currently, the only conversion is adding the h264_mp4toannexb bitstream
+filter to H.264 streams in MP4 format. This is necessary in particular if
+there are resolution changes.
+
+
+
+
+
+
16.5 flv# TOC
+
+
Adobe Flash Video Format demuxer.
+
+
This demuxer is used to demux FLV files and RTMP network streams.
+
+
+-flv_metadata bool
+Allocate the streams according to the onMetaData array content.
+
+
+
+
+
16.6 libgme# TOC
+
+
The Game Music Emu library is a collection of video game music file emulators.
+
+
See http://code.google.com/p/game-music-emu/ for more information.
+
+
Some files have multiple tracks. The demuxer will pick the first track by
+default. The track_index option can be used to select a different
+track. Track indexes start at 0. The demuxer exports the number of tracks as
+tracks meta data entry.
+
+
For very large files, the max_size option may have to be adjusted.
+
+
+
16.7 libquvi# TOC
+
+
Play media from Internet services using the quvi project.
+
+
The demuxer accepts a format option to request a specific quality. It
+is by default set to best .
+
+
See http://quvi.sourceforge.net/ for more information.
+
+
FFmpeg needs to be built with --enable-libquvi
for this demuxer to be
+enabled.
+
+
+
16.8 gif# TOC
+
+
Animated GIF demuxer.
+
+
It accepts the following options:
+
+
+min_delay
+Set the minimum valid delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 2.
+
+
+default_delay
+Set the default delay between frames in hundredths of seconds.
+Range is 0 to 6000. Default value is 10.
+
+
+ignore_loop
+GIF files can contain information to loop a certain number of times (or
+infinitely). If ignore_loop is set to 1, then the loop setting
+from the input will be ignored and looping will not occur. If set to 0,
+then looping will occur and will cycle the number of times according to
+the GIF. Default value is 1.
+
+
+
+
For example, with the overlay filter, place an infinitely looping GIF
+over another video:
+
+
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
+
+
+
Note that in the above example the shortest option for overlay filter is
+used to end the output video at the length of the shortest input file,
+which in this case is input.mp4 as the GIF in this example loops
+infinitely.
+
+
+
16.9 image2# TOC
+
+
Image file demuxer.
+
+
This demuxer reads from a list of image files specified by a pattern.
+The syntax and meaning of the pattern is specified by the
+option pattern_type .
+
+
The pattern may contain a suffix which is used to automatically
+determine the format of the images contained in the files.
+
+
The size, the pixel format, and the format of each image must be the
+same for all the files in the sequence.
+
+
This demuxer accepts the following options:
+
+framerate
+Set the frame rate for the video stream. It defaults to 25.
+
+loop
+If set to 1, loop over the input. Default value is 0.
+
+pattern_type
+Select the pattern type used to interpret the provided filename.
+
+pattern_type accepts one of the following values.
+
+sequence
+Select a sequence pattern type, used to specify a sequence of files
+indexed by sequential numbers.
+
+A sequence pattern may contain the string "%d" or "%0N d", which
+specifies the position of the characters representing a sequential
+number in each filename matched by the pattern. If the form
+"%d0N d" is used, the string representing the number in each
+filename is 0-padded and N is the total number of 0-padded
+digits representing the number. The literal character ’%’ can be
+specified in the pattern with the string "%%".
+
+If the sequence pattern contains "%d" or "%0N d", the first filename of
+the file list specified by the pattern must contain a number
+inclusively contained between start_number and
+start_number +start_number_range -1, and all the following
+numbers must be sequential.
+
+For example the pattern "img-%03d.bmp" will match a sequence of
+filenames of the form img-001.bmp , img-002.bmp , ...,
+img-010.bmp , etc.; the pattern "i%%m%%g-%d.jpg" will match a
+sequence of filenames of the form i%m%g-1.jpg ,
+i%m%g-2.jpg , ..., i%m%g-10.jpg , etc.
+
+Note that the pattern must not necessarily contain "%d" or
+"%0N d", for example to convert a single image file
+img.jpeg you can employ the command:
+
+
ffmpeg -i img.jpeg img.png
+
+
+
+glob
+Select a glob wildcard pattern type.
+
+The pattern is interpreted like a glob()
pattern. This is only
+selectable if libavformat was compiled with globbing support.
+
+
+glob_sequence (deprecated, will be removed)
+Select a mixed glob wildcard/sequence pattern.
+
+If your version of libavformat was compiled with globbing support, and
+the provided pattern contains at least one glob meta character among
+%*?[]{}
that is preceded by an unescaped "%", the pattern is
+interpreted like a glob()
pattern, otherwise it is interpreted
+like a sequence pattern.
+
+All glob special characters %*?[]{}
must be prefixed
+with "%". To escape a literal "%" you shall use "%%".
+
+For example the pattern foo-%*.jpeg
will match all the
+filenames prefixed by "foo-" and terminating with ".jpeg", and
+foo-%?%?%?.jpeg
will match all the filenames prefixed with
+"foo-", followed by a sequence of three characters, and terminating
+with ".jpeg".
+
+This pattern type is deprecated in favor of glob and
+sequence .
+
+
+
+Default value is glob_sequence .
+
+pixel_format
+Set the pixel format of the images to read. If not specified the pixel
+format is guessed from the first image file in the sequence.
+
+start_number
+Set the index of the file matched by the image file pattern to start
+to read from. Default value is 0.
+
+start_number_range
+Set the index interval range to check when looking for the first image
+file in the sequence, starting from start_number . Default value
+is 5.
+
+ts_from_file
+If set to 1, will set frame timestamp to modification time of image file. Note
+that monotonicity of timestamps is not provided: images go in the same order as
+without this option. Default value is 0.
+If set to 2, will set frame timestamp to the modification time of the image file in
+nanosecond precision.
+
+video_size
+Set the video size of the images to read. If not specified the video
+size is guessed from the first image file in the sequence.
+
+
+
+
+
16.9.1 Examples# TOC
+
+
+ Use ffmpeg
for creating a video from the images in the file
+sequence img-001.jpeg , img-002.jpeg , ..., assuming an
+input frame rate of 10 frames per second:
+
+
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
+
+
+ As above, but start by reading from a file with index 100 in the sequence:
+
+
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
+
+
+ Read images matching the "*.png" glob pattern , that is all the files
+terminating with the ".png" suffix:
+
+
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
+
+
+
+
+
16.10 mpegts# TOC
+
+
MPEG-2 transport stream demuxer.
+
+
+fix_teletext_pts
+Overrides teletext packet PTS and DTS values with the timestamps calculated
+from the PCR of the first program which the teletext stream is part of and is
+not discarded. Default value is 1, set this option to 0 if you want your
+teletext packet PTS and DTS values untouched.
+
+
+
+
+
16.11 rawvideo# TOC
+
+
Raw video demuxer.
+
+
This demuxer allows one to read raw video data. Since there is no header
+specifying the assumed video parameters, the user must specify them
+in order to be able to decode the data correctly.
+
+
This demuxer accepts the following options:
+
+framerate
+Set input video frame rate. Default value is 25.
+
+
+pixel_format
+Set the input video pixel format. Default value is yuv420p
.
+
+
+video_size
+Set the input video size. This value must be specified explicitly.
+
+
+
+
For example to read a rawvideo file input.raw with
+ffplay
, assuming a pixel format of rgb24
, a video
+size of 320x240
, and a frame rate of 10 images per second, use
+the command:
+
+
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
+
+
+
+
16.12 sbg# TOC
+
+
SBaGen script demuxer.
+
+
This demuxer reads the script language used by SBaGen
+http://uazu.net/sbagen/ to generate binaural beats sessions. A SBG
+script looks like that:
+
+
-SE
+a: 300-2.5/3 440+4.5/0
+b: 300-2.5/0 440+4.5/3
+off: -
+NOW == a
++0:07:00 == b
++0:14:00 == a
++0:21:00 == b
++0:30:00 off
+
+
+
A SBG script can mix absolute and relative timestamps. If the script uses
+either only absolute timestamps (including the script start time) or only
+relative ones, then its layout is fixed, and the conversion is
+straightforward. On the other hand, if the script mixes both kind of
+timestamps, then the NOW reference for relative timestamps will be
+taken from the current time of day at the time the script is read, and the
+script layout will be frozen according to that reference. That means that if
+the script is directly played, the actual times will match the absolute
+timestamps up to the sound controller’s clock accuracy, but if the user
+somehow pauses the playback or seeks, all times will be shifted accordingly.
+
+
+
16.13 tedcaptions# TOC
+
+
JSON captions used for TED Talks .
+
+
TED does not provide links to the captions, but they can be guessed from the
+page. The file tools/bookmarklets.html from the FFmpeg source tree
+contains a bookmarklet to expose them.
+
+
This demuxer accepts the following option:
+
+start_time
+Set the start time of the TED talk, in milliseconds. The default is 15000
+(15s). It is used to sync the captions with the downloadable videos, because
+they include a 15s intro.
+
+
+
+
Example: convert the captions to a format most players understand:
+
+
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
+
+
+
+
17 Metadata# TOC
+
+
FFmpeg is able to dump metadata from media files into a simple UTF-8-encoded
+INI-like text file and then load it back using the metadata muxer/demuxer.
+
+
The file format is as follows:
+
+ A file consists of a header and a number of metadata tags divided into sections,
+each on its own line.
+
+ The header is a ’;FFMETADATA’ string, followed by a version number (now 1).
+
+ Metadata tags are of the form ’key=value’
+
+ Immediately after header follows global metadata
+
+ After global metadata there may be sections with per-stream/per-chapter
+metadata.
+
+ A section starts with the section name in uppercase (i.e. STREAM or CHAPTER) in
+brackets (’[’, ’]’) and ends with next section or end of file.
+
+ At the beginning of a chapter section there may be an optional timebase to be
+used for start/end values. It must be in form ’TIMEBASE=num/den’, where num and
+den are integers. If the timebase is missing then start/end times are assumed to
+be in milliseconds.
+Next a chapter section must contain chapter start and end times in form
+’START=num’, ’END=num’, where num is a positive integer.
+
+ Empty lines and lines starting with ’;’ or ’#’ are ignored.
+
+ Metadata keys or values containing special characters (’=’, ’;’, ’#’, ’\’ and a
+newline) must be escaped with a backslash ’\’.
+
+ Note that whitespace in metadata (e.g. foo = bar) is considered to be a part of
+the tag (in the example above key is ’foo ’, value is ’ bar’).
+
+
+
A ffmetadata file might look like this:
+
+
;FFMETADATA1
+title=bike\\shed
+;this is a comment
+artist=FFmpeg troll team
+
+[CHAPTER]
+TIMEBASE=1/1000
+START=0
+#chapter ends at 0:01:00
+END=60000
+title=chapter \#1
+[STREAM]
+title=multi\
+line
+
+
+
By using the ffmetadata muxer and demuxer it is possible to extract
+metadata from an input file to an ffmetadata file, and then transcode
+the file into an output file with the edited ffmetadata file.
+
+
Extracting an ffmetadata file with ffmpeg goes as follows:
+
+
ffmpeg -i INPUT -f ffmetadata FFMETADATAFILE
+
+
+
Reinserting edited metadata information from the FFMETADATAFILE file can
+be done as:
+
+
ffmpeg -i INPUT -i FFMETADATAFILE -map_metadata 1 -codec copy OUTPUT
+
+
+
+
18 Protocols# TOC
+
+
Protocols are configured elements in FFmpeg that enable access to
+resources that require specific protocols.
+
+
When you configure your FFmpeg build, all the supported protocols are
+enabled by default. You can list all available ones using the
+configure option "–list-protocols".
+
+
You can disable all the protocols using the configure option
+"–disable-protocols", and selectively enable a protocol using the
+option "–enable-protocol=PROTOCOL ", or you can disable a
+particular protocol using the option
+"–disable-protocol=PROTOCOL ".
+
+
The option "-protocols" of the ff* tools will display the list of
+supported protocols.
+
+
A description of the currently available protocols follows.
+
+
+
18.1 bluray# TOC
+
+
Read BluRay playlist.
+
+
The accepted options are:
+
+angle
+BluRay angle
+
+
+chapter
+Start chapter (1...N)
+
+
+playlist
+Playlist to read (BDMV/PLAYLIST/?????.mpls)
+
+
+
+
+
Examples:
+
+
Read longest playlist from BluRay mounted to /mnt/bluray:
+
+
+
Read angle 2 of playlist 4 from BluRay mounted to /mnt/bluray, start from chapter 2:
+
+
-playlist 4 -angle 2 -chapter 2 bluray:/mnt/bluray
+
+
+
+
18.2 cache# TOC
+
+
Caching wrapper for input stream.
+
+
Cache the input stream to temporary file. It brings seeking capability to live streams.
+
+
+
+
+
18.3 concat# TOC
+
+
Physical concatenation protocol.
+
+
Allow reading and seeking from many resources in sequence as if they were
+a unique resource.
+
+
A URL accepted by this protocol has the syntax:
+
+
concat:URL1 |URL2 |...|URLN
+
+
+
where URL1 , URL2 , ..., URLN are the urls of the
+resource to be concatenated, each one possibly specifying a distinct
+protocol.
+
+
For example to read a sequence of files split1.mpeg ,
+split2.mpeg , split3.mpeg with ffplay
use the
+command:
+
+
ffplay concat:split1.mpeg\|split2.mpeg\|split3.mpeg
+
+
+
Note that you may need to escape the character "|" which is special for
+many shells.
+
+
+
18.4 crypto# TOC
+
+
AES-encrypted stream reading protocol.
+
+
The accepted options are:
+
+key
+Set the AES decryption key binary block from given hexadecimal representation.
+
+
+iv
+Set the AES decryption initialization vector binary block from given hexadecimal representation.
+
+
+
+
Accepted URL formats:
+
+
crypto:URL
+crypto+URL
+
+
+
+
18.5 data# TOC
+
+
Data in-line in the URI. See http://en.wikipedia.org/wiki/Data_URI_scheme .
+
+
For example, to convert a GIF file given inline with ffmpeg
:
+
+
ffmpeg -i "data:image/gif;base64,R0lGODdhCAAIAMIEAAAAAAAA//8AAP//AP///////////////ywAAAAACAAIAAADF0gEDLojDgdGiJdJqUX02iB4E8Q9jUMkADs=" smiley.png
+
+
+
+
18.6 file# TOC
+
+
File access protocol.
+
+
Allow reading from or writing to a file.
+
+
A file URL can have the form:
+
+
+
where filename is the path of the file to read.
+
+
An URL that does not have a protocol prefix will be assumed to be a
+file URL. Depending on the build, an URL that looks like a Windows
+path with the drive letter at the beginning will also be assumed to be
+a file URL (usually not the case in builds for unix-like systems).
+
+
For example to read from a file input.mpeg with ffmpeg
+use the command:
+
+
ffmpeg -i file:input.mpeg output.mpeg
+
+
+
This protocol accepts the following options:
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable for files on slow medium.
+
+
+
+
+
18.7 ftp# TOC
+
+
FTP (File Transfer Protocol).
+
+
Allow reading from or writing to remote resources using the FTP protocol.
+
+
Following syntax is required.
+
+
ftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+ftp-anonymous-password
+Password used when logging in as an anonymous user. Typically an e-mail address
+should be used.
+
+
+ftp-write-seekable
+Control seekability of connection during encoding. If set to 1 the
+resource is supposed to be seekable, if set to 0 it is assumed not
+to be seekable. Default value is 0.
+
+
+
+
NOTE: Protocol can be used as output, but it is recommended to not do
+it, unless special care is taken (tests, customized server configuration
+etc.). Different FTP servers behave in different ways during seek
+operation. ff* tools may produce incomplete content due to server limitations.
+
+
+
18.8 gopher# TOC
+
+
Gopher protocol.
+
+
+
18.9 hls# TOC
+
+
Read Apple HTTP Live Streaming compliant segmented stream as
+a uniform one. The M3U8 playlists describing the segments can be
+remote HTTP resources or local files, accessed using the standard
+file protocol.
+The nested protocol is declared by specifying
+"+proto " after the hls URI scheme name, where proto
+is either "file" or "http".
+
+
+
hls+http://host/path/to/remote/resource.m3u8
+hls+file://path/to/local/resource.m3u8
+
+
+
Using this protocol is discouraged - the hls demuxer should work
+just as well (if not, please report the issues) and is more complete.
+To use the hls demuxer instead, simply use the direct URLs to the
+m3u8 files.
+
+
+
18.10 http# TOC
+
+
HTTP (Hyper Text Transfer Protocol).
+
+
This protocol accepts the following options:
+
+
+seekable
+Control seekability of connection. If set to 1 the resource is
+supposed to be seekable, if set to 0 it is assumed not to be seekable,
+if set to -1 it will try to autodetect if it is seekable. Default
+value is -1.
+
+
+chunked_post
+If set to 1 use chunked Transfer-Encoding for posts, default is 1.
+
+
+content_type
+Set a specific content type for the POST messages.
+
+
+headers
+Set custom HTTP headers, can override built in default headers. The
+value must be a string encoding the headers.
+
+
+multiple_requests
+Use persistent connections if set to 1, default is 0.
+
+
+post_data
+Set custom HTTP post data.
+
+
+user-agent
+user_agent
+Override the User-Agent header. If not specified the protocol will use a
+string describing the libavformat build. ("Lavf/<version>")
+
+
+timeout
+Set timeout in microseconds of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout is
+not specified.
+
+
+mime_type
+Export the MIME type.
+
+
+icy
+If set to 1 request ICY (SHOUTcast) metadata from the server. If the server
+supports this, the metadata has to be retrieved by the application by reading
+the icy_metadata_headers and icy_metadata_packet options.
+The default is 1.
+
+
+icy_metadata_headers
+If the server supports ICY metadata, this contains the ICY-specific HTTP reply
+headers, separated by newline characters.
+
+
+icy_metadata_packet
+If the server supports ICY metadata, and icy was set to 1, this
+contains the last non-empty metadata packet sent by the server. It should be
+polled in regular intervals by applications interested in mid-stream metadata
+updates.
+
+
+cookies
+Set the cookies to be sent in future requests. The format of each cookie is the
+same as the value of a Set-Cookie HTTP response field. Multiple cookies can be
+delimited by a newline character.
+
+
+offset
+Set initial byte offset.
+
+
+end_offset
+Try to limit the request to bytes preceding this offset.
+
+
+
+
+
18.10.1 HTTP Cookies# TOC
+
+
Some HTTP requests will be denied unless cookie values are passed in with the
+request. The cookies option allows these cookies to be specified. At
+the very least, each cookie must specify a value along with a path and domain.
+HTTP requests that match both the domain and path will automatically include the
+cookie value in the HTTP Cookie header field. Multiple cookies can be delimited
+by a newline.
+
+
The required syntax to play a stream specifying a cookie is:
+
+
ffplay -cookies "nlqptid=nltid=tsn; path=/; domain=somedomain.com;" http://somedomain.com/somestream.m3u8
+
+
+
+
18.11 Icecast# TOC
+
+
Icecast protocol (stream to Icecast servers)
+
+
This protocol accepts the following options:
+
+
+ice_genre
+Set the stream genre.
+
+
+ice_name
+Set the stream name.
+
+
+ice_description
+Set the stream description.
+
+
+ice_url
+Set the stream website URL.
+
+
+ice_public
+Set if the stream should be public.
+The default is 0 (not public).
+
+
+user_agent
+Override the User-Agent header. If not specified a string of the form
+"Lavf/<version>" will be used.
+
+
+password
+Set the Icecast mountpoint password.
+
+
+content_type
+Set the stream content type. This must be set if it is different from
+audio/mpeg.
+
+
+legacy_icecast
+This enables support for Icecast versions < 2.4.0, that do not support the
+HTTP PUT method but the SOURCE method.
+
+
+
+
+
+
icecast://[username [:password ]@]server :port /mountpoint
+
+
+
+
18.12 mmst# TOC
+
+
MMS (Microsoft Media Server) protocol over TCP.
+
+
+
18.13 mmsh# TOC
+
+
MMS (Microsoft Media Server) protocol over HTTP.
+
+
The required syntax is:
+
+
mmsh://server [:port ][/app ][/playpath ]
+
+
+
+
18.14 md5# TOC
+
+
MD5 output protocol.
+
+
Computes the MD5 hash of the data to be written, and on close writes
+this to the designated output or stdout if none is specified. It can
+be used to test muxers without writing an actual file.
+
+
Some examples follow.
+
+
# Write the MD5 hash of the encoded AVI file to the file output.avi.md5.
+ffmpeg -i input.flv -f avi -y md5:output.avi.md5
+
+# Write the MD5 hash of the encoded AVI file to stdout.
+ffmpeg -i input.flv -f avi -y md5:
+
+
+
Note that some formats (typically MOV) require the output protocol to
+be seekable, so they will fail with the MD5 output protocol.
+
+
+
18.15 pipe# TOC
+
+
UNIX pipe access protocol.
+
+
Allow reading from and writing to UNIX pipes.
+
+
The accepted syntax is:
+
+
+
number is the number corresponding to the file descriptor of the
+pipe (e.g. 0 for stdin, 1 for stdout, 2 for stderr). If number
+is not specified, by default the stdout file descriptor will be used
+for writing, stdin for reading.
+
+
For example to read from stdin with ffmpeg
:
+
+
cat test.wav | ffmpeg -i pipe:0
+# ...this is the same as...
+cat test.wav | ffmpeg -i pipe:
+
+
+
For writing to stdout with ffmpeg
:
+
+
ffmpeg -i test.wav -f avi pipe:1 | cat > test.avi
+# ...this is the same as...
+ffmpeg -i test.wav -f avi pipe: | cat > test.avi
+
+
+
This protocol accepts the following options:
+
+
+blocksize
+Set I/O operation maximum block size, in bytes. Default value is
+INT_MAX
, which results in not limiting the requested block size.
+Setting this value reasonably low improves user termination request reaction
+time, which is valuable if data transmission is slow.
+
+
+
+
+Note that some formats (typically MOV) require the output protocol to
+be seekable, so they will fail with the pipe output protocol.
+
+
+
18.16 rtmp# TOC
+
+
Real-Time Messaging Protocol.
+
+
The Real-Time Messaging Protocol (RTMP) is used for streaming multimedia
+content across a TCP/IP network.
+
+
The required syntax is:
+
+
rtmp://[username :password @]server [:port ][/app ][/instance ][/playpath ]
+
+
+
The accepted parameters are:
+
+username
+An optional username (mostly for publishing).
+
+
+password
+An optional password (mostly for publishing).
+
+
+server
+The address of the RTMP server.
+
+
+port
+The number of the TCP port to use (by default is 1935).
+
+
+app
+It is the name of the application to access. It usually corresponds to
+the path where the application is installed on the RTMP server
+(e.g. /ondemand/ , /flash/live/ , etc.). You can override
+the value parsed from the URI through the rtmp_app
option, too.
+
+
+playpath
+It is the path or name of the resource to play with reference to the
+application specified in app , may be prefixed by "mp4:". You
+can override the value parsed from the URI through the rtmp_playpath
+option, too.
+
+
+listen
+Act as a server, listening for an incoming connection.
+
+
+timeout
+Maximum time to wait for the incoming connection. Implies listen.
+
+
+
+
Additionally, the following parameters can be set via command line options
+(or in code via AVOption
s):
+
+rtmp_app
+Name of application to connect on the RTMP server. This option
+overrides the parameter specified in the URI.
+
+
+rtmp_buffer
+Set the client buffer time in milliseconds. The default is 3000.
+
+
+rtmp_conn
+Extra arbitrary AMF connection parameters, parsed from a string,
+e.g. like B:1 S:authMe O:1 NN:code:1.23 NS:flag:ok O:0
.
+Each value is prefixed by a single character denoting the type,
+B for Boolean, N for number, S for string, O for object, or Z for null,
+followed by a colon. For Booleans the data must be either 0 or 1 for
+FALSE or TRUE, respectively. Likewise for Objects the data must be 0 or
+1 to end or begin an object, respectively. Data items in subobjects may
+be named, by prefixing the type with ’N’ and specifying the name before
+the value (i.e. NB:myFlag:1
). This option may be used multiple
+times to construct arbitrary AMF sequences.
+
+
+rtmp_flashver
+Version of the Flash plugin used to run the SWF player. The default
+is LNX 9,0,124,2. (When publishing, the default is FMLE/3.0 (compatible;
+<libavformat version>).)
+
+
+rtmp_flush_interval
+Number of packets flushed in the same request (RTMPT only). The default
+is 10.
+
+
+rtmp_live
+Specify that the media is a live stream. No resuming or seeking in
+live streams is possible. The default value is any
, which means the
+subscriber first tries to play the live stream specified in the
+playpath. If a live stream of that name is not found, it plays the
+recorded stream. The other possible values are live
and
+recorded
.
+
+
+rtmp_pageurl
+URL of the web page in which the media was embedded. By default no
+value will be sent.
+
+
+rtmp_playpath
+Stream identifier to play or to publish. This option overrides the
+parameter specified in the URI.
+
+
+rtmp_subscribe
+Name of live stream to subscribe to. By default no value will be sent.
+It is only sent if the option is specified or if rtmp_live
+is set to live.
+
+
+rtmp_swfhash
+SHA256 hash of the decompressed SWF file (32 bytes).
+
+
+rtmp_swfsize
+Size of the decompressed SWF file, required for SWFVerification.
+
+
+rtmp_swfurl
+URL of the SWF player for the media. By default no value will be sent.
+
+
+rtmp_swfverify
+URL to player swf file, compute hash/size automatically.
+
+
+rtmp_tcurl
+URL of the target stream. Defaults to proto://host[:port]/app.
+
+
+
+
+
For example to read with ffplay
a multimedia resource named
+"sample" from the application "vod" from an RTMP server "myserver":
+
+
ffplay rtmp://myserver/vod/sample
+
+
+
To publish to a password protected server, passing the playpath and
+app names separately:
+
+
ffmpeg -re -i <input> -f flv -rtmp_playpath some/long/path -rtmp_app long/app/name rtmp://username:password@myserver/
+
+
+
+
18.17 rtmpe# TOC
+
+
Encrypted Real-Time Messaging Protocol.
+
+
The Encrypted Real-Time Messaging Protocol (RTMPE) is used for
+streaming multimedia content within standard cryptographic primitives,
+consisting of Diffie-Hellman key exchange and HMACSHA256, generating
+a pair of RC4 keys.
+
+
+
18.18 rtmps# TOC
+
+
Real-Time Messaging Protocol over a secure SSL connection.
+
+
The Real-Time Messaging Protocol (RTMPS) is used for streaming
+multimedia content across an encrypted connection.
+
+
+
18.19 rtmpt# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Real-Time Messaging Protocol tunneled through HTTP (RTMPT) is used
+for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
18.20 rtmpte# TOC
+
+
Encrypted Real-Time Messaging Protocol tunneled through HTTP.
+
+
The Encrypted Real-Time Messaging Protocol tunneled through HTTP (RTMPTE)
+is used for streaming multimedia content within HTTP requests to traverse
+firewalls.
+
+
+
18.21 rtmpts# TOC
+
+
Real-Time Messaging Protocol tunneled through HTTPS.
+
+
The Real-Time Messaging Protocol tunneled through HTTPS (RTMPTS) is used
+for streaming multimedia content within HTTPS requests to traverse
+firewalls.
+
+
+
18.22 libsmbclient# TOC
+
+
libsmbclient permits one to manipulate CIFS/SMB network resources.
+
+
Following syntax is required.
+
+
+
smb://[[domain:]user[:password@]]server[/share[/path[/file]]]
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout in milliseconds of socket I/O operations used by the underlying
+low level operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+workgroup
+Set the workgroup used for making connections. By default workgroup is not specified.
+
+
+
+
+
For more information see: http://www.samba.org/ .
+
+
+
18.23 libssh# TOC
+
+
Secure File Transfer Protocol via libssh
+
+
Allow reading from or writing to remote resources using the SFTP protocol.
+
+
Following syntax is required.
+
+
+
sftp://[user[:password]@]server[:port]/path/to/remote/resource.mpeg
+
+
+
This protocol accepts the following options.
+
+
+timeout
+Set timeout of socket I/O operations used by the underlying low level
+operation. By default it is set to -1, which means that the timeout
+is not specified.
+
+
+truncate
+Truncate existing files on write, if set to 1. A value of 0 prevents
+truncating. Default value is 1.
+
+
+private_key
+Specify the path of the file containing private key to use during authorization.
+By default libssh searches for keys in the ~/.ssh/ directory.
+
+
+
+
+
Example: Play a file stored on remote server.
+
+
+
ffplay sftp://user:password@server_address:22/home/user/resource.mpeg
+
+
+
+
18.24 librtmp rtmp, rtmpe, rtmps, rtmpt, rtmpte# TOC
+
+
Real-Time Messaging Protocol and its variants supported through
+librtmp.
+
+
Requires the presence of the librtmp headers and library during
+configuration. You need to explicitly configure the build with
+"–enable-librtmp". If enabled this will replace the native RTMP
+protocol.
+
+
This protocol provides most client functions and a few server
+functions needed to support RTMP, RTMP tunneled in HTTP (RTMPT),
+encrypted RTMP (RTMPE), RTMP over SSL/TLS (RTMPS) and tunneled
+variants of these encrypted types (RTMPTE, RTMPTS).
+
+
The required syntax is:
+
+
rtmp_proto ://server [:port ][/app ][/playpath ] options
+
+
+
where rtmp_proto is one of the strings "rtmp", "rtmpt", "rtmpe",
+"rtmps", "rtmpte", "rtmpts" corresponding to each RTMP variant, and
+server , port , app and playpath have the same
+meaning as specified for the RTMP native protocol.
+options contains a list of space-separated options of the form
+key =val .
+
+
See the librtmp manual page (man 3 librtmp) for more information.
+
+
For example, to stream a file in real-time to an RTMP server using
+ffmpeg
:
+
+
ffmpeg -re -i myfile -f flv rtmp://myserver/live/mystream
+
+
+
To play the same stream using ffplay
:
+
+
ffplay "rtmp://myserver/live/mystream live=1"
+
+
+
+
18.25 rtp# TOC
+
+
Real-time Transport Protocol.
+
+
The required syntax for an RTP URL is:
+rtp://hostname [:port ][?option =val ...]
+
+
port specifies the RTP port to use.
+
+
The following URL options are supported:
+
+
+ttl=n
+Set the TTL (Time-To-Live) value (for multicast only).
+
+
+rtcpport=n
+Set the remote RTCP port to n .
+
+
+localrtpport=n
+Set the local RTP port to n .
+
+
+localrtcpport=n
+Set the local RTCP port to n .
+
+
+pkt_size=n
+Set max packet size (in bytes) to n .
+
+
+connect=0|1
+Do a connect()
on the UDP socket (if set to 1) or not (if set
+to 0).
+
+
+sources=ip [,ip ]
+List allowed source IP addresses.
+
+
+block=ip [,ip ]
+List disallowed (blocked) source IP addresses.
+
+
+write_to_source=0|1
+Send packets to the source address of the latest received packet (if
+set to 1) or to a default remote address (if set to 0).
+
+
+localport=n
+Set the local RTP port to n .
+
+This is a deprecated option. Instead, localrtpport should be
+used.
+
+
+
+
+
Important notes:
+
+
+ If rtcpport is not set the RTCP port will be set to the RTP
+port value plus 1.
+
+ If localrtpport (the local RTP port) is not set any available
+port will be used for the local RTP and RTCP ports.
+
+ If localrtcpport (the local RTCP port) is not set it will be
+set to the local RTP port value plus 1.
+
+
+
+
18.26 rtsp# TOC
+
+
Real-Time Streaming Protocol.
+
+
RTSP is not technically a protocol handler in libavformat, it is a demuxer
+and muxer. The demuxer supports both normal RTSP (with data transferred
+over RTP; this is used by e.g. Apple and Microsoft) and Real-RTSP (with
+data transferred over RDT).
+
+
The muxer can be used to send a stream using RTSP ANNOUNCE to a server
+supporting it (currently Darwin Streaming Server and Mischa Spiegelmock’s
+RTSP server ).
+
+
The required syntax for a RTSP url is:
+
+
rtsp://hostname [:port ]/path
+
+
+
Options can be set on the ffmpeg
/ffplay
command
+line, or set in code via AVOption
s or in
+avformat_open_input
.
+
+
The following options are supported.
+
+
+initial_pause
+Do not start playing the stream immediately if set to 1. Default value
+is 0.
+
+
+rtsp_transport
+Set RTSP transport protocols.
+
+It accepts the following values:
+
+‘udp ’
+Use UDP as lower transport protocol.
+
+
+‘tcp ’
+Use TCP (interleaving within the RTSP control channel) as lower
+transport protocol.
+
+
+‘udp_multicast ’
+Use UDP multicast as lower transport protocol.
+
+
+‘http ’
+Use HTTP tunneling as lower transport protocol, which is useful for
+passing proxies.
+
+
+
+Multiple lower transport protocols may be specified, in that case they are
+tried one at a time (if the setup of one fails, the next one is tried).
+For the muxer, only the ‘tcp ’ and ‘udp ’ options are supported.
+
+
+rtsp_flags
+Set RTSP flags.
+
+The following values are accepted:
+
+‘filter_src ’
+Accept packets only from negotiated peer address and port.
+
+‘listen ’
+Act as a server, listening for an incoming connection.
+
+‘prefer_tcp ’
+Try TCP for RTP transport first, if TCP is available as RTSP RTP transport.
+
+
+
+Default value is ‘none ’.
+
+
+allowed_media_types
+Set media types to accept from the server.
+
+The following flags are accepted:
+
+‘video ’
+‘audio ’
+‘data ’
+
+
+By default it accepts all media types.
+
+
+min_port
+Set minimum local UDP port. Default value is 5000.
+
+
+max_port
+Set maximum local UDP port. Default value is 65000.
+
+
+timeout
+Set maximum timeout (in seconds) to wait for incoming connections.
+
+A value of -1 means infinite (default). This option implies the
+rtsp_flags set to ‘listen ’.
+
+
+reorder_queue_size
+Set number of packets to buffer for handling of reordered packets.
+
+
+stimeout
+Set socket TCP I/O timeout in microseconds.
+
+
+user-agent
+Override User-Agent header. If not specified, it defaults to the
+libavformat identifier string.
+
+
+
+
When receiving data over UDP, the demuxer tries to reorder received packets
+(since they may arrive out of order, or packets may get lost totally). This
+can be disabled by setting the maximum demuxing delay to zero (via
+the max_delay
field of AVFormatContext).
+
+
When watching multi-bitrate Real-RTSP streams with ffplay
, the
+streams to display can be chosen with -vst
n and
+-ast
n for video and audio respectively, and can be switched
+on the fly by pressing v
and a
.
+
+
+
18.26.1 Examples# TOC
+
+
The following examples all make use of the ffplay
and
+ffmpeg
tools.
+
+
+ Watch a stream over UDP, with a max reordering delay of 0.5 seconds:
+
+
ffplay -max_delay 500000 -rtsp_transport udp rtsp://server/video.mp4
+
+
+ Watch a stream tunneled over HTTP:
+
+
ffplay -rtsp_transport http rtsp://server/video.mp4
+
+
+ Send a stream in realtime to a RTSP server, for others to watch:
+
+
ffmpeg -re -i input -f rtsp -muxdelay 0.1 rtsp://server/live.sdp
+
+
+ Receive a stream in realtime:
+
+
ffmpeg -rtsp_flags listen -i rtsp://ownaddress/live.sdp output
+
+
+
+
+
18.27 sap# TOC
+
+
Session Announcement Protocol (RFC 2974). This is not technically a
+protocol handler in libavformat, it is a muxer and demuxer.
+It is used for signalling of RTP streams, by announcing the SDP for the
+streams regularly on a separate port.
+
+
+
18.27.1 Muxer# TOC
+
+
The syntax for a SAP url given to the muxer is:
+
+
sap://destination [:port ][?options ]
+
+
+
The RTP packets are sent to destination on port port ,
+or to port 5004 if no port is specified.
+options is a &
-separated list. The following options
+are supported:
+
+
+announce_addr=address
+Specify the destination IP address for sending the announcements to.
+If omitted, the announcements are sent to the commonly used SAP
+announcement multicast address 224.2.127.254 (sap.mcast.net), or
+ff0e::2:7ffe if destination is an IPv6 address.
+
+
+announce_port=port
+Specify the port to send the announcements on, defaults to
+9875 if not specified.
+
+
+ttl=ttl
+Specify the time to live value for the announcements and RTP packets,
+defaults to 255.
+
+
+same_port=0|1
+If set to 1, send all RTP streams on the same port pair. If zero (the
+default), all streams are sent on unique ports, with each stream on a
+port 2 numbers higher than the previous.
+VLC/Live555 requires this to be set to 1, to be able to receive the stream.
+The RTP stack in libavformat for receiving requires all streams to be sent
+on unique ports.
+
+
+
+
Example command lines follow.
+
+
To broadcast a stream on the local subnet, for watching in VLC:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255?same_port=1
+
+
+
Similarly, for watching in ffplay
:
+
+
+
ffmpeg -re -i input -f sap sap://224.0.0.255
+
+
+
And for watching in ffplay
, over IPv6:
+
+
+
ffmpeg -re -i input -f sap sap://[ff0e::1:2:3:4]
+
+
+
+
18.27.2 Demuxer# TOC
+
+
The syntax for a SAP url given to the demuxer is:
+
+
sap://[address ][:port ]
+
+
+
address is the multicast address to listen for announcements on,
+if omitted, the default 224.2.127.254 (sap.mcast.net) is used. port
+is the port that is listened on, 9875 if omitted.
+
+
+The demuxer listens for announcements on the given address and port.
+Once an announcement is received, it tries to receive that particular stream.
+
+
Example command lines follow.
+
+
To play back the first stream announced on the normal SAP multicast address:
+
+
+
+
+To play back the first stream announced on the default IPv6 SAP multicast address:
+
+
+
ffplay sap://[ff0e::2:7ffe]
+
+
+
+
18.28 sctp# TOC
+
+
Stream Control Transmission Protocol.
+
+
The accepted URL syntax is:
+
+
sctp://host :port [?options ]
+
+
+
The protocol accepts the following options:
+
+listen
+If set to any value, listen for an incoming connection. Outgoing connection is done by default.
+
+
+max_streams
+Set the maximum number of streams. By default no limit is set.
+
+
+
+
+
18.29 srtp# TOC
+
+
Secure Real-time Transport Protocol.
+
+
The accepted options are:
+
+srtp_in_suite
+srtp_out_suite
+Select input and output encoding suites.
+
+Supported values:
+
+‘AES_CM_128_HMAC_SHA1_80 ’
+‘SRTP_AES128_CM_HMAC_SHA1_80 ’
+‘AES_CM_128_HMAC_SHA1_32 ’
+‘SRTP_AES128_CM_HMAC_SHA1_32 ’
+
+
+
+srtp_in_params
+srtp_out_params
+Set input and output encoding parameters, which are expressed by a
+base64-encoded representation of a binary block. The first 16 bytes of
+this binary block are used as master key, the following 14 bytes are
+used as master salt.
+
+
+
+
+
18.30 subfile# TOC
+
+
Virtually extract a segment of a file or another stream.
+The underlying stream must be seekable.
+
+
Accepted options:
+
+start
+Start offset of the extracted segment, in bytes.
+
+end
+End offset of the extracted segment, in bytes.
+
+
+
+
Examples:
+
+
Extract a chapter from a DVD VOB file (start and end sectors obtained
+externally and multiplied by 2048):
+
+
subfile,,start,153391104,end,268142592,,:/media/dvd/VIDEO_TS/VTS_08_1.VOB
+
+
+
Play an AVI file directly from a TAR archive:
+subfile,,start,183241728,end,366490624,,:archive.tar
+
+
+
18.31 tcp# TOC
+
+
Transmission Control Protocol.
+
+
The required syntax for a TCP url is:
+
+
tcp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form
+key =val .
+
+
The list of supported options follows.
+
+
+listen=1|0
+Listen for an incoming connection. Default value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+listen_timeout=microseconds
+Set listen timeout, expressed in microseconds.
+
+
+
+
The following example shows how to setup a listening TCP connection
+with ffmpeg
, which is then accessed with ffplay
:
+
+
ffmpeg -i input -f format tcp://hostname :port ?listen
+ffplay tcp://hostname :port
+
+
+
+
18.32 tls# TOC
+
+
Transport Layer Security (TLS) / Secure Sockets Layer (SSL)
+
+
The required syntax for a TLS/SSL url is:
+
+
tls://hostname :port [?options ]
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+ca_file, cafile=filename
+A file containing certificate authority (CA) root certificates to treat
+as trusted. If the linked TLS library contains a default this might not
+need to be specified for verification to work, but not all libraries and
+setups have defaults built in.
+The file must be in OpenSSL PEM format.
+
+
+tls_verify=1|0
+If enabled, try to verify the peer that we are communicating with.
+Note, if using OpenSSL, this currently only makes sure that the
+peer certificate is signed by one of the root certificates in the CA
+database, but it does not validate that the certificate actually
+matches the host name we are trying to connect to. (With GnuTLS,
+the host name is validated as well.)
+
+This is disabled by default since it requires a CA database to be
+provided by the caller in many cases.
+
+
+cert_file, cert=filename
+A file containing a certificate to use in the handshake with the peer.
+(When operating as server, in listen mode, this is more often required
+by the peer, while client certificates only are mandated in certain
+setups.)
+
+
+key_file, key=filename
+A file containing the private key for the certificate.
+
+
+listen=1|0
+If enabled, listen for connections on the provided port, and assume
+the server role in the handshake instead of the client role.
+
+
+
+
+
Example command lines:
+
+
To create a TLS/SSL server that serves an input stream.
+
+
+
ffmpeg -i input -f format tls://hostname :port ?listen&cert=server.crt &key=server.key
+
+
+
To play back a stream from the TLS/SSL server using ffplay
:
+
+
+
ffplay tls://hostname :port
+
+
+
+
18.33 udp# TOC
+
+
User Datagram Protocol.
+
+
The required syntax for an UDP URL is:
+
+
udp://hostname :port [?options ]
+
+
+
options contains a list of &-separated options of the form key =val .
+
+
In case threading is enabled on the system, a circular buffer is used
+to store the incoming data, which allows one to reduce loss of data due to
+UDP socket buffer overruns. The fifo_size and
+overrun_nonfatal options are related to this buffer.
+
+
The list of supported options follows.
+
+
+buffer_size=size
+Set the UDP maximum socket buffer size in bytes. This is used to set either
+the receive or send buffer size, depending on what the socket is used for.
+Default is 64KB. See also fifo_size .
+
+
+localport=port
+Override the local UDP port to bind with.
+
+
+localaddr=addr
+Choose the local IP address. This is useful e.g. if sending multicast
+and the host has multiple interfaces, where the user can choose
+which interface to send on by specifying the IP address of that interface.
+
+
+pkt_size=size
+Set the size in bytes of UDP packets.
+
+
+reuse=1|0
+Explicitly allow or disallow reusing UDP sockets.
+
+
+ttl=ttl
+Set the time to live value (for multicast only).
+
+
+connect=1|0
+Initialize the UDP socket with connect()
. In this case, the
+destination address can’t be changed with ff_udp_set_remote_url later.
+If the destination address isn’t known at the start, this option can
+be specified in ff_udp_set_remote_url, too.
+This allows finding out the source address for the packets with getsockname,
+and makes writes return with AVERROR(ECONNREFUSED) if "destination
+unreachable" is received.
+For receiving, this gives the benefit of only receiving packets from
+the specified peer address/port.
+
+
+sources=address [,address ]
+Only receive packets sent to the multicast group from one of the
+specified sender IP addresses.
+
+
+block=address [,address ]
+Ignore packets sent to the multicast group from the specified
+sender IP addresses.
+
+
+fifo_size=units
+Set the UDP receiving circular buffer size, expressed as a number of
+packets with size of 188 bytes. If not specified defaults to 7*4096.
+
+
+overrun_nonfatal=1|0
+Survive in case of UDP receiving circular buffer overrun. Default
+value is 0.
+
+
+timeout=microseconds
+Set raise error timeout, expressed in microseconds.
+
+This option is only relevant in read mode: if no data arrived in more
+than this time interval, raise error.
+
+
+broadcast=1|0
+Explicitly allow or disallow UDP broadcasting.
+
+Note that broadcasting may not work properly on networks having
+a broadcast storm protection.
+
+
+
+
+
18.33.1 Examples# TOC
+
+
+ Use ffmpeg
to stream over UDP to a remote endpoint:
+
+
ffmpeg -i input -f format udp://hostname :port
+
+
+ Use ffmpeg
to stream in mpegts format over UDP using 188
+sized UDP packets, using a large input buffer:
+
+
ffmpeg -i input -f mpegts udp://hostname :port ?pkt_size=188&buffer_size=65535
+
+
+ Use ffmpeg
to receive over UDP from a remote endpoint:
+
+
ffmpeg -i udp://[multicast-address ]:port ...
+
+
+
+
+
18.34 unix# TOC
+
+
Unix local socket
+
+
The required syntax for a Unix socket URL is:
+
+
+
+
The following parameters can be set via command line options
+(or in code via AVOption
s):
+
+
+timeout
+Timeout in ms.
+
+listen
+Create the Unix socket in listening mode.
+
+
+
+
+
19 Device Options# TOC
+
+
The libavdevice library provides the same interface as
+libavformat. Namely, an input device is considered like a demuxer, and
+an output device like a muxer, and the interface and generic device
+options are the same provided by libavformat (see the ffmpeg-formats
+manual).
+
+
In addition each input or output device may support so-called private
+options, which are specific for that component.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, or by setting the value explicitly in the device
+AVFormatContext
options or using the libavutil/opt.h API
+for programmatic use.
+
+
+
+
20 Input Devices# TOC
+
+
Input devices are configured elements in FFmpeg which allow you to access
+the data coming from a multimedia device attached to your system.
+
+
When you configure your FFmpeg build, all the supported input devices
+are enabled by default. You can list all available ones using the
+configure option "–list-indevs".
+
+
You can disable all the input devices using the configure option
+"–disable-indevs", and selectively enable an input device using the
+option "–enable-indev=INDEV ", or you can disable a particular
+input device using the option "–disable-indev=INDEV ".
+
+
The option "-devices" of the ff* tools will display the list of
+supported input devices.
+
+
A description of the currently available input devices follows.
+
+
+
20.1 alsa# TOC
+
+
ALSA (Advanced Linux Sound Architecture) input device.
+
+
To enable this input device during configuration you need libasound
+installed on your system.
+
+
This device allows capturing from an ALSA device. The name of the
+device to capture has to be an ALSA card identifier.
+
+
An ALSA identifier has the syntax:
+
+
hw:CARD [,DEV [,SUBDEV ]]
+
+
+
where the DEV and SUBDEV components are optional.
+
+
The three arguments (in order: CARD ,DEV ,SUBDEV )
+specify card number or identifier, device number and subdevice number
+(-1 means any).
+
+
To see the list of cards currently recognized by your system check the
+files /proc/asound/cards and /proc/asound/devices .
+
+
For example to capture with ffmpeg
from an ALSA device with
+card id 0, you may run the command:
+
+
ffmpeg -f alsa -i hw:0 alsaout.wav
+
+
+
For more information see:
+http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html
+
+
+
20.2 avfoundation# TOC
+
+
AVFoundation input device.
+
+
AVFoundation is the currently recommended framework by Apple for streamgrabbing on OSX >= 10.7 as well as on iOS.
+The older QTKit framework has been marked deprecated since OSX version 10.7.
+
+
The input filename has to be given in the following syntax:
+
+
-i "[[VIDEO]:[AUDIO]]"
+
+
The first entry selects the video input while the latter selects the audio input.
+The stream has to be specified by the device name or the device index as shown by the device list.
+Alternatively, the video and/or audio input device can be chosen by index using the
+
+ -video_device_index <INDEX>
+
+and/or
+
+ -audio_device_index <INDEX>
+
+, overriding any
+device name or index given in the input filename.
+
+
All available devices can be enumerated by using -list_devices true , listing
+all device names and corresponding indices.
+
+
There are two device name aliases:
+
+default
+Select the AVFoundation default device of the corresponding type.
+
+
+none
+Do not record the corresponding media type.
+This is equivalent to specifying an empty device name or index.
+
+
+
+
+
+
20.2.1 Options# TOC
+
+
AVFoundation supports the following options:
+
+
+-list_devices <TRUE|FALSE>
+If set to true, a list of all available input devices is given showing all
+device names and indices.
+
+
+-video_device_index <INDEX>
+Specify the video device by its index. Overrides anything given in the input filename.
+
+
+-audio_device_index <INDEX>
+Specify the audio device by its index. Overrides anything given in the input filename.
+
+
+-pixel_format <FORMAT>
+Request the video device to use a specific pixel format.
+If the specified format is not supported, a list of available formats is given
+and the first one in this list is used instead. Available pixel formats are:
+monob, rgb555be, rgb555le, rgb565be, rgb565le, rgb24, bgr24, 0rgb, bgr0, 0bgr, rgb0,
+ bgr48be, uyvy422, yuva444p, yuva444p16le, yuv444p, yuv422p16, yuv422p10, yuv444p10,
+ yuv420p, nv12, yuyv422, gray
+
+
+
+
+
+
20.2.2 Examples# TOC
+
+
+ Print the list of AVFoundation supported devices and exit:
+
+
$ ffmpeg -f avfoundation -list_devices true -i ""
+
+
+ Record video from video device 0 and audio from audio device 0 into out.avi:
+
+
$ ffmpeg -f avfoundation -i "0:0" out.avi
+
+
+ Record video from video device 2 and audio from audio device 1 into out.avi:
+
+
$ ffmpeg -f avfoundation -video_device_index 2 -i ":1" out.avi
+
+
+ Record video from the system default video device using the pixel format bgr0 and do not record any audio into out.avi:
+
+
$ ffmpeg -f avfoundation -pixel_format bgr0 -i "default:none" out.avi
+
+
+
+
+
+
20.3 bktr# TOC
+
+
BSD video input device.
+
+
+
20.4 dshow# TOC
+
+
Windows DirectShow input device.
+
+
DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
+Currently only audio and video devices are supported.
+
+
Multiple devices may be opened as separate inputs, but they may also be
+opened on the same input, which should improve synchronism between them.
+
+
The input name should be in the format:
+
+
+
+
where TYPE can be either audio or video ,
+and NAME is the device’s name.
+
+
+
20.4.1 Options# TOC
+
+
If no options are specified, the device’s defaults are used.
+If the device does not support the requested options, it will
+fail to open.
+
+
+video_size
+Set the video size in the captured video.
+
+
+framerate
+Set the frame rate in the captured video.
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+
+
+sample_size
+Set the sample size (in bits) of the captured audio.
+
+
+channels
+Set the number of channels in the captured audio.
+
+
+list_devices
+If set to true , print a list of devices and exit.
+
+
+list_options
+If set to true , print a list of selected device’s options
+and exit.
+
+
+video_device_number
+Set video device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+audio_device_number
+Set audio device number for devices with same name (starts at 0,
+defaults to 0).
+
+
+pixel_format
+Select pixel format to be used by DirectShow. This may only be set when
+the video codec is not set or set to rawvideo.
+
+
+audio_buffer_size
+Set audio device buffer size in milliseconds (which can directly
+impact latency, depending on the device).
+Defaults to using the audio device’s
+default buffer size (typically some multiple of 500ms).
+Setting this value too low can degrade performance.
+See also
+http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx
+
+
+
+
+
+
20.4.2 Examples# TOC
+
+
+ Print the list of DirectShow supported devices and exit:
+
+
$ ffmpeg -list_devices true -f dshow -i dummy
+
+
+ Open video device Camera :
+
+
$ ffmpeg -f dshow -i video="Camera"
+
+
+ Open second video device with name Camera :
+
+
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
+
+
+ Open video device Camera and audio device Microphone :
+
+
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
+
+
+ Print the list of supported options in selected device and exit:
+
+
$ ffmpeg -list_options true -f dshow -i video="Camera"
+
+
+
+
+
+
20.5 dv1394# TOC
+
+
Linux DV 1394 input device.
+
+
+
20.6 fbdev# TOC
+
+
Linux framebuffer input device.
+
+
The Linux framebuffer is a graphic hardware-independent abstraction
+layer to show graphics on a computer monitor, typically on the
+console. It is accessed through a file device node, usually
+/dev/fb0 .
+
+
For more detailed information read the file
+Documentation/fb/framebuffer.txt included in the Linux source tree.
+
+
To record from the framebuffer device /dev/fb0 with
+ffmpeg
:
+
+
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
+
+
+
You can take a single screenshot image with the command:
+
+
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
+
+
+
See also http://linux-fbdev.sourceforge.net/ , and fbset(1).
+
+
+
20.7 gdigrab# TOC
+
+
Win32 GDI-based screen capture device.
+
+
This device allows you to capture a region of the display on Windows.
+
+
There are two options for the input filename:
+
+desktop
+
+or
+
+title=window_title
+
The first option will capture the entire desktop, or a fixed region of the
+desktop. The second option will instead capture the contents of a single
+window, regardless of its position on the screen.
+
+
For example, to grab the entire desktop using ffmpeg
:
+
+
ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
+
+
+
Grab a 640x480 region at position 10,20
:
+
+
ffmpeg -f gdigrab -framerate 6 -offset_x 10 -offset_y 20 -video_size vga -i desktop out.mpg
+
+
+
Grab the contents of the window named "Calculator"
+
+
ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
+
+
+
+
20.7.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. Use the value 0
to
+not draw the pointer. Default value is 1
.
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+Note that show_region is incompatible with grabbing the contents
+of a single window.
+
+For example:
+
+
ffmpeg -f gdigrab -show_region 1 -framerate 6 -video_size cif -offset_x 10 -offset_y 20 -i desktop out.mpg
+
+
+
+video_size
+Set the video frame size. The default is to capture the full screen if desktop is selected, or the full window size if title=window_title is selected.
+
+
+offset_x
+When capturing a region with video_size , set the distance from the left edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned to the left of your primary monitor, you will need to use a negative offset_x value to move the region to that monitor.
+
+
+offset_y
+When capturing a region with video_size , set the distance from the top edge of the screen or desktop.
+
+Note that the offset calculation is from the top left corner of the primary monitor on Windows. If you have a monitor positioned above your primary monitor, you will need to use a negative offset_y value to move the region to that monitor.
+
+
+
+
+
+
20.8 iec61883# TOC
+
+
FireWire DV/HDV input device using libiec61883.
+
+
To enable this input device, you need libiec61883, libraw1394 and
+libavc1394 installed on your system. Use the configure option
+--enable-libiec61883
to compile with the device enabled.
+
+
The iec61883 capture device supports capturing from a video device
+connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
+FireWire stack (juju). This is the default DV/HDV input method in Linux
+Kernel 2.6.37 and later, since the old FireWire stack was removed.
+
+
Specify the FireWire port to be used as input file, or "auto"
+to choose the first port connected.
+
+
+
20.8.1 Options# TOC
+
+
+dvtype
+Override autodetection of DV/HDV. This should only be used if auto
+detection does not work, or if usage of a different device type
+should be prohibited. Treating a DV device as HDV (or vice versa) will
+not work and result in undefined behavior.
+The values auto , dv and hdv are supported.
+
+
+dvbuffer
+Set maximum size of buffer for incoming data, in frames. For DV, this
+is an exact value. For HDV, it is not frame exact, since HDV does
+not have a fixed frame size.
+
+
+dvguid
+Select the capture device by specifying its GUID. Capturing will only
+be performed from the specified device and fails if no device with the
+given GUID is found. This is useful to select the input if multiple
+devices are connected at the same time.
+Look at /sys/bus/firewire/devices to find out the GUIDs.
+
+
+
+
+
+
20.8.2 Examples# TOC
+
+
+ Grab and show the input of a FireWire DV/HDV device.
+
+
ffplay -f iec61883 -i auto
+
+
+ Grab and record the input of a FireWire DV/HDV device,
+using a packet buffer of 100000 packets if the source is HDV.
+
+
ffmpeg -f iec61883 -i auto -dvbuffer 100000 out.mpg
+
+
+
+
+
+
20.9 jack# TOC
+
+
JACK input device.
+
+
To enable this input device during configuration you need libjack
+installed on your system.
+
+
A JACK input device creates one or more JACK writable clients, one for
+each audio channel, with name client_name :input_N , where
+client_name is the name provided by the application, and N
+is a number which identifies the channel.
+Each writable client will send the acquired data to the FFmpeg input
+device.
+
+
Once you have created one or more JACK readable clients, you need to
+connect them to one or more JACK writable clients.
+
+
To connect or disconnect JACK clients you can use the jack_connect
+and jack_disconnect
programs, or do it through a graphical interface,
+for example with qjackctl
.
+
+
To list the JACK clients and their properties you can invoke the command
+jack_lsp
.
+
+
Follows an example which shows how to capture a JACK readable client
+with ffmpeg
.
+
+
# Create a JACK writable client with name "ffmpeg".
+$ ffmpeg -f jack -i ffmpeg -y out.wav
+
+# Start the sample jack_metro readable client.
+$ jack_metro -b 120 -d 0.2 -f 4000
+
+# List the current JACK clients.
+$ jack_lsp -c
+system:capture_1
+system:capture_2
+system:playback_1
+system:playback_2
+ffmpeg:input_1
+metro:120_bpm
+
+# Connect metro to the ffmpeg writable client.
+$ jack_connect metro:120_bpm ffmpeg:input_1
+
+
+
For more information read:
+http://jackaudio.org/
+
+
+
20.10 lavfi# TOC
+
+
Libavfilter input virtual device.
+
+
This input device reads data from the open output pads of a libavfilter
+filtergraph.
+
+
For each filtergraph open output, the input device will create a
+corresponding stream which is mapped to the generated output. Currently
+only video data is supported. The filtergraph is specified through the
+option graph .
+
+
+
20.10.1 Options# TOC
+
+
+graph
+Specify the filtergraph to use as input. Each video open output must be
+labelled by a unique string of the form "outN ", where N is a
+number starting from 0 corresponding to the mapped input stream
+generated by the device.
+The first unlabelled output is automatically assigned to the "out0"
+label, but all the others need to be specified explicitly.
+
+The suffix "+subcc" can be appended to the output label to create an extra
+stream with the closed captions packets attached to that output
+(experimental; only for EIA-608 / CEA-708 for now).
+The subcc streams are created after all the normal streams, in the order of
+the corresponding stream.
+For example, if there is "out19+subcc", "out7+subcc" and up to "out42", the
+stream #43 is subcc for stream #7 and stream #44 is subcc for stream #19.
+
+If not specified defaults to the filename specified for the input
+device.
+
+
+graph_file
+Set the filename of the filtergraph to be read and sent to the other
+filters. Syntax of the filtergraph is the same as the one specified by
+the option graph .
+
+
+
+
+
+
20.10.2 Examples# TOC
+
+
+ Create a color video stream and play it back with ffplay
:
+
+
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
+
+
+ As the previous example, but use filename for specifying the graph
+description, and omit the "out0" label:
+
+
ffplay -f lavfi color=c=pink
+
+
+ Create three different video test filtered sources and play them:
+
+
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
+
+
+ Read an audio stream from a file using the amovie source and play it
+back with ffplay
:
+
+
ffplay -f lavfi "amovie=test.wav"
+
+
+ Read an audio stream and a video stream and play it back with
+ffplay
:
+
+
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
+
+
+ Dump decoded frames to images and closed captions to a file (experimental):
+
+
ffmpeg -f lavfi -i "movie=test.ts[out0+subcc]" -map v frame%08d.png -map s -c copy -f rawvideo subcc.bin
+
+
+
+
+
+
20.11 libcdio# TOC
+
+
Audio-CD input device based on cdio.
+
+
To enable this input device during configuration you need libcdio
+installed on your system. Requires the configure option
+--enable-libcdio
.
+
+
This device allows playing and grabbing from an Audio-CD.
+
+
For example to copy with ffmpeg
the entire Audio-CD in /dev/sr0,
+you may run the command:
+
+
ffmpeg -f libcdio -i /dev/sr0 cd.wav
+
+
+
+
20.12 libdc1394# TOC
+
+
IIDC1394 input device, based on libdc1394 and libraw1394.
+
+
Requires the configure option --enable-libdc1394
.
+
+
+
20.13 openal# TOC
+
+
The OpenAL input device provides audio capture on all systems with a
+working OpenAL 1.1 implementation.
+
+
To enable this input device during configuration, you need OpenAL
+headers and libraries installed on your system, and need to configure
+FFmpeg with --enable-openal
.
+
+
OpenAL headers and libraries should be provided as part of your OpenAL
+implementation, or as an additional download (an SDK). Depending on your
+installation you may need to specify additional flags via the
+--extra-cflags
and --extra-ldflags
for allowing the build
+system to locate the OpenAL headers and libraries.
+
+
An incomplete list of OpenAL implementations follows:
+
+
+Creative
+The official Windows implementation, providing hardware acceleration
+with supported devices and software fallback.
+See http://openal.org/ .
+
+OpenAL Soft
+Portable, open source (LGPL) software implementation. Includes
+backends for the most common sound APIs on the Windows, Linux,
+Solaris, and BSD operating systems.
+See http://kcat.strangesoft.net/openal.html .
+
+Apple
+OpenAL is part of Core Audio, the official Mac OS X Audio interface.
+See http://developer.apple.com/technologies/mac/audio-and-video.html
+
+
+
+
This device allows one to capture from an audio input device handled
+through OpenAL.
+
+
You need to specify the name of the device to capture in the provided
+filename. If the empty string is provided, the device will
+automatically select the default device. You can get the list of the
+supported devices by using the option list_devices .
+
+
+
20.13.1 Options# TOC
+
+
+channels
+Set the number of channels in the captured audio. Only the values
+1 (monaural) and 2 (stereo) are currently supported.
+Defaults to 2 .
+
+
+sample_size
+Set the sample size (in bits) of the captured audio. Only the values
+8 and 16 are currently supported. Defaults to
+16 .
+
+
+sample_rate
+Set the sample rate (in Hz) of the captured audio.
+Defaults to 44.1k .
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+
+
+
+
20.13.2 Examples# TOC
+
+
Print the list of OpenAL supported devices and exit:
+
+
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
+
+
+
Capture from the OpenAL device DR-BT101 via PulseAudio :
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
+
+
+
Capture from the default device (note the empty string '' as filename):
+
+
$ ffmpeg -f openal -i '' out.ogg
+
+
+
Capture from two devices simultaneously, writing to two different files,
+within the same ffmpeg
command:
+
+
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
+
+
Note: not all OpenAL implementations support multiple simultaneous capture -
+try the latest OpenAL Soft if the above does not work.
+
+
+
20.14 oss# TOC
+
+
Open Sound System input device.
+
+
The filename to provide to the input device is the device node
+representing the OSS input device, and is usually set to
+/dev/dsp .
+
+
For example to grab from /dev/dsp using ffmpeg
use the
+command:
+
+
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
+
+
+
For more information about OSS see:
+http://manuals.opensound.com/usersguide/dsp.html
+
+
+
20.15 pulse# TOC
+
+
PulseAudio input device.
+
+
To enable this output device you need to configure FFmpeg with --enable-libpulse
.
+
+
The filename to provide to the input device is a source device or the
+string "default".
+
+
To list the PulseAudio source devices and their properties you can invoke
+the command pactl list sources
.
+
+
More information about PulseAudio can be found on http://www.pulseaudio.org .
+
+
+
20.15.1 Options# TOC
+
+server
+Connect to a specific PulseAudio server, specified by an IP address.
+Default server is used when not provided.
+
+
+name
+Specify the application name PulseAudio will use when showing active clients,
+by default it is the LIBAVFORMAT_IDENT
string.
+
+
+stream_name
+Specify the stream name PulseAudio will use when showing active streams,
+by default it is "record".
+
+
+sample_rate
+Specify the samplerate in Hz, by default 48kHz is used.
+
+
+channels
+Specify the channels in use, by default 2 (stereo) is set.
+
+
+frame_size
+Specify the number of bytes per frame, by default it is set to 1024.
+
+
+fragment_size
+Specify the minimal buffering fragment in PulseAudio, it will affect the
+audio latency. By default it is unset.
+
+
+
+
+
20.15.2 Examples# TOC
+
Record a stream from default device:
+
+
ffmpeg -f pulse -i default /tmp/pulse.wav
+
+
+
+
20.16 qtkit# TOC
+
+
QTKit input device.
+
+
The filename passed as input is parsed to contain either a device name or index.
+The device index can also be given by using -video_device_index.
+A given device index will override any given device name.
+If the desired device consists of numbers only, use -video_device_index to identify it.
+The default device will be chosen if an empty string or the device name "default" is given.
+The available devices can be enumerated by using -list_devices.
+
+
+
ffmpeg -f qtkit -i "0" out.mpg
+
+
+
+
ffmpeg -f qtkit -video_device_index 0 -i "" out.mpg
+
+
+
+
ffmpeg -f qtkit -i "default" out.mpg
+
+
+
+
ffmpeg -f qtkit -list_devices true -i ""
+
+
+
+
20.17 sndio# TOC
+
+
sndio input device.
+
+
To enable this input device during configuration you need libsndio
+installed on your system.
+
+
The filename to provide to the input device is the device node
+representing the sndio input device, and is usually set to
+/dev/audio0 .
+
+
For example to grab from /dev/audio0 using ffmpeg
use the
+command:
+
+
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
+
+
+
+
20.18 video4linux2, v4l2# TOC
+
+
Video4Linux2 input video device.
+
+
"v4l2" can be used as alias for "video4linux2".
+
+
If FFmpeg is built with v4l-utils support (by using the
+--enable-libv4l2
configure option), it is possible to use it with the
+-use_libv4l2
input device option.
+
+
The name of the device to grab is a file device node, usually Linux
+systems tend to automatically create such nodes when the device
+(e.g. an USB webcam) is plugged into the system, and has a name of the
+kind /dev/videoN , where N is a number associated to
+the device.
+
+
Video4Linux2 devices usually support a limited set of
+width x height sizes and frame rates. You can check which are
+supported using -list_formats all
for Video4Linux2 devices.
+Some devices, like TV cards, support one or more standards. It is possible
+to list all the supported standards using -list_standards all
.
+
+
The time base for the timestamps is 1 microsecond. Depending on the kernel
+version and configuration, the timestamps may be derived from the real time
+clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
+boot time, unaffected by NTP or manual changes to the clock). The
+-timestamps abs or -ts abs option can be used to force
+conversion into the real time clock.
+
+
Some usage examples of the video4linux2 device with ffmpeg
+and ffplay
:
+
+ Grab and show the input of a video4linux2 device:
+
+
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
+
+
+ Grab and record the input of a video4linux2 device, leave the
+frame rate and size as previously set:
+
+
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
+
+
+
+
For more information about Video4Linux, check http://linuxtv.org/ .
+
+
+
20.18.1 Options# TOC
+
+
+standard
+Set the standard. Must be the name of a supported standard. To get a
+list of the supported standards, use the list_standards
+option.
+
+
+channel
+Set the input channel number. Default to -1, which means using the
+previously selected channel.
+
+
+video_size
+Set the video frame size. The argument must be a string in the form
+WIDTHxHEIGHT or a valid size abbreviation.
+
+
+pixel_format
+Select the pixel format (only valid for raw video input).
+
+
+input_format
+Set the preferred pixel format (for raw video) or a codec name.
+This option allows one to select the input format, when several are
+available.
+
+
+framerate
+Set the preferred video frame rate.
+
+
+list_formats
+List available formats (supported pixel formats, codecs, and frame
+sizes) and exit.
+
+Available values are:
+
+‘all ’
+Show all available (compressed and non-compressed) formats.
+
+
+‘raw ’
+Show only raw video (non-compressed) formats.
+
+
+‘compressed ’
+Show only compressed formats.
+
+
+
+
+list_standards
+List supported standards and exit.
+
+Available values are:
+
+‘all ’
+Show all supported standards.
+
+
+
+
+timestamps, ts
+Set type of timestamps for grabbed frames.
+
+Available values are:
+
+‘default ’
+Use timestamps from the kernel.
+
+
+‘abs ’
+Use absolute timestamps (wall clock).
+
+
+‘mono2abs ’
+Force conversion from monotonic to absolute timestamps.
+
+
+
+Default value is default
.
+
+
+
+
+
20.19 vfwcap# TOC
+
+
VfW (Video for Windows) capture input device.
+
+
The filename passed as input is the capture driver number, ranging from
+0 to 9. You may use "list" as filename to print a list of drivers. Any
+other filename will be interpreted as device number 0.
+
+
+
20.20 x11grab# TOC
+
+
X11 video input device.
+
+
Depends on X11, Xext, and Xfixes. Requires the configure option
+--enable-x11grab
.
+
+
This device allows one to capture a region of an X11 display.
+
+
The filename passed as input has the syntax:
+
+
[hostname ]:display_number .screen_number [+x_offset ,y_offset ]
+
+
+
hostname :display_number .screen_number specifies the
+X11 display name of the screen to grab from. hostname can be
+omitted, and defaults to "localhost". The environment variable
+DISPLAY
contains the default display name.
+
+
x_offset and y_offset specify the offsets of the grabbed
+area with respect to the top-left border of the X11 screen. They
+default to 0.
+
+
Check the X11 documentation (e.g. man X) for more detailed information.
+
+
Use the xdpyinfo
program for getting basic information about the
+properties of your X11 display (e.g. grep for "name" or "dimensions").
+
+
For example to grab from :0.0 using ffmpeg
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
Grab at position 10,20
:
+
+
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+
+
20.20.1 Options# TOC
+
+
+draw_mouse
+Specify whether to draw the mouse pointer. A value of 0
specify
+not to draw the pointer. Default value is 1
.
+
+
+follow_mouse
+Make the grabbed area follow the mouse. The argument can be
+centered
or a number of pixels PIXELS .
+
+When it is specified with "centered", the grabbing region follows the mouse
+pointer and keeps the pointer at the center of region; otherwise, the region
+follows only when the mouse pointer reaches within PIXELS (greater than
+zero) to the edge of region.
+
+For example:
+
+
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+To follow only when the mouse pointer reaches within 100 pixels to edge:
+
+
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+framerate
+Set the grabbing frame rate. Default value is ntsc
,
+corresponding to a frame rate of 30000/1001
.
+
+
+show_region
+Show grabbed region on screen.
+
+If show_region is specified with 1
, then the grabbing
+region will be indicated on screen. With this option, it is easy to
+know what is being grabbed if only a portion of the screen is grabbed.
+
+For example:
+
+
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
+
+
+With follow_mouse :
+
+
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
+
+
+
+video_size
+Set the video frame size. Default value is vga
.
+
+
+use_shm
+Use the MIT-SHM extension for shared memory. Default value is 1
.
+It may be necessary to disable it for remote displays.
+
+
+
+
+
20.21 decklink# TOC
+
+
The decklink input device provides capture capabilities for Blackmagic
+DeckLink devices.
+
+
To enable this input device, you need the Blackmagic DeckLink SDK and you
+need to configure with the appropriate --extra-cflags
+and --extra-ldflags
.
+On Windows, you need to run the IDL files through widl
.
+
+
DeckLink is very picky about the formats it supports. Pixel format is always
+uyvy422, framerate and video size must be determined for your device with
+-list_formats 1
. Audio sample rate is always 48 kHz and the number
+of channels currently is limited to 2 (stereo).
+
+
+
20.21.1 Options# TOC
+
+
+list_devices
+If set to true , print a list of devices and exit.
+Defaults to false .
+
+
+list_formats
+If set to true , print a list of supported formats and exit.
+Defaults to false .
+
+
+
+
+
+
20.21.2 Examples# TOC
+
+
+ List input devices:
+
+
ffmpeg -f decklink -list_devices 1 -i dummy
+
+
+ List supported formats:
+
+
ffmpeg -f decklink -list_formats 1 -i 'Intensity Pro'
+
+
+ Capture video clip at 1080i50 (format 11):
+
+
ffmpeg -f decklink -i 'Intensity Pro@11' -acodec copy -vcodec copy output.avi
+
+
+
+
+
+
+
21 Resampler Options# TOC
+
+
The audio resampler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools, option =value for the aresample filter,
+by setting the value explicitly in the
+SwrContext
options or using the libavutil/opt.h API for
+programmatic use.
+
+
+ich, in_channel_count
+Set the number of input channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+in_channel_layout is set.
+
+
+och, out_channel_count
+Set the number of output channels. Default value is 0. Setting this
+value is not mandatory if the corresponding channel layout
+out_channel_layout is set.
+
+
+uch, used_channel_count
+Set the number of used input channels. Default value is 0. This option is
+only used for special remapping.
+
+
+isr, in_sample_rate
+Set the input sample rate. Default value is 0.
+
+
+osr, out_sample_rate
+Set the output sample rate. Default value is 0.
+
+
+isf, in_sample_fmt
+Specify the input sample format. It is set by default to none
.
+
+
+osf, out_sample_fmt
+Specify the output sample format. It is set by default to none
.
+
+
+tsf, internal_sample_fmt
+Set the internal sample format. Default value is none
.
+This will automatically be chosen when it is not explicitly set.
+
+
+icl, in_channel_layout
+ocl, out_channel_layout
+Set the input/output channel layout.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+clev, center_mix_level
+Set the center mix level. It is a value expressed in deciBel, and must be
+in the interval [-32,32].
+
+
+slev, surround_mix_level
+Set the surround mix level. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+lfe_mix_level
+Set LFE mix into non LFE level. It is used when there is a LFE input but no
+LFE output. It is a value expressed in deciBel, and must
+be in the interval [-32,32].
+
+
+rmvol, rematrix_volume
+Set rematrix volume. Default value is 1.0.
+
+
+rematrix_maxval
+Set maximum output value for rematrixing.
+This can be used to prevent clipping vs. preventing volume reduction.
+A value of 1.0 prevents clipping.
+
+
+flags, swr_flags
+Set flags used by the converter. Default value is 0.
+
+It supports the following individual flags:
+
+res
+force resampling, this flag forces resampling to be used even when the
+input and output sample rates match.
+
+
+
+
+dither_scale
+Set the dither scale. Default value is 1.
+
+
+dither_method
+Set dither method. Default value is 0.
+
+Supported values:
+
+‘rectangular ’
+select rectangular dither
+
+‘triangular ’
+select triangular dither
+
+‘triangular_hp ’
+select triangular dither with high pass
+
+‘lipshitz ’
+select lipshitz noise shaping dither
+
+‘shibata ’
+select shibata noise shaping dither
+
+‘low_shibata ’
+select low shibata noise shaping dither
+
+‘high_shibata ’
+select high shibata noise shaping dither
+
+‘f_weighted ’
+select f-weighted noise shaping dither
+
+‘modified_e_weighted ’
+select modified-e-weighted noise shaping dither
+
+‘improved_e_weighted ’
+select improved-e-weighted noise shaping dither
+
+
+
+
+
+resampler
+Set resampling engine. Default value is swr.
+
+Supported values:
+
+‘swr ’
+select the native SW Resampler; filter options precision and cheby are not
+applicable in this case.
+
+‘soxr ’
+select the SoX Resampler (where available); compensation, and filter options
+filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
+case.
+
+
+
+
+filter_size
+For swr only, set resampling filter size, default value is 32.
+
+
+phase_shift
+For swr only, set resampling phase shift, default value is 10, and must be in
+the interval [0,30].
+
+
+linear_interp
+Use Linear Interpolation if set to 1, default value is 0.
+
+
+cutoff
+Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
+value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
+(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
+
+
+precision
+For soxr only, the precision in bits to which the resampled signal will be
+calculated. The default value of 20 (which, with suitable dithering, is
+appropriate for a destination bit-depth of 16) gives SoX’s ’High Quality’; a
+value of 28 gives SoX’s ’Very High Quality’.
+
+
+cheby
+For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
+approximation for ’irrational’ ratios. Default value is 0.
+
+
+async
+For swr only, simple 1 parameter audio sync to timestamps using stretching,
+squeezing, filling and trimming. Setting this to 1 will enable filling and
+trimming, larger values represent the maximum amount in samples that the data
+may be stretched or squeezed for each second.
+Default value is 0, thus no compensation is applied to make the samples match
+the audio timestamps.
+
+
+first_pts
+For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
+This allows for padding/trimming at the start of stream. By default, no
+assumption is made about the first frame’s expected pts, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative pts due to encoder delay.
+
+
+min_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger stretching/squeezing/filling or trimming of the
+data to make it match the timestamps. The default is that
+stretching/squeezing/filling and trimming is disabled
+(min_comp = FLT_MAX
).
+
+
+min_hard_comp
+For swr only, set the minimum difference between timestamps and audio data (in
+seconds) to trigger adding/dropping samples to make it match the
+timestamps. This option effectively is a threshold to select between
+hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
+all compensation is by default disabled through min_comp .
+The default is 0.1.
+
+
+comp_duration
+For swr only, set duration (in seconds) over which data is stretched/squeezed
+to make it match the timestamps. Must be a non-negative double float value,
+default value is 1.0.
+
+
+max_soft_comp
+For swr only, set maximum factor by which data is stretched/squeezed to make it
+match the timestamps. Must be a non-negative double float value, default value
+is 0.
+
+
+matrix_encoding
+Select matrixed stereo encoding.
+
+It accepts the following values:
+
+‘none ’
+select none
+
+‘dolby ’
+select Dolby
+
+‘dplii ’
+select Dolby Pro Logic II
+
+
+
+Default value is none
.
+
+
+filter_type
+For swr only, select resampling filter type. This only affects resampling
+operations.
+
+It accepts the following values:
+
+‘cubic ’
+select cubic
+
+‘blackman_nuttall ’
+select Blackman Nuttall Windowed Sinc
+
+‘kaiser ’
+select Kaiser Windowed Sinc
+
+
+
+
+kaiser_beta
+For swr only, set Kaiser Window Beta value. Must be an integer in the
+interval [2,16], default value is 9.
+
+
+output_sample_bits
+For swr only, set number of used output sample bits for dithering. Must be an integer in the
+interval [0,64], default value is 0, which means it’s not used.
+
+
+
+
+
+
22 Scaler Options# TOC
+
+
The video scaler supports the following named options.
+
+
Options may be set by specifying -option value in the
+FFmpeg tools. For programmatic use, they can be set explicitly in the
+SwsContext
options or through the libavutil/opt.h API.
+
+
+
+
+sws_flags
+Set the scaler flags. This is also used to set the scaling
+algorithm. Only a single algorithm should be selected.
+
+It accepts the following values:
+
+‘fast_bilinear ’
+Select fast bilinear scaling algorithm.
+
+
+‘bilinear ’
+Select bilinear scaling algorithm.
+
+
+‘bicubic ’
+Select bicubic scaling algorithm.
+
+
+‘experimental ’
+Select experimental scaling algorithm.
+
+
+‘neighbor ’
+Select nearest neighbor rescaling algorithm.
+
+
+‘area ’
+Select averaging area rescaling algorithm.
+
+
+‘bicublin ’
+Select bicubic scaling algorithm for the luma component, bilinear for
+chroma components.
+
+
+‘gauss ’
+Select Gaussian rescaling algorithm.
+
+
+‘sinc ’
+Select sinc rescaling algorithm.
+
+
+‘lanczos ’
+Select lanczos rescaling algorithm.
+
+
+‘spline ’
+Select natural bicubic spline rescaling algorithm.
+
+
+‘print_info ’
+Enable printing/debug logging.
+
+
+‘accurate_rnd ’
+Enable accurate rounding.
+
+
+‘full_chroma_int ’
+Enable full chroma interpolation.
+
+
+‘full_chroma_inp ’
+Select full chroma input.
+
+
+‘bitexact ’
+Enable bitexact output.
+
+
+
+
+srcw
+Set source width.
+
+
+srch
+Set source height.
+
+
+dstw
+Set destination width.
+
+
+dsth
+Set destination height.
+
+
+src_format
+Set source pixel format (must be expressed as an integer).
+
+
+dst_format
+Set destination pixel format (must be expressed as an integer).
+
+
+src_range
+Select source range.
+
+
+dst_range
+Select destination range.
+
+
+param0, param1
+Set scaling algorithm parameters. The specified values are specific of
+some scaling algorithms and ignored by others. The specified values
+are floating point number values.
+
+
+sws_dither
+Set the dithering algorithm. Accepts one of the following
+values. Default value is ‘auto ’.
+
+
+‘auto ’
+automatic choice
+
+
+‘none ’
+no dithering
+
+
+‘bayer ’
+bayer dither
+
+
+‘ed ’
+error diffusion dither
+
+
+‘a_dither ’
+arithmetic dither, based using addition
+
+
+‘x_dither ’
+arithmetic dither, based using xor (more random/less apparent patterning than
+a_dither).
+
+
+
+
+
+
+
+
+
23 Filtering Introduction# TOC
+
+
Filtering in FFmpeg is enabled through the libavfilter library.
+
+
In libavfilter, a filter can have multiple inputs and multiple
+outputs.
+To illustrate the sorts of things that are possible, we consider the
+following filtergraph.
+
+
+
[main]
+input --> split ---------------------> overlay --> output
+ | ^
+ |[tmp] [flip]|
+ +-----> crop --> vflip -------+
+
+
+
This filtergraph splits the input stream in two streams, then sends one
+stream through the crop filter and the vflip filter, before merging it
+back with the other stream by overlaying it on top. You can use the
+following command to achieve this:
+
+
+
ffmpeg -i INPUT -vf "split [main][tmp]; [tmp] crop=iw:ih/2:0:0, vflip [flip]; [main][flip] overlay=0:H/2" OUTPUT
+
+
+
The result will be that the top half of the video is mirrored
+onto the bottom half of the output video.
+
+
Filters in the same linear chain are separated by commas, and distinct
+linear chains of filters are separated by semicolons. In our example,
+crop,vflip are in one linear chain, split and
+overlay are separately in another. The points where the linear
+chains join are labelled by names enclosed in square brackets. In the
+example, the split filter generates two outputs that are associated to
+the labels [main] and [tmp] .
+
+
The stream sent to the second output of split , labelled as
+[tmp] , is processed through the crop filter, which crops
+away the lower half part of the video, and then vertically flipped. The
+overlay filter takes in input the first unchanged output of the
+split filter (which was labelled as [main] ), and overlay on its
+lower half the output generated by the crop,vflip filterchain.
+
+
Some filters take in input a list of parameters: they are specified
+after the filter name and an equal sign, and are separated from each other
+by a colon.
+
+
There exist so-called source filters that do not have an
+audio/video input, and sink filters that will not have audio/video
+output.
+
+
+
+
24 graph2dot# TOC
+
+
The graph2dot program included in the FFmpeg tools
+directory can be used to parse a filtergraph description and issue a
+corresponding textual representation in the dot language.
+
+
Invoke the command:
+
+
+
to see how to use graph2dot .
+
+
You can then pass the dot description to the dot program (from
+the graphviz suite of programs) and obtain a graphical representation
+of the filtergraph.
+
+
For example the sequence of commands:
+
+
echo GRAPH_DESCRIPTION | \
+tools/graph2dot -o graph.tmp && \
+dot -Tpng graph.tmp -o graph.png && \
+display graph.png
+
+
+
can be used to create and display an image representing the graph
+described by the GRAPH_DESCRIPTION string. Note that this string must be
+a complete self-contained graph, with its inputs and outputs explicitly defined.
+For example if your command line is of the form:
+
+
ffmpeg -i infile -vf scale=640:360 outfile
+
+
your GRAPH_DESCRIPTION string will need to be of the form:
+
+
nullsrc,scale=640:360,nullsink
+
+
you may also need to set the nullsrc parameters and add a format
+filter in order to simulate a specific input file.
+
+
+
+
25 Filtergraph description# TOC
+
+
A filtergraph is a directed graph of connected filters. It can contain
+cycles, and there can be multiple links between a pair of
+filters. Each link has one input pad on one side connecting it to one
+filter from which it takes its input, and one output pad on the other
+side connecting it to one filter accepting its output.
+
+
Each filter in a filtergraph is an instance of a filter class
+registered in the application, which defines the features and the
+number of input and output pads of the filter.
+
+
A filter with no input pads is called a "source", and a filter with no
+output pads is called a "sink".
+
+
+
25.1 Filtergraph syntax# TOC
+
+
A filtergraph has a textual representation, which is
+recognized by the -filter /-vf and -filter_complex
+options in ffmpeg
and -vf in ffplay
, and by the
+avfilter_graph_parse()
/avfilter_graph_parse2()
functions defined in
+libavfilter/avfilter.h .
+
+
A filterchain consists of a sequence of connected filters, each one
+connected to the previous one in the sequence. A filterchain is
+represented by a list of ","-separated filter descriptions.
+
+
A filtergraph consists of a sequence of filterchains. A sequence of
+filterchains is represented by a list of ";"-separated filterchain
+descriptions.
+
+
A filter is represented by a string of the form:
+[in_link_1 ]...[in_link_N ]filter_name =arguments [out_link_1 ]...[out_link_M ]
+
+
filter_name is the name of the filter class of which the
+described filter is an instance of, and has to be the name of one of
+the filter classes registered in the program.
+The name of the filter class is optionally followed by a string
+"=arguments ".
+
+
arguments is a string which contains the parameters used to
+initialize the filter instance. It may have one of two forms:
+
+ A ’:’-separated list of key=value pairs.
+
+ A ’:’-separated list of value . In this case, the keys are assumed to be
+the option names in the order they are declared. E.g. the fade
filter
+declares three options in this order – type , start_frame and
+nb_frames . Then the parameter list in:0:30 means that the value
+in is assigned to the option type , 0 to
+start_frame and 30 to nb_frames .
+
+ A ’:’-separated list of mixed direct value and long key=value
+pairs. The direct value must precede the key=value pairs, and
+follow the same constraints order of the previous point. The following
+key=value pairs can be set in any preferred order.
+
+
+
+
If the option value itself is a list of items (e.g. the format
filter
+takes a list of pixel formats), the items in the list are usually separated by
+’|’.
+
+
The list of arguments can be quoted using the character "’" as initial
+and ending mark, and the character ’\’ for escaping the characters
+within the quoted text; otherwise the argument string is considered
+terminated when the next special character (belonging to the set
+"[]=;,") is encountered.
+
+
The name and arguments of the filter are optionally preceded and
+followed by a list of link labels.
+A link label allows one to name a link and associate it to a filter output
+or input pad. The preceding labels in_link_1
+... in_link_N , are associated to the filter input pads,
+the following labels out_link_1 ... out_link_M , are
+associated to the output pads.
+
+
When two link labels with the same name are found in the
+filtergraph, a link between the corresponding input and output pad is
+created.
+
+
If an output pad is not labelled, it is linked by default to the first
+unlabelled input pad of the next filter in the filterchain.
+For example in the filterchain
+
+
nullsrc, split[L1], [L2]overlay, nullsink
+
+
the split filter instance has two output pads, and the overlay filter
+instance two input pads. The first output pad of split is labelled
+"L1", the first input pad of overlay is labelled "L2", and the second
+output pad of split is linked to the second input pad of overlay,
+which are both unlabelled.
+
+
In a complete filterchain all the unlabelled filter input and output
+pads must be connected. A filtergraph is considered valid if all the
+filter input and output pads of all the filterchains are connected.
+
+
Libavfilter will automatically insert scale filters where format
+conversion is required. It is possible to specify swscale flags
+for those automatically inserted scalers by prepending
+sws_flags=flags ;
+to the filtergraph description.
+
+
Here is a BNF description of the filtergraph syntax:
+
+
NAME ::= sequence of alphanumeric characters and '_'
+LINKLABEL ::= "[" NAME "]"
+LINKLABELS ::= LINKLABEL [LINKLABELS ]
+FILTER_ARGUMENTS ::= sequence of chars (possibly quoted)
+FILTER ::= [LINKLABELS ] NAME ["=" FILTER_ARGUMENTS ] [LINKLABELS ]
+FILTERCHAIN ::= FILTER [,FILTERCHAIN ]
+FILTERGRAPH ::= [sws_flags=flags ;] FILTERCHAIN [;FILTERGRAPH ]
+
+
+
+
25.2 Notes on filtergraph escaping# TOC
+
+
Filtergraph description composition entails several levels of
+escaping. See (ffmpeg-utils)the "Quoting and escaping"
+section in the ffmpeg-utils(1) manual for more
+information about the employed escaping procedure.
+
+
A first level escaping affects the content of each filter option
+value, which may contain the special character :
used to
+separate values, or one of the escaping characters \'
.
+
+
A second level escaping affects the whole filter description, which
+may contain the escaping characters \'
or the special
+characters [],;
used by the filtergraph description.
+
+
Finally, when you specify a filtergraph on a shell commandline, you
+need to perform a third level escaping for the shell special
+characters contained within it.
+
+
For example, consider the following string to be embedded in
+the drawtext filter description text value:
+
+
this is a 'string': may contain one, or more, special characters
+
+
+
This string contains the '
special escaping character, and the
+:
special character, so it needs to be escaped in this way:
+
+
text=this is a \'string\'\: may contain one, or more, special characters
+
+
+
A second level of escaping is required when embedding the filter
+description in a filtergraph description, in order to escape all the
+filtergraph special characters. Thus the example above becomes:
+
+
drawtext=text=this is a \\\'string\\\'\\: may contain one\, or more\, special characters
+
+
(note that in addition to the \'
escaping special characters,
+also ,
needs to be escaped).
+
+
Finally an additional level of escaping is needed when writing the
+filtergraph description in a shell command, which depends on the
+escaping rules of the adopted shell. For example, assuming that
+\
is special and needs to be escaped with another \
, the
+previous string will finally result in:
+
+
-vf "drawtext=text=this is a \\\\\\'string\\\\\\'\\\\: may contain one\\, or more\\, special characters"
+
+
+
+
26 Timeline editing# TOC
+
+
Some filters support a generic enable option. For the filters
+supporting timeline editing, this option can be set to an expression which is
+evaluated before sending a frame to the filter. If the evaluation is non-zero,
+the filter will be enabled, otherwise the frame will be sent unchanged to the
+next filter in the filtergraph.
+
+
The expression accepts the following values:
+
+‘t ’
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+‘n ’
+sequential number of the input frame, starting from 0
+
+
+‘pos ’
+the position in the file of the input frame, NAN if unknown
+
+
+‘w ’
+‘h ’
+width and height of the input frame if video
+
+
+
+
Additionally, these filters support an enable command that can be used
+to re-define the expression.
+
+
Like any other filtering option, the enable option follows the same
+rules.
+
+
For example, to enable a blur filter (smartblur ) from 10 seconds to 3
+minutes, and a curves filter starting at 3 seconds:
+
+
smartblur = enable='between(t,10,3*60)',
+curves = enable='gte(t,3)' : preset=cross_process
+
+
+
+
+
27 Audio Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the audio filters included in your
+build.
+
+
Below is a description of the currently available audio filters.
+
+
+
27.1 adelay# TOC
+
+
Delay one or more audio channels.
+
+
Samples in delayed channel are filled with silence.
+
+
The filter accepts the following option:
+
+
+delays
+Set list of delays in milliseconds for each channel separated by ’|’.
+At least one delay greater than 0 should be provided.
+Unused delays will be silently ignored. If number of given delays is
+smaller than number of channels all remaining channels will not be delayed.
+
+
+
+
+
27.1.1 Examples# TOC
+
+
+ Delay first channel by 1.5 seconds, the third channel by 0.5 seconds and leave
+the second channel (and any other channels that may be present) unchanged.
+
+
+
+
+
27.2 aecho# TOC
+
+
Apply echoing to the input audio.
+
+
Echoes are reflected sound and can occur naturally amongst mountains
+(and sometimes large buildings) when talking or shouting; digital echo
+effects emulate this behaviour and are often used to help fill out the
+sound of a single instrument or vocal. The time difference between the
+original signal and the reflection is the delay
, and the
+loudness of the reflected signal is the decay
.
+Multiple echoes can have different delays and decays.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain of reflected signal. Default is 0.6
.
+
+
+out_gain
+Set output gain of reflected signal. Default is 0.3
.
+
+
+delays
+Set list of time intervals in milliseconds between original signal and reflections
+separated by ’|’. Allowed range for each delay
is (0 - 90000.0]
.
+Default is 1000
.
+
+
+decays
+Set list of loudnesses of reflected signals separated by ’|’.
+Allowed range for each decay
is (0 - 1.0]
.
+Default is 0.5
.
+
+
+
+
+
27.2.1 Examples# TOC
+
+
+ Make it sound as if there are twice as many instruments as are actually playing:
+
+
+ If delay is very short, then it sound like a (metallic) robot playing music:
+
+
+ A longer delay will sound like an open air concert in the mountains:
+
+
aecho=0.8:0.9:1000:0.3
+
+
+ Same as above but with one more mountain:
+
+
aecho=0.8:0.9:1000|1800:0.3|0.25
+
+
+
+
+
27.3 aeval# TOC
+
+
Modify an audio signal according to the specified expressions.
+
+
This filter accepts one or more expressions (one for each channel),
+which are evaluated and used to modify a corresponding audio signal.
+
+
It accepts the following parameters:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. If
+the number of input channels is greater than the number of
+expressions, the last specified expression is used for the remaining
+output channels.
+
+
+channel_layout, c
+Set output channel layout. If not specified, the channel layout is
+specified by the number of expressions. If set to ‘same ’, it will
+use by default the same input channel layout.
+
+
+
+
Each expression in exprs can contain the following constants and functions:
+
+
+ch
+channel number of the current expression
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+s
+sample rate
+
+
+t
+time of the evaluated sample expressed in seconds
+
+
+nb_in_channels
+nb_out_channels
+input and output number of channels
+
+
+val(CH)
+the value of input channel with number CH
+
+
+
+
Note: this filter is slow. For faster processing you should use a
+dedicated filter.
+
+
+
27.3.1 Examples# TOC
+
+
+ Half volume:
+
+
aeval=val(ch)/2:c=same
+
+
+ Invert phase of the second channel:
+
+
+
+
+
27.4 afade# TOC
+
+
Apply fade-in/out effect to input audio.
+
+
A description of the accepted parameters follows.
+
+
+type, t
+Specify the effect type, can be either in
for fade-in, or
+out
for a fade-out effect. Default is in
.
+
+
+start_sample, ss
+Specify the number of the start sample for starting to apply the fade
+effect. Default is 0.
+
+
+nb_samples, ns
+Specify the number of samples for which the fade effect has to last. At
+the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence. Default is 44100.
+
+
+start_time, st
+Specify the start time of the fade effect. Default is 0.
+The value must be specified as a time duration; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+If set this option is used instead of start_sample .
+
+
+duration, d
+Specify the duration of the fade effect. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+At the end of the fade-in effect the output audio will have the same
+volume as the input audio, at the end of the fade-out transition
+the output audio will be silence.
+By default the duration is determined by nb_samples .
+If set this option is used instead of nb_samples .
+
+
+curve
+Set curve for fade transition.
+
+It accepts the following values:
+
+tri
+select triangular, linear slope (default)
+
+qsin
+select quarter of sine wave
+
+hsin
+select half of sine wave
+
+esin
+select exponential sine wave
+
+log
+select logarithmic
+
+par
+select inverted parabola
+
+qua
+select quadratic
+
+cub
+select cubic
+
+squ
+select square root
+
+cbr
+select cubic root
+
+
+
+
+
+
+
27.4.1 Examples# TOC
+
+
+ Fade in first 15 seconds of audio:
+
+
+ Fade out last 25 seconds of a 900 seconds audio:
+
+
afade=t=out:st=875:d=25
+
+
+
+
+
27.5 aformat# TOC
+
+
Set output format constraints for the input audio. The framework will
+negotiate the most appropriate format to minimize conversions.
+
+
It accepts the following parameters:
+
+sample_fmts
+A ’|’-separated list of requested sample formats.
+
+
+sample_rates
+A ’|’-separated list of requested sample rates.
+
+
+channel_layouts
+A ’|’-separated list of requested channel layouts.
+
+See (ffmpeg-utils)the Channel Layout section in the ffmpeg-utils(1) manual
+for the required syntax.
+
+
+
+
If a parameter is omitted, all values are allowed.
+
+
Force the output to either unsigned 8-bit or signed 16-bit stereo
+
+
aformat=sample_fmts=u8|s16:channel_layouts=stereo
+
+
+
+
27.6 allpass# TOC
+
+
Apply a two-pole all-pass filter with central frequency (in Hz)
+frequency , and filter-width width .
+An all-pass filter changes the audio’s frequency to phase relationship
+without changing its frequency to amplitude relationship.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
27.7 amerge# TOC
+
+
Merge two or more audio streams into a single multi-channel stream.
+
+
The filter accepts the following options:
+
+
+inputs
+Set the number of inputs. Default is 2.
+
+
+
+
+
If the channel layouts of the inputs are disjoint, and therefore compatible,
+the channel layout of the output will be set accordingly and the channels
+will be reordered as necessary. If the channel layouts of the inputs are not
+disjoint, the output will have all the channels of the first input then all
+the channels of the second input, in that order, and the channel layout of
+the output will be the default value corresponding to the total number of
+channels.
+
+
For example, if the first input is in 2.1 (FL+FR+LF) and the second input
+is FC+BL+BR, then the output will be in 5.1, with the channels in the
+following order: a1, a2, b1, a3, b2, b3 (a1 is the first channel of the
+first input, b1 is the first channel of the second input).
+
+
On the other hand, if both input are in stereo, the output channels will be
+in the default order: a1, a2, b1, b2, and the channel layout will be
+arbitrarily set to 4.0, which may or may not be the expected value.
+
+
All inputs must have the same sample rate, and format.
+
+
If inputs do not have the same duration, the output will stop with the
+shortest.
+
+
+
27.7.1 Examples# TOC
+
+
+ Merge two mono files into a stereo stream:
+
+
amovie=left.wav [l] ; amovie=right.mp3 [r] ; [l] [r] amerge
+
+
+ Multiple merges assuming 1 video stream and 6 audio streams in input.mkv :
+
+
ffmpeg -i input.mkv -filter_complex "[0:1][0:2][0:3][0:4][0:5][0:6] amerge=inputs=6" -c:a pcm_s16le output.mkv
+
+
+
+
+
27.8 amix# TOC
+
+
Mixes multiple audio inputs into a single output.
+
+
Note that this filter only supports float samples (the amerge
+and pan audio filters support many formats). If the amix
+input has integer samples then aresample will be automatically
+inserted to perform the conversion to float samples.
+
+
For example
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex amix=inputs=3:duration=first:dropout_transition=3 OUTPUT
+
+
will mix 3 input audio streams to a single output with the same duration as the
+first input and a dropout transition time of 3 seconds.
+
+
It accepts the following parameters:
+
+inputs
+The number of inputs. If unspecified, it defaults to 2.
+
+
+duration
+How to determine the end-of-stream.
+
+longest
+The duration of the longest input. (default)
+
+
+shortest
+The duration of the shortest input.
+
+
+first
+The duration of the first input.
+
+
+
+
+
+dropout_transition
+The transition time, in seconds, for volume renormalization when an input
+stream ends. The default value is 2 seconds.
+
+
+
+
+
+
27.9 anull# TOC
+
+
Pass the audio source unchanged to the output.
+
+
+
27.10 apad# TOC
+
+
Pad the end of an audio stream with silence.
+
+
This can be used together with ffmpeg
-shortest to
+extend audio streams to the same length as the video stream.
+
+
A description of the accepted options follows.
+
+
+packet_size
+Set silence packet size. Default value is 4096.
+
+
+pad_len
+Set the number of samples of silence to add to the end. After the
+value is reached, the stream is terminated. This option is mutually
+exclusive with whole_len .
+
+
+whole_len
+Set the minimum total number of samples in the output audio stream. If
+the value is longer than the input audio length, silence is added to
+the end, until the value is reached. This option is mutually exclusive
+with pad_len .
+
+
+
+
If neither the pad_len nor the whole_len option is
+set, the filter will add silence to the end of the input stream
+indefinitely.
+
+
+
27.10.1 Examples# TOC
+
+
+ Add 1024 samples of silence to the end of the input:
+
+
+ Make sure the audio output will contain at least 10000 samples, pad
+the input with silence if required:
+
+
+ Use ffmpeg
to pad the audio input with silence, so that the
+video stream will always be the shortest and will be converted
+until the end in the output file when using the shortest
+option:
+
+
ffmpeg -i VIDEO -i AUDIO -filter_complex "[1:0]apad" -shortest OUTPUT
+
+
+
+
+
27.11 aphaser# TOC
+
Add a phasing effect to the input audio.
+
+
+A phaser filter creates a series of peaks and troughs in the frequency spectrum.
+The position of the peaks and troughs are modulated so that they vary over time, creating a sweeping effect.
+
+
A description of the accepted parameters follows.
+
+
+in_gain
+Set input gain. Default is 0.4.
+
+
+out_gain
+Set output gain. Default is 0.74
+
+
+delay
+Set delay in milliseconds. Default is 3.0.
+
+
+decay
+Set decay. Default is 0.4.
+
+
+speed
+Set modulation speed in Hz. Default is 0.5.
+
+
+type
+Set modulation type. Default is triangular.
+
+It accepts the following values:
+
+‘triangular, t ’
+‘sinusoidal, s ’
+
+
+
+
+
+
27.12 aresample# TOC
+
+
Resample the input audio to the specified parameters, using the
+libswresample library. If none are specified then the filter will
+automatically convert between its input and output.
+
+
This filter is also able to stretch/squeeze the audio data to make it match
+the timestamps or to inject silence / cut out audio to make it match the
+timestamps, do a combination of both or do neither.
+
+
The filter accepts the syntax
+[sample_rate :]resampler_options , where sample_rate
+expresses a sample rate and resampler_options is a list of
+key =value pairs, separated by ":". See the
+ffmpeg-resampler manual for the complete list of supported options.
+
+
+
27.12.1 Examples# TOC
+
+
+ Resample the input audio to 44100Hz:
+
+
+ Stretch/squeeze samples to the given timestamps, with a maximum of 1000
+samples per second compensation:
+
+
+
+
+
27.13 asetnsamples# TOC
+
+
Set the number of samples per each output audio frame.
+
+
+The last output packet may contain a different number of samples, as
+the filter will flush all the remaining samples when the input audio
+signals its end.
+
+
The filter accepts the following options:
+
+
+nb_out_samples, n
+Set the number of samples per each output audio frame. The number is
+intended as the number of samples per each channel .
+Default value is 1024.
+
+
+pad, p
+If set to 1, the filter will pad the last audio frame with zeroes, so
+that the last frame will contain the same number of samples as the
+previous ones. Default value is 1.
+
+
+
+
For example, to set the number of per-frame samples to 1234 and
+disable padding for the last frame, use:
+
+
asetnsamples=n=1234:p=0
+
+
+
+
27.14 asetrate# TOC
+
+
Set the sample rate without altering the PCM data.
+This will result in a change of speed and pitch.
+
+
The filter accepts the following options:
+
+
+sample_rate, r
+Set the output sample rate. Default is 44100 Hz.
+
+
+
+
+
27.15 ashowinfo# TOC
+
+
Show a line containing various information for each input audio frame.
+The input audio is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The presentation timestamp of the input frame, in time base units; the time base
+depends on the filter input pad, and is usually 1/sample_rate .
+
+
+pts_time
+The presentation timestamp of the input frame in seconds.
+
+
+pos
+position of the frame in the input stream, -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic audio)
+
+
+fmt
+The sample format.
+
+
+chlayout
+The channel layout.
+
+
+rate
+The sample rate for the audio frame.
+
+
+nb_samples
+The number of samples (per channel) in the frame.
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of the audio data. For planar
+audio, the data is treated as if all the planes were concatenated.
+
+
+plane_checksums
+A list of Adler-32 checksums for each data plane.
+
+
+
+
+
27.16 astats# TOC
+
+
Display time domain statistical information about the audio channels.
+Statistics are calculated and displayed for each audio channel and,
+where applicable, an overall figure is also given.
+
+
It accepts the following option:
+
+length
+Short window length in seconds, used for peak and trough RMS measurement.
+Default is 0.05
(50 milliseconds). Allowed range is [0.1 - 10]
.
+
+
+
+
A description of each shown parameter follows:
+
+
+DC offset
+Mean amplitude displacement from zero.
+
+
+Min level
+Minimal sample level.
+
+
+Max level
+Maximal sample level.
+
+
+Peak level dB
+RMS level dB
+Standard peak and RMS level measured in dBFS.
+
+
+RMS peak dB
+RMS trough dB
+Peak and trough values for RMS level measured over a short window.
+
+
+Crest factor
+Standard ratio of peak to RMS level (note: not in dB).
+
+
+Flat factor
+Flatness (i.e. consecutive samples with the same value) of the signal at its peak levels
+(i.e. either Min level or Max level ).
+
+
+Peak count
+Number of occasions (not the number of samples) that the signal attained either
+Min level or Max level .
+
+
+
+
+
27.17 astreamsync# TOC
+
+
Forward two audio streams and control the order the buffers are forwarded.
+
+
The filter accepts the following options:
+
+
+expr, e
+Set the expression deciding which stream should be
+forwarded next: if the result is negative, the first stream is forwarded; if
+the result is positive or zero, the second stream is forwarded. It can use
+the following variables:
+
+
+b1 b2
+number of buffers forwarded so far on each stream
+
+s1 s2
+number of samples forwarded so far on each stream
+
+t1 t2
+current timestamp of each stream
+
+
+
+The default value is t1-t2
, which means to always forward the stream
+that has a smaller timestamp.
+
+
+
+
+
27.17.1 Examples# TOC
+
+
Stress-test amerge
by randomly sending buffers on the wrong
+input, while avoiding too much of a desynchronization:
+
+
amovie=file.ogg [a] ; amovie=file.mp3 [b] ;
+[a] [b] astreamsync=(2*random(1))-1+tanh(5*(t1-t2)) [a2] [b2] ;
+[a2] [b2] amerge
+
+
+
+
27.18 asyncts# TOC
+
+
Synchronize audio data with timestamps by squeezing/stretching it and/or
+dropping samples/adding silence when needed.
+
+
This filter is not built by default, please use aresample to do squeezing/stretching.
+
+
It accepts the following parameters:
+
+compensate
+Enable stretching/squeezing the data to make it match the timestamps. Disabled
+by default. When disabled, time gaps are covered with silence.
+
+
+min_delta
+The minimum difference between timestamps and audio data (in seconds) to trigger
+adding/dropping samples. The default value is 0.1. If you get an imperfect
+sync with this filter, try setting this parameter to 0.
+
+
+max_comp
+The maximum compensation in samples per second. Only relevant with compensate=1.
+The default value is 500.
+
+
+first_pts
+Assume that the first PTS should be this value. The time base is 1 / sample
+rate. This allows for padding/trimming at the start of the stream. By default,
+no assumption is made about the first frame’s expected PTS, so no padding or
+trimming is done. For example, this could be set to 0 to pad the beginning with
+silence if an audio stream starts after the video stream or to trim any samples
+with a negative PTS due to encoder delay.
+
+
+
+
+
+
27.19 atempo# TOC
+
+
Adjust audio tempo.
+
+
The filter accepts exactly one parameter, the audio tempo. If not
+specified then the filter will assume nominal 1.0 tempo. Tempo must
+be in the [0.5, 2.0] range.
+
+
+
27.19.1 Examples# TOC
+
+
+ Slow down audio to 80% tempo:
+
+
+ To speed up audio to 125% tempo:
+
+
+
+
+
27.20 atrim# TOC
+
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Timestamp (in seconds) of the start of the section to keep. I.e. the audio
+sample with the timestamp start will be the first sample in the output.
+
+
+end
+Specify time of the first audio sample that will be dropped, i.e. the
+audio sample immediately preceding the one with the timestamp end will be
+the last sample in the output.
+
+
+start_pts
+Same as start , except this option sets the start timestamp in samples
+instead of seconds.
+
+
+end_pts
+Same as end , except this option sets the end timestamp in samples instead
+of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_sample
+The number of the first sample that should be output.
+
+
+end_sample
+The number of the first sample that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual .
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _sample options simply count the
+samples that pass through the filter. So start/end_pts and start/end_sample will
+give different results when the timestamps are wrong, inexact or do not start at
+zero. Also note that this filter does not modify the timestamps. If you wish
+to have the output timestamps start at zero, insert the asetpts filter after the
+atrim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all samples that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple atrim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -af atrim=60:120
+
+
+ Keep only the first 1000 samples:
+
+
ffmpeg -i INPUT -af atrim=end_sample=1000
+
+
+
+
+
+
27.21 bandpass# TOC
+
+
Apply a two-pole Butterworth band-pass filter with central
+frequency frequency , and (3dB-point) band-width width.
+The csg option selects a constant skirt gain (peak gain = Q)
+instead of the default: constant 0dB peak gain.
+The filter rolls off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+csg
+Constant skirt gain if set to 1. Defaults to 0.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
27.22 bandreject# TOC
+
+
Apply a two-pole Butterworth band-reject filter with central
+frequency frequency , and (3dB-point) band-width width .
+The filter rolls off at 6dB per octave (20dB per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency. Default is 3000
.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+
+
+
27.23 bass# TOC
+
+
Boost or cut the bass (lower) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at 0 Hz. Its useful range is about -20
+(for a large cut) to +20 (for a large boost).
+Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 100 Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep is the filter’s shelf transition.
+
+
+
+
+
27.24 biquad# TOC
+
+
Apply a biquad IIR filter with the given coefficients.
+Where b0 , b1 , b2 and a0 , a1 , a2
+are the numerator and denominator coefficients respectively.
+
+
+
27.25 bs2b# TOC
+
Bauer stereo to binaural transformation, which improves headphone listening of
+stereo audio records.
+
+
It accepts the following parameters:
+
+profile
+Pre-defined crossfeed level.
+
+default
+Default level (fcut=700, feed=50).
+
+
+cmoy
+Chu Moy circuit (fcut=700, feed=60).
+
+
+jmeier
+Jan Meier circuit (fcut=650, feed=95).
+
+
+
+
+
+fcut
+Cut frequency (in Hz).
+
+
+feed
+Feed level (in Hz).
+
+
+
+
+
+
27.26 channelmap# TOC
+
+
Remap input channels to new locations.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the output stream.
+
+
+map
+Map channels from input to output. The argument is a ’|’-separated list of
+mappings, each in the in_channel -out_channel
or
+in_channel form. in_channel can be either the name of the input
+channel (e.g. FL for front left) or its index in the input channel layout.
+out_channel is the name of the output channel or its index in the output
+channel layout. If out_channel is not given then it is implicitly an
+index, starting with zero and increasing by one for each mapping.
+
+
+
+
If no mapping is present, the filter will implicitly map input channels to
+output channels, preserving indices.
+
+
For example, assuming a 5.1+downmix input MOV file,
+
+
ffmpeg -i in.mov -filter 'channelmap=map=DL-FL|DR-FR' out.wav
+
+
will create an output WAV file tagged as stereo from the downmix channels of
+the input.
+
+
To fix a 5.1 WAV improperly encoded in AAC’s native channel order
+
+
ffmpeg -i in.wav -filter 'channelmap=1|2|0|5|3|4:channel_layout=5.1' out.wav
+
+
+
+
27.27 channelsplit# TOC
+
+
Split each channel from an input audio stream into a separate output stream.
+
+
It accepts the following parameters:
+
+channel_layout
+The channel layout of the input stream. The default is "stereo".
+
+
+
+
For example, assuming a stereo input MP3 file,
+
+
ffmpeg -i in.mp3 -filter_complex channelsplit out.mkv
+
+
will create an output Matroska file with two audio streams, one containing only
+the left channel and the other the right channel.
+
+
Split a 5.1 WAV file into per-channel files:
+
+
ffmpeg -i in.wav -filter_complex
+'channelsplit=channel_layout=5.1[FL][FR][FC][LFE][SL][SR]'
+-map '[FL]' front_left.wav -map '[FR]' front_right.wav -map '[FC]'
+front_center.wav -map '[LFE]' lfe.wav -map '[SL]' side_left.wav -map '[SR]'
+side_right.wav
+
+
+
+
27.28 compand# TOC
+
Compress or expand the audio’s dynamic range.
+
+
It accepts the following parameters:
+
+
+attacks
+decays
+A list of times in seconds for each channel over which the instantaneous level
+of the input signal is averaged to determine its volume. attacks refers to
+increase of volume and decays refers to decrease of volume. For most
+situations, the attack time (response to the audio getting louder) should be
+shorter than the decay time, because the human ear is more sensitive to sudden
+loud audio than sudden soft audio. A typical value for attack is 0.3 seconds and
+a typical value for decay is 0.8 seconds.
+
+
+points
+A list of points for the transfer function, specified in dB relative to the
+maximum possible signal amplitude. Each key points list must be defined using
+the following syntax: x0/y0|x1/y1|x2/y2|....
or
+x0/y0 x1/y1 x2/y2 ....
+
+The input values must be in strictly increasing order but the transfer function
+does not have to be monotonically rising. The point 0/0
is assumed but
+may be overridden (by 0/out-dBn
). Typical values for the transfer
+function are -70/-70|-60/-20
.
+
+
+soft-knee
+Set the curve radius in dB for all joints. It defaults to 0.01.
+
+
+gain
+Set the additional gain in dB to be applied at all points on the transfer
+function. This allows for easy adjustment of the overall gain.
+It defaults to 0.
+
+
+volume
+Set an initial volume, in dB, to be assumed for each channel when filtering
+starts. This permits the user to supply a nominal level initially, so that, for
+example, a very large gain is not applied to initial signal levels before the
+companding has begun to operate. A typical value for audio which is initially
+quiet is -90 dB. It defaults to 0.
+
+
+delay
+Set a delay, in seconds. The input audio is analyzed immediately, but audio is
+delayed before being fed to the volume adjuster. Specifying a delay
+approximately equal to the attack/decay times allows the filter to effectively
+operate in predictive rather than reactive mode. It defaults to 0.
+
+
+
+
+
+
27.28.1 Examples# TOC
+
+
+ Make music with both quiet and loud passages suitable for listening to in a
+noisy environment:
+
+
compand=.3|.3:1|1:-90/-60|-60/-40|-40/-30|-20/-20:6:0:-90:0.2
+
+
+ A noise gate for when the noise is at a lower level than the signal:
+
+
compand=.1|.1:.2|.2:-900/-900|-50.1/-900|-50/-50:.01:0:-90:.1
+
+
+ Here is another noise gate, this time for when the noise is at a higher level
+than the signal (making it, in some ways, similar to squelch):
+
+
compand=.1|.1:.1|.1:-45.1/-45.1|-45/-900|0/-900:.01:45:-90:.1
+
+
+
+
+
27.29 earwax# TOC
+
+
Make audio easier to listen to on headphones.
+
+
This filter adds ‘cues’ to 44.1kHz stereo (i.e. audio CD format) audio
+so that when listened to on headphones the stereo image is moved from
+inside your head (standard for headphones) to outside and in front of
+the listener (standard for speakers).
+
+
Ported from SoX.
+
+
+
27.30 equalizer# TOC
+
+
Apply a two-pole peaking equalisation (EQ) filter. With this
+filter, the signal-level at and around a selected frequency can
+be increased or decreased, whilst (unlike bandpass and bandreject
+filters) that at all other frequencies is unchanged.
+
+
In order to produce complex equalisation curves, this filter can
+be given several times, each with a different central frequency.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the filter’s central frequency in Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+
+
+gain, g
+Set the required gain or attenuation in dB.
+Beware of clipping when using a positive gain.
+
+
+
+
+
27.30.1 Examples# TOC
+
+ Attenuate 10 dB at 1000 Hz, with a bandwidth of 200 Hz:
+
+
equalizer=f=1000:width_type=h:width=200:g=-10
+
+
+ Apply 2 dB gain at 1000 Hz with Q 1 and attenuate 5 dB at 100 Hz with Q 2:
+
+
equalizer=f=1000:width_type=q:width=1:g=2,equalizer=f=100:width_type=q:width=2:g=-5
+
+
+
+
+
27.31 flanger# TOC
+
Apply a flanging effect to the audio.
+
+
The filter accepts the following options:
+
+
+delay
+Set base delay in milliseconds. Range from 0 to 30. Default value is 0.
+
+
+depth
+Set added swept delay in milliseconds. Range from 0 to 10. Default value is 2.
+
+
+regen
+Set percentage regeneration (delayed signal feedback). Range from -95 to 95.
+Default value is 0.
+
+
+width
+Set percentage of delayed signal mixed with original. Range from 0 to 100.
+Default value is 71.
+
+
+speed
+Set sweeps per second (Hz). Range from 0.1 to 10. Default value is 0.5.
+
+
+shape
+Set swept wave shape, can be triangular or sinusoidal .
+Default value is sinusoidal .
+
+
+phase
+Set swept wave percentage-shift for multi channel. Range from 0 to 100.
+Default value is 25.
+
+
+interp
+Set delay-line interpolation, linear or quadratic .
+Default is linear .
+
+
+
+
+
27.32 highpass# TOC
+
+
Apply a high-pass filter with 3dB point frequency.
+The filter can be either single-pole, or double-pole (the default).
+The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 3000.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
27.33 join# TOC
+
+
Join multiple input streams into one multi-channel stream.
+
+
It accepts the following parameters:
+
+inputs
+The number of input streams. It defaults to 2.
+
+
+channel_layout
+The desired output channel layout. It defaults to stereo.
+
+
+map
+Map channels from inputs to output. The argument is a ’|’-separated list of
+mappings, each in the input_idx .in_channel -out_channel
+form. input_idx is the 0-based index of the input stream. in_channel
+can be either the name of the input channel (e.g. FL for front left) or its
+index in the specified input stream. out_channel is the name of the output
+channel.
+
+
+
+
The filter will attempt to guess the mappings when they are not specified
+explicitly. It does so by first trying to find an unused matching input channel
+and if that fails it picks the first unused input channel.
+
+
Join 3 inputs (with properly set channel layouts):
+
+
ffmpeg -i INPUT1 -i INPUT2 -i INPUT3 -filter_complex join=inputs=3 OUTPUT
+
+
+
Build a 5.1 output from 6 single-channel streams:
+
+
ffmpeg -i fl -i fr -i fc -i sl -i sr -i lfe -filter_complex
+'join=inputs=6:channel_layout=5.1:map=0.0-FL|1.0-FR|2.0-FC|3.0-SL|4.0-SR|5.0-LFE'
+out
+
+
+
+
27.34 ladspa# TOC
+
+
Load a LADSPA (Linux Audio Developer’s Simple Plugin API) plugin.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-ladspa
.
+
+
+file, f
+Specifies the name of LADSPA plugin library to load. If the environment
+variable LADSPA_PATH
is defined, the LADSPA plugin is searched in
+each one of the directories specified by the colon separated list in
+LADSPA_PATH
, otherwise in the standard LADSPA paths, which are in
+this order: HOME/.ladspa/lib/ , /usr/local/lib/ladspa/ ,
+/usr/lib/ladspa/ .
+
+
+plugin, p
+Specifies the plugin within the library. Some libraries contain only
+one plugin, but others contain many of them. If this is not set filter
+will list all available plugins within the specified library.
+
+
+controls, c
+Set the ’|’ separated list of controls which are zero or more floating point
+values that determine the behavior of the loaded plugin (for example delay,
+threshold or gain).
+Controls need to be defined using the following syntax:
+c0=value0 |c1=value1 |c2=value2 |..., where
+valuei is the value set on the i -th control.
+If controls is set to help
, all available controls and
+their valid ranges are printed.
+
+
+sample_rate, s
+Specify the sample rate, default to 44100. Only used if the plugin has
+zero inputs.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame, default
+is 1024. Only used if the plugin has zero inputs.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified duration,
+as the generated audio is always cut at the end of a complete frame.
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+Only used if the plugin has zero inputs.
+
+
+
+
+
+
27.34.1 Examples# TOC
+
+
+ List all available plugins within amp (LADSPA example plugin) library:
+
+
+ List all available controls and their valid ranges for vcf_notch
+plugin from VCF
library:
+
+
ladspa=f=vcf:p=vcf_notch:c=help
+
+
+ Simulate low quality audio equipment using Computer Music Toolkit
(CMT)
+plugin library:
+
+
ladspa=file=cmt:plugin=lofi:controls=c0=22|c1=12|c2=12
+
+
+ Add reverberation to the audio using TAP-plugins
+(Tom’s Audio Processing plugins):
+
+
ladspa=file=tap_reverb:tap_reverb
+
+
+ Generate white noise, with 0.2 amplitude:
+
+
ladspa=file=cmt:noise_source_white:c=c0=.2
+
+
+ Generate 20 bpm clicks using plugin C* Click - Metronome
from the
+C* Audio Plugin Suite
(CAPS) library:
+
+
ladspa=file=caps:Click:c=c1=20
+
+
+ Apply C* Eq10X2 - Stereo 10-band equaliser
effect:
+
+
ladspa=caps:Eq10X2:c=c0=-48|c9=-24|c3=12|c4=2
+
+
+
+
+
27.34.2 Commands# TOC
+
+
This filter supports the following commands:
+
+cN
+Modify the N -th control value.
+
+If the specified value is not valid, it is ignored and prior one is kept.
+
+
+
+
+
27.35 lowpass# TOC
+
+
Apply a low-pass filter with 3dB point frequency.
+The filter can be either single-pole or double-pole (the default).
+The filter rolls off at 6dB per pole per octave (20dB per pole per decade).
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set frequency in Hz. Default is 500.
+
+
+poles, p
+Set number of poles. Default is 2.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Specify the band-width of a filter in width_type units.
+Applies only to double-pole filter.
+The default is 0.707q and gives a Butterworth response.
+
+
+
+
+
27.36 pan# TOC
+
+
Mix channels with specific gain levels. The filter accepts the output
+channel layout followed by a set of channels definitions.
+
+
This filter is also designed to efficiently remap the channels of an audio
+stream.
+
+
The filter accepts parameters of the form:
+"l |outdef |outdef |..."
+
+
+l
+output channel layout or number of channels
+
+
+outdef
+output channel specification, of the form:
+"out_name =[gain *]in_name [+[gain *]in_name ...]"
+
+
+out_name
+output channel to define, either a channel name (FL, FR, etc.) or a channel
+number (c0, c1, etc.)
+
+
+gain
+multiplicative coefficient for the channel, 1 leaving the volume unchanged
+
+
+in_name
+input channel to use, see out_name for details; it is not possible to mix
+named and numbered input channels
+
+
+
+
If the ‘=’ in a channel specification is replaced by ‘<’, then the gains for
+that specification will be renormalized so that the total is 1, thus
+avoiding clipping noise.
+
+
+
27.36.1 Mixing examples# TOC
+
+
For example, if you want to down-mix from stereo to mono, but with a bigger
+factor for the left channel:
+
+
pan=1c|c0=0.9*c0+0.1*c1
+
+
+
A customized down-mix to stereo that works automatically for 3-, 4-, 5- and
+7-channels surround:
+
+
pan=stereo| FL < FL + 0.5*FC + 0.6*BL + 0.6*SL | FR < FR + 0.5*FC + 0.6*BR + 0.6*SR
+
+
+
Note that ffmpeg
integrates a default down-mix (and up-mix) system
+that should be preferred (see "-ac" option) unless you have very specific
+needs.
+
+
+
27.36.2 Remapping examples# TOC
+
+
The channel remapping will be effective if, and only if:
+
+
+ gain coefficients are zeroes or ones,
+ only one input per channel output,
+
+
+
If all these conditions are satisfied, the filter will notify the user ("Pure
+channel mapping detected"), and use an optimized and lossless method to do the
+remapping.
+
+
For example, if you have a 5.1 source and want a stereo audio stream by
+dropping the extra channels:
+
+
pan="stereo| c0=FL | c1=FR"
+
+
+
Given the same source, you can also switch front left and front right channels
+and keep the input channel layout:
+
+
pan="5.1| c0=c1 | c1=c0 | c2=c2 | c3=c3 | c4=c4 | c5=c5"
+
+
+
If the input is a stereo audio stream, you can mute the front left channel (and
+still keep the stereo channel layout) with:
+
+
+
Still with a stereo audio stream input, you can copy the right channel in both
+front left and right:
+
+
pan="stereo| c0=FR | c1=FR"
+
+
+
+
27.37 replaygain# TOC
+
+
ReplayGain scanner filter. This filter takes an audio stream as an input and
+outputs it unchanged.
+At end of filtering it displays track_gain
and track_peak
.
+
+
+
27.38 resample# TOC
+
+
Convert the audio sample format, sample rate and channel layout. It is
+not meant to be used directly.
+
+
+
27.39 silencedetect# TOC
+
+
Detect silence in an audio stream.
+
+
This filter logs a message when it detects that the input audio volume is less
+or equal to a noise tolerance value for a duration greater or equal to the
+minimum detected noise duration.
+
+
The printed times and duration are expressed in seconds.
+
+
The filter accepts the following options:
+
+
+duration, d
+Set silence duration until notification (default is 2 seconds).
+
+
+noise, n
+Set noise tolerance. Can be specified in dB (in case "dB" is appended to the
+specified value) or amplitude ratio. Default is -60dB, or 0.001.
+
+
+
+
+
27.39.1 Examples# TOC
+
+
+ Detect 5 seconds of silence with -50dB noise tolerance:
+
+
silencedetect=n=-50dB:d=5
+
+
+ Complete example with ffmpeg
to detect silence with 0.0001 noise
+tolerance in silence.mp3 :
+
+
ffmpeg -i silence.mp3 -af silencedetect=noise=0.0001 -f null -
+
+
+
+
+
27.40 silenceremove# TOC
+
+
Remove silence from the beginning, middle or end of the audio.
+
+
The filter accepts the following options:
+
+
+start_periods
+This value is used to indicate if audio should be trimmed at beginning of
+the audio. A value of zero indicates no silence should be trimmed from the
+beginning. When specifying a non-zero value, it trims audio up until it
+finds non-silence. Normally, when trimming silence from beginning of audio
+the start_periods will be 1
but it can be increased to higher
+values to trim all audio up to specific count of non-silence periods.
+Default value is 0
.
+
+
+start_duration
+Specify the amount of time that non-silence must be detected before it stops
+trimming audio. By increasing the duration, bursts of noises can be treated
+as silence and trimmed off. Default value is 0
.
+
+
+start_threshold
+This indicates what sample value should be treated as silence. For digital
+audio, a value of 0
may be fine but for audio recorded from analog,
+you may wish to increase the value to account for background noise.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+stop_periods
+Set the count for trimming silence from the end of audio.
+To remove silence from the middle of a file, specify a stop_periods
+that is negative. This value is then treated as a positive value and is
+used to indicate the effect should restart processing as specified by
+start_periods , making it suitable for removing periods of silence
+in the middle of the audio.
+Default value is 0
.
+
+
+stop_duration
+Specify a duration of silence that must exist before audio is not copied any
+more. By specifying a higher duration, silence that is wanted can be left in
+the audio.
+Default value is 0
.
+
+
+stop_threshold
+This is the same as start_threshold but for trimming silence from
+the end of audio.
+Can be specified in dB (in case "dB" is appended to the specified value)
+or amplitude ratio. Default value is 0
.
+
+
+leave_silence
+This indicate that stop_duration length of audio should be left intact
+at the beginning of each period of silence.
+For example, if you want to remove long pauses between words but do not want
+to remove the pauses completely. Default value is 0
.
+
+
+
+
+
+
27.40.1 Examples# TOC
+
+
+ The following example shows how this filter can be used to start a recording
+that does not contain the delay at the start which usually occurs between
+pressing the record button and the start of the performance:
+
+
silenceremove=1:5:0.02
+
+
+
+
+
27.41 treble# TOC
+
+
Boost or cut treble (upper) frequencies of the audio using a two-pole
+shelving filter with a response similar to that of a standard
+hi-fi’s tone-controls. This is also known as shelving equalisation (EQ).
+
+
The filter accepts the following options:
+
+
+gain, g
+Give the gain at whichever is the lower of ~22 kHz and the
+Nyquist frequency. Its useful range is about -20 (for a large cut)
+to +20 (for a large boost). Beware of clipping when using a positive gain.
+
+
+frequency, f
+Set the filter’s central frequency and so can be used
+to extend or reduce the frequency range to be boosted or cut.
+The default value is 3000 Hz.
+
+
+width_type
+Set method to specify band-width of filter.
+
+h
+Hz
+
+q
+Q-Factor
+
+o
+octave
+
+s
+slope
+
+
+
+
+width, w
+Determine how steep is the filter’s shelf transition.
+
+
+
+
+
27.42 volume# TOC
+
+
Adjust the input audio volume.
+
+
It accepts the following parameters:
+
+volume
+Set audio volume expression.
+
+Output values are clipped to the maximum value.
+
+The output audio volume is given by the relation:
+
+
output_volume = volume * input_volume
+
+
+The default value for volume is "1.0".
+
+
+precision
+This parameter represents the mathematical precision.
+
+It determines which input sample formats will be allowed, which affects the
+precision of the volume scaling.
+
+
+fixed
+8-bit fixed-point; this limits input sample format to U8, S16, and S32.
+
+float
+32-bit floating-point; this limits input sample format to FLT. (default)
+
+double
+64-bit floating-point; this limits input sample format to DBL.
+
+
+
+
+replaygain
+Choose the behaviour on encountering ReplayGain side data in input frames.
+
+
+drop
+Remove ReplayGain side data, ignoring its contents (the default).
+
+
+ignore
+Ignore ReplayGain side data, but leave it in the frame.
+
+
+track
+Prefer the track gain, if present.
+
+
+album
+Prefer the album gain, if present.
+
+
+
+
+replaygain_preamp
+Pre-amplification gain in dB to apply to the selected replaygain gain.
+
+Default value for replaygain_preamp is 0.0.
+
+
+eval
+Set when the volume expression is evaluated.
+
+It accepts the following values:
+
+‘once ’
+only evaluate expression once during the filter initialization, or
+when the ‘volume ’ command is sent
+
+
+‘frame ’
+evaluate expression for each incoming frame
+
+
+
+Default value is ‘once ’.
+
+
+
+
The volume expression can contain the following parameters.
+
+
+n
+frame number (starting at zero)
+
+nb_channels
+number of channels
+
+nb_consumed_samples
+number of samples consumed by the filter
+
+nb_samples
+number of samples in the current frame
+
+pos
+original frame position in the file
+
+pts
+frame PTS
+
+sample_rate
+sample rate
+
+startpts
+PTS at start of stream
+
+startt
+time at start of stream
+
+t
+frame time
+
+tb
+timestamp timebase
+
+volume
+last set volume value
+
+
+
+
Note that when eval is set to ‘once ’ only the
+sample_rate and tb variables are available, all other
+variables will evaluate to NAN.
+
+
+
27.42.1 Commands# TOC
+
+
This filter supports the following commands:
+
+volume
+Modify the volume expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+replaygain_noclip
+Prevent clipping by limiting the gain applied.
+
+Default value for replaygain_noclip is 1.
+
+
+
+
+
+
27.42.2 Examples# TOC
+
+
+
+
+
27.43 volumedetect# TOC
+
+
Detect the volume of the input audio.
+
+
The filter has no parameters. The input is not modified. Statistics about
+the volume will be printed in the log when the input stream end is reached.
+
+
In particular it will show the mean volume (root mean square), maximum
+volume (on a per-sample basis), and the beginning of a histogram of the
+registered volume values (from the maximum value to a cumulated 1/1000 of
+the samples).
+
+
All volumes are in decibels relative to the maximum PCM value.
+
+
+
27.43.1 Examples# TOC
+
+
Here is an excerpt of the output:
+
+
[Parsed_volumedetect_0 0xa23120] mean_volume: -27 dB
+[Parsed_volumedetect_0 0xa23120] max_volume: -4 dB
+[Parsed_volumedetect_0 0xa23120] histogram_4db: 6
+[Parsed_volumedetect_0 0xa23120] histogram_5db: 62
+[Parsed_volumedetect_0 0xa23120] histogram_6db: 286
+[Parsed_volumedetect_0 0xa23120] histogram_7db: 1042
+[Parsed_volumedetect_0 0xa23120] histogram_8db: 2551
+[Parsed_volumedetect_0 0xa23120] histogram_9db: 4609
+[Parsed_volumedetect_0 0xa23120] histogram_10db: 8409
+
+
+
It means that:
+
+ The mean square energy is approximately -27 dB, or 10^-2.7.
+ The largest sample is at -4 dB, or more precisely between -4 dB and -5 dB.
+ There are 6 samples at -4 dB, 62 at -5 dB, 286 at -6 dB, etc.
+
+
+
In other words, raising the volume by +4 dB does not cause any clipping,
+raising it by +5 dB causes clipping for 6 samples, etc.
+
+
+
+
28 Audio Sources# TOC
+
+
Below is a description of the currently available audio sources.
+
+
+
28.1 abuffer# TOC
+
+
Buffer audio frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/asrc_abuffer.h .
+
+
It accepts the following parameters:
+
+time_base
+The timebase which will be used for timestamps of submitted frames. It must be
+either a floating-point number or in numerator /denominator form.
+
+
+sample_rate
+The sample rate of the incoming audio buffers.
+
+
+sample_fmt
+The sample format of the incoming audio buffers.
+Either a sample format name or its corresponding integer representation from
+the enum AVSampleFormat in libavutil/samplefmt.h
+
+
+channel_layout
+The channel layout of the incoming audio buffers.
+Either a channel layout name from channel_layout_map in
+libavutil/channel_layout.c or its corresponding integer representation
+from the AV_CH_LAYOUT_* macros in libavutil/channel_layout.h
+
+
+channels
+The number of channels of the incoming audio buffers.
+If both channels and channel_layout are specified, then they
+must be consistent.
+
+
+
+
+
+
28.1.1 Examples# TOC
+
+
+
abuffer=sample_rate=44100:sample_fmt=s16p:channel_layout=stereo
+
+
+
will instruct the source to accept planar 16bit signed stereo at 44100Hz.
+Since the sample format with name "s16p" corresponds to the number
+6 and the "stereo" channel layout corresponds to the value 0x3, this is
+equivalent to:
+
+
abuffer=sample_rate=44100:sample_fmt=6:channel_layout=0x3
+
+
+
+
28.2 aevalsrc# TOC
+
+
Generate an audio signal specified by an expression.
+
+
This source accepts in input one or more expressions (one for each
+channel), which are evaluated and used to generate a corresponding
+audio signal.
+
+
This source accepts the following options:
+
+
+exprs
+Set the ’|’-separated expressions list for each separate channel. In case the
+channel_layout option is not specified, the selected channel layout
+depends on the number of provided expressions. Otherwise the last
+specified expression is applied to the remaining output channels.
+
+
+channel_layout, c
+Set the channel layout. The number of channels in the specified layout
+must be equal to the number of specified expressions.
+
+
+duration, d
+Set the minimum duration of the sourced audio. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+Note that the resulting duration may be greater than the specified
+duration, as the generated audio is always cut at the end of a
+complete frame.
+
+If not specified, or the expressed duration is negative, the audio is
+supposed to be generated forever.
+
+
+nb_samples, n
+Set the number of samples per channel per each output frame,
+default to 1024.
+
+
+sample_rate, s
+Specify the sample rate, default to 44100.
+
+
+
+
Each expression in exprs can contain the following constants:
+
+
+n
+number of the evaluated sample, starting from 0
+
+
+t
+time of the evaluated sample expressed in seconds, starting from 0
+
+
+s
+sample rate
+
+
+
+
+
+
28.2.1 Examples# TOC
+
+
+ Generate silence:
+
+
+ Generate a sin signal with frequency of 440 Hz, set sample rate to
+8000 Hz:
+
+
aevalsrc="sin(440*2*PI*t):s=8000"
+
+
+ Generate a two channels signal, specify the channel layout (Front
+Center + Back Center) explicitly:
+
+
aevalsrc="sin(420*2*PI*t)|cos(430*2*PI*t):c=FC|BC"
+
+
+ Generate white noise:
+
+
aevalsrc="-2+random(0)"
+
+
+ Generate an amplitude modulated signal:
+
+
aevalsrc="sin(10*2*PI*t)*sin(880*2*PI*t)"
+
+
+ Generate 2.5 Hz binaural beats on a 360 Hz carrier:
+
+
aevalsrc="0.1*sin(2*PI*(360-2.5/2)*t) | 0.1*sin(2*PI*(360+2.5/2)*t)"
+
+
+
+
+
+
28.3 anullsrc# TOC
+
+
The null audio source, return unprocessed audio frames. It is mainly useful
+as a template and to be employed in analysis / debugging tools, or as
+the source for filters which ignore the input data (for example the sox
+synth filter).
+
+
This source accepts the following options:
+
+
+channel_layout, cl
+
+Specifies the channel layout, and can be either an integer or a string
+representing a channel layout. The default value of channel_layout
+is "stereo".
+
+Check the channel_layout_map definition in
+libavutil/channel_layout.c for the mapping between strings and
+channel layout values.
+
+
+sample_rate, r
+Specifies the sample rate, and defaults to 44100.
+
+
+nb_samples, n
+Set the number of samples per requested frames.
+
+
+
+
+
+
28.3.1 Examples# TOC
+
+
+ Set the sample rate to 48000 Hz and the channel layout to AV_CH_LAYOUT_MONO.
+
+
+ Do the same operation with a more obvious syntax:
+
+
anullsrc=r=48000:cl=mono
+
+
+
+
All the parameters need to be explicitly defined.
+
+
+
28.4 flite# TOC
+
+
Synthesize a voice utterance using the libflite library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libflite
.
+
+
Note that the flite library is not thread-safe.
+
+
The filter accepts the following options:
+
+
+list_voices
+If set to 1, list the names of the available voices and exit
+immediately. Default value is 0.
+
+
+nb_samples, n
+Set the maximum number of samples per frame. Default value is 512.
+
+
+textfile
+Set the filename containing the text to speak.
+
+
+text
+Set the text to speak.
+
+
+voice, v
+Set the voice to use for the speech synthesis. Default value is
+kal
. See also the list_voices option.
+
+
+
+
+
28.4.1 Examples# TOC
+
+
+ Read from file speech.txt , and synthesize the text using the
+standard flite voice:
+
+
flite=textfile=speech.txt
+
+
+ Read the specified text selecting the slt
voice:
+
+
flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Input text to ffmpeg:
+
+
ffmpeg -f lavfi -i flite=text='So fare thee well, poor devil of a Sub-Sub, whose commentator I am':voice=slt
+
+
+ Make ffplay speak the specified text, using flite
and
+the lavfi
device:
+
+
ffplay -f lavfi flite=text='No more be grieved for which that thou hast done.'
+
+
+
+
For more information about libflite, check:
+http://www.speech.cs.cmu.edu/flite/
+
+
+
28.5 sine# TOC
+
+
Generate an audio signal made of a sine wave with amplitude 1/8.
+
+
The audio signal is bit-exact.
+
+
The filter accepts the following options:
+
+
+frequency, f
+Set the carrier frequency. Default is 440 Hz.
+
+
+beep_factor, b
+Enable a periodic beep every second with frequency beep_factor times
+the carrier frequency. Default is 0, meaning the beep is disabled.
+
+
+sample_rate, r
+Specify the sample rate, default is 44100.
+
+
+duration, d
+Specify the duration of the generated audio stream.
+
+
+samples_per_frame
+Set the number of samples per output frame, default is 1024.
+
+
+
+
+
28.5.1 Examples# TOC
+
+
+ Generate a simple 440 Hz sine wave:
+
+
+ Generate a 220 Hz sine wave with a 880 Hz beep each second, for 5 seconds:
+
+
sine=220:4:d=5
+sine=f=220:b=4:d=5
+sine=frequency=220:beep_factor=4:duration=5
+
+
+
+
+
+
+
29 Audio Sinks# TOC
+
+
Below is a description of the currently available audio sinks.
+
+
+
29.1 abuffersink# TOC
+
+
Buffer audio frames, and make them available to the end of filter chain.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVABufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
29.2 anullsink# TOC
+
+
Null audio sink; do absolutely nothing with the input audio. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
30 Video Filters# TOC
+
+
When you configure your FFmpeg build, you can disable any of the
+existing filters using --disable-filters
.
+The configure output will show the video filters included in your
+build.
+
+
Below is a description of the currently available video filters.
+
+
+
30.1 alphaextract# TOC
+
+
Extract the alpha component from the input as a grayscale video. This
+is especially useful with the alphamerge filter.
+
+
+
30.2 alphamerge# TOC
+
+
Add or replace the alpha component of the primary input with the
+grayscale value of a second input. This is intended for use with
+alphaextract to allow the transmission or storage of frame
+sequences that have alpha in a format that doesn’t support an alpha
+channel.
+
+
For example, to reconstruct full frames from a normal YUV-encoded video
+and a separate video created with alphaextract , you might use:
+
+
movie=in_alpha.mkv [alpha]; [in][alpha] alphamerge [out]
+
+
+
Since this filter is designed for reconstruction, it operates on frame
+sequences without considering timestamps, and terminates when either
+input reaches end of stream. This will cause problems if your encoding
+pipeline drops frames. If you’re trying to apply an image as an
+overlay to a video stream, consider the overlay filter instead.
+
+
+
30.3 ass# TOC
+
+
Same as the subtitles filter, except that it doesn’t require libavcodec
+and libavformat to work. On the other hand, it is limited to ASS (Advanced
+Substation Alpha) subtitles files.
+
+
This filter accepts the following option in addition to the common options from
+the subtitles filter:
+
+
+shaping
+Set the shaping engine
+
+Available values are:
+
+‘auto ’
+The default libass shaping engine, which is the best available.
+
+‘simple ’
+Fast, font-agnostic shaper that can do only substitutions
+
+‘complex ’
+Slower shaper using OpenType for substitutions and positioning
+
+
+
+The default is auto
.
+
+
+
+
+
30.4 bbox# TOC
+
+
Compute the bounding box for the non-black pixels in the input frame
+luminance plane.
+
+
This filter computes the bounding box containing all the pixels with a
+luminance value greater than the minimum allowed value.
+The parameters describing the bounding box are printed on the filter
+log.
+
+
The filter accepts the following option:
+
+
+min_val
+Set the minimal luminance value. Default is 16
.
+
+
+
+
+
30.5 blackdetect# TOC
+
+
Detect video intervals that are (almost) completely black. Can be
+useful to detect chapter transitions, commercials, or invalid
+recordings. Output lines contains the time for the start, end and
+duration of the detected black interval expressed in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
The filter accepts the following options:
+
+
+black_min_duration, d
+Set the minimum detected black duration expressed in seconds. It must
+be a non-negative floating point number.
+
+Default value is 2.0.
+
+
+picture_black_ratio_th, pic_th
+Set the threshold for considering a picture "black".
+Express the minimum value for the ratio:
+
+
nb_black_pixels / nb_pixels
+
+
+for which a picture is considered black.
+Default value is 0.98.
+
+
+pixel_black_th, pix_th
+Set the threshold for considering a pixel "black".
+
+The threshold expresses the maximum pixel luminance value for which a
+pixel is considered "black". The provided value is scaled according to
+the following equation:
+
+
absolute_threshold = luminance_minimum_value + pixel_black_th * luminance_range_size
+
+
+luminance_range_size and luminance_minimum_value depend on
+the input video format, the range is [0-255] for YUV full-range
+formats and [16-235] for YUV non full-range formats.
+
+Default value is 0.10.
+
+
+
+
The following example sets the maximum pixel threshold to the minimum
+value, and detects only black intervals of 2 or more seconds:
+
+
blackdetect=d=2:pix_th=0.00
+
+
+
+
30.6 blackframe# TOC
+
+
Detect frames that are (almost) completely black. Can be useful to
+detect chapter transitions or commercials. Output lines consist of
+the frame number of the detected frame, the percentage of blackness,
+the position in the file if known or -1 and the timestamp in seconds.
+
+
In order to display the output lines, you need to set the loglevel at
+least to the AV_LOG_INFO value.
+
+
It accepts the following parameters:
+
+
+amount
+The percentage of the pixels that have to be below the threshold; it defaults to
+98
.
+
+
+threshold, thresh
+The threshold below which a pixel value is considered black; it defaults to
+32
.
+
+
+
+
+
+
30.7 blend, tblend# TOC
+
+
Blend two video frames into each other.
+
+
The blend
filter takes two input streams and outputs one
+stream, the first input is the "top" layer and second input is
+"bottom" layer. Output terminates when shortest input terminates.
+
+
The tblend
(time blend) filter takes two consecutive frames
+from one single stream, and outputs the result obtained by blending
+the new frame on top of the old frame.
+
+
A description of the accepted options follows.
+
+
+c0_mode
+c1_mode
+c2_mode
+c3_mode
+all_mode
+Set blend mode for specific pixel component or all pixel components in case
+of all_mode . Default value is normal
.
+
+Available values for component modes are:
+
+‘addition ’
+‘and ’
+‘average ’
+‘burn ’
+‘darken ’
+‘difference ’
+‘difference128 ’
+‘divide ’
+‘dodge ’
+‘exclusion ’
+‘hardlight ’
+‘lighten ’
+‘multiply ’
+‘negation ’
+‘normal ’
+‘or ’
+‘overlay ’
+‘phoenix ’
+‘pinlight ’
+‘reflect ’
+‘screen ’
+‘softlight ’
+‘subtract ’
+‘vividlight ’
+‘xor ’
+
+
+
+c0_opacity
+c1_opacity
+c2_opacity
+c3_opacity
+all_opacity
+Set blend opacity for specific pixel component or all pixel components in case
+of all_opacity . Only used in combination with pixel component blend modes.
+
+
+c0_expr
+c1_expr
+c2_expr
+c3_expr
+all_expr
+Set blend expression for specific pixel component or all pixel components in case
+of all_expr . Note that related mode options will be ignored if those are set.
+
+The expressions can use the following variables:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+the coordinates of the current sample
+
+
+W
+H
+the width and height of currently filtered plane
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+TOP, A
+Value of pixel component at current location for first video frame (top layer).
+
+
+BOTTOM, B
+Value of pixel component at current location for second video frame (bottom layer).
+
+
+
+
+shortest
+Force termination when the shortest input terminates. Default is
+0
. This option is only defined for the blend
filter.
+
+
+repeatlast
+Continue applying the last bottom frame after the end of the stream. A value of
+0
disables the filter after the last frame of the bottom layer is reached.
+Default is 1
. This option is only defined for the blend
filter.
+
+
+
+
+
30.7.1 Examples# TOC
+
+
+ Apply transition from bottom layer to top layer in first 10 seconds:
+
+
blend=all_expr='A*(if(gte(T,10),1,T/10))+B*(1-(if(gte(T,10),1,T/10)))'
+
+
+ Apply 1x1 checkerboard effect:
+
+
blend=all_expr='if(eq(mod(X,2),mod(Y,2)),A,B)'
+
+
+ Apply uncover left effect:
+
+
blend=all_expr='if(gte(N*SW+X,W),A,B)'
+
+
+ Apply uncover down effect:
+
+
blend=all_expr='if(gte(Y-N*SH,0),A,B)'
+
+
+ Apply uncover up-left effect:
+
+
blend=all_expr='if(gte(T*SH*40+Y,H)*gte((T*40*SW+X)*W/H,W),A,B)'
+
+
+ Display differences between the current and the previous frame:
+
+
tblend=all_mode=difference128
+
+
+
+
+
30.8 boxblur# TOC
+
+
Apply a boxblur algorithm to the input video.
+
+
It accepts the following parameters:
+
+
+luma_radius, lr
+luma_power, lp
+chroma_radius, cr
+chroma_power, cp
+alpha_radius, ar
+alpha_power, ap
+
+
+
A description of the accepted options follows.
+
+
+luma_radius, lr
+chroma_radius, cr
+alpha_radius, ar
+Set an expression for the box radius in pixels used for blurring the
+corresponding input plane.
+
+The radius value must be a non-negative number, and must not be
+greater than the value of the expression min(w,h)/2
for the
+luma and alpha planes, and of min(cw,ch)/2
for the chroma
+planes.
+
+Default value for luma_radius is "2". If not specified,
+chroma_radius and alpha_radius default to the
+corresponding value set for luma_radius .
+
+The expressions can contain the following constants:
+
+w
+h
+The input width and height in pixels.
+
+
+cw
+ch
+The input chroma image width and height in pixels.
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p", hsub is 2 and vsub is 1.
+
+
+
+
+luma_power, lp
+chroma_power, cp
+alpha_power, ap
+Specify how many times the boxblur filter is applied to the
+corresponding plane.
+
+Default value for luma_power is 2. If not specified,
+chroma_power and alpha_power default to the
+corresponding value set for luma_power .
+
+A value of 0 will disable the effect.
+
+
+
+
+
30.8.1 Examples# TOC
+
+
+ Apply a boxblur filter with the luma, chroma, and alpha radii
+set to 2:
+
+
boxblur=luma_radius=2:luma_power=1
+boxblur=2:1
+
+
+ Set the luma radius to 2, and alpha and chroma radius to 0:
+
+
+ Set the luma and chroma radii to a fraction of the video dimension:
+
+
boxblur=luma_radius=min(h\,w)/10:luma_power=1:chroma_radius=min(cw\,ch)/10:chroma_power=1
+
+
+
+
+
30.9 codecview# TOC
+
+
Visualize information exported by some codecs.
+
+
Some codecs can export information through frames using side-data or other
+means. For example, some MPEG based codecs export motion vectors through the
+export_mvs flag in the codec flags2 option.
+
+
The filter accepts the following option:
+
+
+mv
+Set motion vectors to visualize.
+
+Available flags for mv are:
+
+
+‘pf ’
+forward predicted MVs of P-frames
+
+‘bf ’
+forward predicted MVs of B-frames
+
+‘bb ’
+backward predicted MVs of B-frames
+
+
+
+
+
+
+
30.9.1 Examples# TOC
+
+
+ Visualize multi-directional MVs from P- and B-frames using ffplay
:
+
+
ffplay -flags2 +export_mvs input.mpg -vf codecview=mv=pf+bf+bb
+
+
+
+
+
30.10 colorbalance# TOC
+
Modify intensity of primary colors (red, green and blue) of input frames.
+
+
The filter allows an input frame to be adjusted in the shadows, midtones or highlights
+regions for the red-cyan, green-magenta or blue-yellow balance.
+
+
A positive adjustment value shifts the balance towards the primary color, a negative
+value towards the complementary color.
+
+
The filter accepts the following options:
+
+
+rs
+gs
+bs
+Adjust red, green and blue shadows (darkest pixels).
+
+
+rm
+gm
+bm
+Adjust red, green and blue midtones (medium pixels).
+
+
+rh
+gh
+bh
+Adjust red, green and blue highlights (brightest pixels).
+
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+
+
+
30.10.1 Examples# TOC
+
+
+ Add red color cast to shadows:
+
+
+
+
+
30.11 colorlevels# TOC
+
+
Adjust video input frames using levels.
+
+
The filter accepts the following options:
+
+
+rimin
+gimin
+bimin
+aimin
+Adjust red, green, blue and alpha input black point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 0
.
+
+
+rimax
+gimax
+bimax
+aimax
+Adjust red, green, blue and alpha input white point.
+Allowed ranges for options are [-1.0, 1.0]
. Defaults are 1
.
+
+Input levels are used to lighten highlights (bright tones), darken shadows
+(dark tones), change the balance of bright and dark tones.
+
+
+romin
+gomin
+bomin
+aomin
+Adjust red, green, blue and alpha output black point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 0
.
+
+
+romax
+gomax
+bomax
+aomax
+Adjust red, green, blue and alpha output white point.
+Allowed ranges for options are [0, 1.0]
. Defaults are 1
.
+
+Output levels allows manual selection of a constrained output level range.
+
+
+
+
+
30.11.1 Examples# TOC
+
+
+ Make video output darker:
+
+
colorlevels=rimin=0.058:gimin=0.058:bimin=0.058
+
+
+ Increase contrast:
+
+
colorlevels=rimin=0.039:gimin=0.039:bimin=0.039:rimax=0.96:gimax=0.96:bimax=0.96
+
+
+ Make video output lighter:
+
+
colorlevels=rimax=0.902:gimax=0.902:bimax=0.902
+
+
+ Increase brightness:
+
+
colorlevels=romin=0.5:gomin=0.5:bomin=0.5
+
+
+
+
+
30.12 colorchannelmixer# TOC
+
+
Adjust video input frames by re-mixing color channels.
+
+
This filter modifies a color channel by adding the values associated to
+the other channels of the same pixels. For example if the value to
+modify is red, the output value will be:
+
+
red =red *rr + blue *rb + green *rg + alpha *ra
+
+
+
The filter accepts the following options:
+
+
+rr
+rg
+rb
+ra
+Adjust contribution of input red, green, blue and alpha channels for output red channel.
+Default is 1
for rr , and 0
for rg , rb and ra .
+
+
+gr
+gg
+gb
+ga
+Adjust contribution of input red, green, blue and alpha channels for output green channel.
+Default is 1
for gg , and 0
for gr , gb and ga .
+
+
+br
+bg
+bb
+ba
+Adjust contribution of input red, green, blue and alpha channels for output blue channel.
+Default is 1
for bb , and 0
for br , bg and ba .
+
+
+ar
+ag
+ab
+aa
+Adjust contribution of input red, green, blue and alpha channels for output alpha channel.
+Default is 1
for aa , and 0
for ar , ag and ab .
+
+Allowed ranges for options are [-2.0, 2.0]
.
+
+
+
+
+
30.12.1 Examples# TOC
+
+
+ Convert source to grayscale:
+
+
colorchannelmixer=.3:.4:.3:0:.3:.4:.3:0:.3:.4:.3
+
+ Simulate sepia tones:
+
+
colorchannelmixer=.393:.769:.189:0:.349:.686:.168:0:.272:.534:.131
+
+
+
+
+
30.13 colormatrix# TOC
+
+
Convert color matrix.
+
+
The filter accepts the following options:
+
+
+src
+dst
+Specify the source and destination color matrix. Both values must be
+specified.
+
+The accepted values are:
+
+‘bt709 ’
+BT.709
+
+
+‘bt601 ’
+BT.601
+
+
+‘smpte240m ’
+SMPTE-240M
+
+
+‘fcc ’
+FCC
+
+
+
+
+
+
For example to convert from BT.601 to SMPTE-240M, use the command:
+
+
colormatrix=bt601:smpte240m
+
+
+
+
30.14 copy# TOC
+
+
Copy the input source unchanged to the output. This is mainly useful for
+testing purposes.
+
+
+
30.15 crop# TOC
+
+
Crop the input video to given dimensions.
+
+
It accepts the following parameters:
+
+
+w, out_w
+The width of the output video. It defaults to iw
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+h, out_h
+The height of the output video. It defaults to ih
.
+This expression is evaluated only once during the filter
+configuration.
+
+
+x
+The horizontal position, in the input video, of the left edge of the output
+video. It defaults to (in_w-out_w)/2
.
+This expression is evaluated per-frame.
+
+
+y
+The vertical position, in the input video, of the top edge of the output video.
+It defaults to (in_h-out_h)/2
.
+This expression is evaluated per-frame.
+
+
+keep_aspect
+If set to 1 will force the output display aspect ratio
+to be the same as that of the input, by changing the output sample aspect
+ratio. It defaults to 0.
+
+
+
+
The out_w , out_h , x , y parameters are
+expressions containing the following constants:
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+in_w
+in_h
+The input width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (cropped) width and height.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+n
+The number of the input frame, starting from 0.
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
The expression for out_w may depend on the value of out_h ,
+and the expression for out_h may depend on out_w , but they
+cannot depend on x and y , as x and y are
+evaluated after out_w and out_h .
+
+
The x and y parameters specify the expressions for the
+position of the top-left corner of the output (non-cropped) area. They
+are evaluated for each frame. If the evaluated value is not valid, it
+is approximated to the nearest valid value.
+
+
The expression for x may depend on y , and the expression
+for y may depend on x .
+
+
+
30.15.1 Examples# TOC
+
+
+
+
+
30.16 cropdetect# TOC
+
+
Auto-detect the crop size.
+
+
It calculates the necessary cropping parameters and prints the
+recommended parameters via the logging system. The detected dimensions
+correspond to the non-black area of the input video.
+
+
It accepts the following parameters:
+
+
+limit
+Set higher black value threshold, which can be optionally specified
+from nothing (0) to everything (255 for 8-bit based formats). An intensity
+value greater than the set value is considered non-black. It defaults to 24.
+You can also specify a value between 0.0 and 1.0 which will be scaled depending
+on the bitdepth of the pixel format.
+
+
+round
+The value which the width/height should be divisible by. It defaults to
+16. The offset is automatically adjusted to center the video. Use 2 to
+get only even dimensions (needed for 4:2:2 video). 16 is best when
+encoding to most video codecs.
+
+
+reset_count, reset
+Set the counter that determines after how many frames cropdetect will
+reset the previously detected largest video area and start over to
+detect the current optimal crop area. Default value is 0.
+
+This can be useful when channel logos distort the video area. 0
+indicates ’never reset’, and returns the largest area encountered during
+playback.
+
+
+
+
+
30.17 curves# TOC
+
+
Apply color adjustments using curves.
+
+
This filter is similar to the Adobe Photoshop and GIMP curves tools. Each
+component (red, green and blue) has its values defined by N key points
+tied from each other using a smooth curve. The x-axis represents the pixel
+values from the input frame, and the y-axis the new pixel values to be set for
+the output frame.
+
+
By default, a component curve is defined by the two points (0;0) and
+(1;1) . This creates a straight line where each original pixel value is
+"adjusted" to its own value, which means no change to the image.
+
+
The filter allows you to redefine these two points and add some more. A new
+curve (using a natural cubic spline interpolation) will be defined to pass
+smoothly through all these new coordinates. The newly defined points need to be
+strictly increasing over the x-axis, and their x and y values must
+be in the [0;1] interval. If the computed curves happened to go outside
+the vector spaces, the values will be clipped accordingly.
+
+
If there is no key point defined in x=0
, the filter will automatically
+insert a (0;0) point. In the same way, if there is no key point defined
+in x=1
, the filter will automatically insert a (1;1) point.
+
+
The filter accepts the following options:
+
+
+preset
+Select one of the available color presets. This option can be used in addition
+to the r , g , b parameters; in this case, the latter
+options take priority over the preset values.
+Available presets are:
+
+‘none ’
+‘color_negative ’
+‘cross_process ’
+‘darker ’
+‘increase_contrast ’
+‘lighter ’
+‘linear_contrast ’
+‘medium_contrast ’
+‘negative ’
+‘strong_contrast ’
+‘vintage ’
+
+Default is none
.
+
+master, m
+Set the master key points. These points will define a second pass mapping. It
+is sometimes called a "luminance" or "value" mapping. It can be used with
+r , g , b or all since it acts like a
+post-processing LUT.
+
+red, r
+Set the key points for the red component.
+
+green, g
+Set the key points for the green component.
+
+blue, b
+Set the key points for the blue component.
+
+all
+Set the key points for all components (not including master).
+Can be used in addition to the other key points component
+options. In this case, the unset component(s) will fallback on this
+all setting.
+
+psfile
+Specify a Photoshop curves file (.asv
) to import the settings from.
+
+
+
+
To avoid some filtergraph syntax conflicts, each key points list need to be
+defined using the following syntax: x0/y0 x1/y1 x2/y2 ...
.
+
+
+
30.17.1 Examples# TOC
+
+
+
+
+
30.18 dctdnoiz# TOC
+
+
Denoise frames using 2D DCT (frequency domain filtering).
+
+
This filter is not designed for real time.
+
+
The filter accepts the following options:
+
+
+sigma, s
+Set the noise sigma constant.
+
+This sigma defines a hard threshold of 3 * sigma
; every DCT
+coefficient (absolute value) below this threshold will be dropped.
+
+If you need a more advanced filtering, see expr .
+
+Default is 0
.
+
+
+overlap
+Set number overlapping pixels for each block. Since the filter can be slow, you
+may want to reduce this value, at the cost of a less effective filter and the
+risk of various artefacts.
+
+If the overlapping value doesn't allow processing the whole input width or
+height, a warning will be displayed and the corresponding borders won't be denoised.
+
+Default value is blocksize -1, which is the best possible setting.
+
+
+expr, e
+Set the coefficient factor expression.
+
+For each coefficient of a DCT block, this expression will be evaluated as a
+multiplier value for the coefficient.
+
+If this option is set, the sigma option will be ignored.
+
+The absolute value of the coefficient can be accessed through the c
+variable.
+
+
+n
+Set the blocksize using the number of bits. 1<<n
defines the
+blocksize , which is the width and height of the processed blocks.
+
+The default value is 3 (8x8) and can be raised to 4 for a
+blocksize of 16x16. Note that changing this setting has huge consequences
+on processing speed. Also, a larger block size does not necessarily mean
+better de-noising.
+
+
+
+
+
30.18.1 Examples# TOC
+
+
Apply a denoise with a sigma of 4.5
:
+
+
+
The same operation can be achieved using the expression system:
+
+
dctdnoiz=e='gte(c, 4.5*3)'
+
+
+
Violent denoise using a block size of 16x16
:
+
+
+
+
30.19 decimate# TOC
+
+
Drop duplicated frames at regular intervals.
+
+
The filter accepts the following options:
+
+
+cycle
+Set the number of frames from which one will be dropped. Setting this to
+N means one frame in every batch of N frames will be dropped.
+Default is 5
.
+
+
+dupthresh
+Set the threshold for duplicate detection. If the difference metric for a frame
+is less than or equal to this value, then it is declared as duplicate. Default
+is 1.1
+
+
+scthresh
+Set scene change threshold. Default is 15
.
+
+
+blockx
+blocky
+Set the size of the x and y-axis blocks used during metric calculations.
+Larger blocks give better noise suppression, but also give worse detection of
+small movements. Must be a power of two. Default is 32
.
+
+
+ppsrc
+Mark main input as a pre-processed input and activate clean source input
+stream. This allows the input to be pre-processed with various filters to help
+the metrics calculation while keeping the frame selection lossless. When set to
+1
, the first stream is for the pre-processed input, and the second
+stream is the clean source from where the kept frames are chosen. Default is
+0
.
+
+
+chroma
+Set whether or not chroma is considered in the metric calculations. Default is
+1
.
+
+
+
+
+
30.20 dejudder# TOC
+
+
Remove judder produced by partially interlaced telecined content.
+
+
Judder can be introduced, for instance, by pullup filter. If the original
+source was partially telecined content then the output of pullup,dejudder
+will have a variable frame rate. May change the recorded frame rate of the
+container. Aside from that change, this filter will not affect constant frame
+rate video.
+
+
The option available in this filter is:
+
+cycle
+Specify the length of the window over which the judder repeats.
+
+Accepts any integer greater than 1. Useful values are:
+
+‘4 ’
+If the original was telecined from 24 to 30 fps (Film to NTSC).
+
+
+‘5 ’
+If the original was telecined from 25 to 30 fps (PAL to NTSC).
+
+
+‘20 ’
+If a mixture of the two.
+
+
+
+The default is ‘4 ’.
+
+
+
+
+
30.21 delogo# TOC
+
+
Suppress a TV station logo by a simple interpolation of the surrounding
+pixels. Just set a rectangle covering the logo and watch it disappear
+(and sometimes something even uglier appear - your mileage may vary).
+
+
It accepts the following parameters:
+
+x
+y
+Specify the top left corner coordinates of the logo. They must be
+specified.
+
+
+w
+h
+Specify the width and height of the logo to clear. They must be
+specified.
+
+
+band, t
+Specify the thickness of the fuzzy edge of the rectangle (added to
+w and h ). The default value is 4.
+
+
+show
+When set to 1, a green rectangle is drawn on the screen to simplify
+finding the right x , y , w , and h parameters.
+The default value is 0.
+
+The rectangle is drawn on the outermost pixels which will be (partly)
+replaced with interpolated values. The values of the next pixels
+immediately outside this rectangle in each direction will be used to
+compute the interpolated pixel values inside the rectangle.
+
+
+
+
+
+
30.21.1 Examples# TOC
+
+
+ Set a rectangle covering the area with top left corner coordinates 0,0
+and size 100x77, and a band of size 10:
+
+
delogo=x=0:y=0:w=100:h=77:band=10
+
+
+
+
+
+
30.22 deshake# TOC
+
+
Attempt to fix small changes in horizontal and/or vertical shift. This
+filter helps remove camera shake from hand-holding a camera, bumping a
+tripod, moving on a vehicle, etc.
+
+
The filter accepts the following options:
+
+
+x
+y
+w
+h
+Specify a rectangular area where to limit the search for motion
+vectors.
+If desired the search for motion vectors can be limited to a
+rectangular area of the frame defined by its top left corner, width
+and height. These parameters have the same meaning as the drawbox
+filter which can be used to visualise the position of the bounding
+box.
+
+This is useful when simultaneous movement of subjects within the frame
+might be confused for camera motion by the motion vector search.
+
+If any or all of x , y , w and h are set to -1
+then the full frame is used. This allows later options to be set
+without specifying the bounding box for the motion vector search.
+
+Default - search the whole frame.
+
+
+rx
+ry
+Specify the maximum extent of movement in x and y directions in the
+range 0-64 pixels. Default 16.
+
+
+edge
+Specify how to generate pixels to fill blanks at the edge of the
+frame. Available values are:
+
+‘blank, 0 ’
+Fill zeroes at blank locations
+
+‘original, 1 ’
+Original image at blank locations
+
+‘clamp, 2 ’
+Extruded edge value at blank locations
+
+‘mirror, 3 ’
+Mirrored edge at blank locations
+
+
+Default value is ‘mirror ’.
+
+
+blocksize
+Specify the blocksize to use for motion search. Range 4-128 pixels,
+default 8.
+
+
+contrast
+Specify the contrast threshold for blocks. Only blocks with more than
+the specified contrast (difference between darkest and lightest
+pixels) will be considered. Range 1-255, default 125.
+
+
+search
+Specify the search strategy. Available values are:
+
+‘exhaustive, 0 ’
+Set exhaustive search
+
+‘less, 1 ’
+Set less exhaustive search.
+
+
+Default value is ‘exhaustive ’.
+
+
+filename
+If set then a detailed log of the motion search is written to the
+specified file.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
+
30.23 drawbox# TOC
+
+
Draw a colored box on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the top left corner coordinates of the box. It defaults to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the box; if 0 they are interpreted as
+the input width and height. It defaults to 0.
+
+
+color, c
+Specify the color of the box to write. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the box edge color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the box edge. Default value is 3
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w and h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y offset coordinates where the box is drawn.
+
+
+w
+h
+The width and height of the drawn box.
+
+
+t
+The thickness of the drawn box.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
30.23.1 Examples# TOC
+
+
+
+
+
30.24 drawgrid# TOC
+
+
Draw a grid on the input image.
+
+
It accepts the following parameters:
+
+
+x
+y
+The expressions which specify the coordinates of some point of grid intersection (meant to configure offset). Both default to 0.
+
+
+width, w
+height, h
+The expressions which specify the width and height of the grid cell, if 0 they are interpreted as the
+input width and height, respectively, minus thickness
, so image gets
+framed. Default to 0.
+
+
+color, c
+Specify the color of the grid. For the general syntax of this option,
+check the "Color" section in the ffmpeg-utils manual. If the special
+value invert
is used, the grid color is the same as the
+video with inverted luma.
+
+
+thickness, t
+The expression which sets the thickness of the grid line. Default value is 1
.
+
+See below for the list of accepted constants.
+
+
+
+
The parameters for x , y , w and h and t are expressions containing the
+following constants:
+
+
+dar
+The input display aspect ratio, it is the same as (w / h ) * sar .
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_h, ih
+in_w, iw
+The input grid cell width and height.
+
+
+sar
+The input sample aspect ratio.
+
+
+x
+y
+The x and y coordinates of some point of grid intersection (meant to configure offset).
+
+
+w
+h
+The width and height of the drawn cell.
+
+
+t
+The thickness of the drawn cell.
+
+These constants allow the x , y , w , h and t expressions to refer to
+each other, so you may for example specify y=x/dar
or h=w/dar
.
+
+
+
+
+
+
30.24.1 Examples# TOC
+
+
+ Draw a grid with cell 100x100 pixels, thickness 2 pixels, with color red and an opacity of 50%:
+
+
drawgrid=width=100:height=100:thickness=2:color=red@0.5
+
+
+ Draw a white 3x3 grid with an opacity of 50%:
+
+
drawgrid=w=iw/3:h=ih/3:t=2:c=white@0.5
+
+
+
+
+
30.25 drawtext# TOC
+
+
Draw a text string or text from a specified file on top of a video, using the
+libfreetype library.
+
+
To enable compilation of this filter, you need to configure FFmpeg with
+--enable-libfreetype
.
+To enable default font fallback and the font option you need to
+configure FFmpeg with --enable-libfontconfig
.
+To enable the text_shaping option, you need to configure FFmpeg with
+--enable-libfribidi
.
+
+
+
30.25.1 Syntax# TOC
+
+
It accepts the following parameters:
+
+
+box
+Used to draw a box around text using the background color.
+The value must be either 1 (enable) or 0 (disable).
+The default value of box is 0.
+
+
+boxcolor
+The color to be used for drawing box around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of boxcolor is "white".
+
+
+borderw
+Set the width of the border to be drawn around the text using bordercolor .
+The default value of borderw is 0.
+
+
+bordercolor
+Set the color to be used for drawing border around text. For the syntax of this
+option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of bordercolor is "black".
+
+
+expansion
+Select how the text is expanded. Can be either none
,
+strftime
(deprecated) or
+normal
(default). See the Text expansion section
+below for details.
+
+
+fix_bounds
+If true, check and fix text coords to avoid clipping.
+
+
+fontcolor
+The color to be used for drawing fonts. For the syntax of this option, check
+the "Color" section in the ffmpeg-utils manual.
+
+The default value of fontcolor is "black".
+
+
+fontcolor_expr
+String which is expanded the same way as text to obtain dynamic
+fontcolor value. By default this option has empty value and is not
+processed. When this option is set, it overrides fontcolor option.
+
+
+font
+The font family to be used for drawing text. By default Sans.
+
+
+fontfile
+The font file to be used for drawing text. The path must be included.
+This parameter is mandatory if the fontconfig support is disabled.
+
+
+fontsize
+The font size to be used for drawing text.
+The default value of fontsize is 16.
+
+
+text_shaping
+If set to 1, attempt to shape the text (for example, reverse the order of
+right-to-left text and join Arabic characters) before drawing it.
+Otherwise, just draw the text exactly as given.
+By default 1 (if supported).
+
+
+ft_load_flags
+The flags to be used for loading the fonts.
+
+The flags map the corresponding flags supported by libfreetype, and are
+a combination of the following values:
+
+default
+no_scale
+no_hinting
+render
+no_bitmap
+vertical_layout
+force_autohint
+crop_bitmap
+pedantic
+ignore_global_advance_width
+no_recurse
+ignore_transform
+monochrome
+linear_design
+no_autohint
+
+
+Default value is "default".
+
+For more information consult the documentation for the FT_LOAD_*
+libfreetype flags.
+
+
+shadowcolor
+The color to be used for drawing a shadow behind the drawn text. For the
+syntax of this option, check the "Color" section in the ffmpeg-utils manual.
+
+The default value of shadowcolor is "black".
+
+
+shadowx
+shadowy
+The x and y offsets for the text shadow position with respect to the
+position of the text. They can be either positive or negative
+values. The default value for both is "0".
+
+
+start_number
+The starting frame number for the n/frame_num variable. The default value
+is "0".
+
+
+tabsize
+The size in number of spaces to use for rendering the tab.
+Default value is 4.
+
+
+timecode
+Set the initial timecode representation in "hh:mm:ss[:;.]ff"
+format. It can be used with or without text parameter. timecode_rate
+option must be specified.
+
+
+timecode_rate, rate, r
+Set the timecode frame rate (timecode only).
+
+
+text
+The text string to be drawn. The text must be a sequence of UTF-8
+encoded characters.
+This parameter is mandatory if no file is specified with the parameter
+textfile .
+
+
+textfile
+A text file containing text to be drawn. The text must be a sequence
+of UTF-8 encoded characters.
+
+This parameter is mandatory if no text string is specified with the
+parameter text .
+
+If both text and textfile are specified, an error is thrown.
+
+
+reload
+If set to 1, the textfile will be reloaded before each frame.
+Be sure to update it atomically, or it may be read partially, or even fail.
+
+
+x
+y
+The expressions which specify the offsets where text will be drawn
+within the video frame. They are relative to the top/left border of the
+output image.
+
+The default value of x and y is "0".
+
+See below for the list of accepted constants and functions.
+
+
+
+
The parameters for x and y are expressions containing the
+following constants and functions:
+
+
+dar
+input display aspect ratio, it is the same as (w / h ) * sar
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+line_h, lh
+the height of each text line
+
+
+main_h, h, H
+the input height
+
+
+main_w, w, W
+the input width
+
+
+max_glyph_a, ascent
+the maximum distance from the baseline to the highest/upper grid
+coordinate used to place a glyph outline point, for all the rendered
+glyphs.
+It is a positive value, due to the grid’s orientation with the Y axis
+upwards.
+
+
+max_glyph_d, descent
+the maximum distance from the baseline to the lowest grid coordinate
+used to place a glyph outline point, for all the rendered glyphs.
+This is a negative value, due to the grid’s orientation, with the Y axis
+upwards.
+
+
+max_glyph_h
+maximum glyph height, that is the maximum height for all the glyphs
+contained in the rendered text, it is equivalent to ascent -
+descent .
+
+
+max_glyph_w
+maximum glyph width, that is the maximum width for all the glyphs
+contained in the rendered text
+
+
+n
+the number of the input frame, starting from 0
+
+
+rand(min, max)
+return a random number included between min and max
+
+
+sar
+The input sample aspect ratio.
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+text_h, th
+the height of the rendered text
+
+
+text_w, tw
+the width of the rendered text
+
+
+x
+y
+the x and y offset coordinates where the text is drawn.
+
+These parameters allow the x and y expressions to refer
+each other, so you can for example specify y=x/dar
.
+
+
+
+
+
30.25.2 Text expansion# TOC
+
+
If expansion is set to strftime
,
+the filter recognizes strftime() sequences in the provided text and
+expands them accordingly. Check the documentation of strftime(). This
+feature is deprecated.
+
+
If expansion is set to none
, the text is printed verbatim.
+
+
If expansion is set to normal
(which is the default),
+the following expansion mechanism is used.
+
+
The backslash character ’\’, followed by any character, always expands to
+the second character.
+
+
Sequences of the form %{...}
are expanded. The text between the
+braces is a function name, possibly followed by arguments separated by ’:’.
+If the arguments contain special characters or delimiters (’:’ or ’}’),
+they should be escaped.
+
+
Note that they probably must also be escaped as the value for the
+text option in the filter argument string and as the filter
+argument in the filtergraph description, and possibly also for the shell,
+that makes up to four levels of escaping; using a text file avoids these
+problems.
+
+
The following functions are available:
+
+
+expr, e
+The expression evaluation result.
+
+It must take one argument specifying the expression to be evaluated,
+which accepts the same constants and functions as the x and
+y values. Note that not all constants should be used, for
+example the text size is not known when evaluating the expression, so
+the constants text_w and text_h will have an undefined
+value.
+
+
+expr_int_format, eif
+Evaluate the expression’s value and output as formatted integer.
+
+The first argument is the expression to be evaluated, just as for the expr function.
+The second argument specifies the output format. Allowed values are ’x’, ’X’, ’d’ and
+’u’. They are treated exactly as in the printf function.
+The third parameter is optional and sets the number of positions taken by the output.
+It can be used to add padding with zeros from the left.
+
+
+gmtime
+The time at which the filter is running, expressed in UTC.
+It can accept an argument: a strftime() format string.
+
+
+localtime
+The time at which the filter is running, expressed in the local time zone.
+It can accept an argument: a strftime() format string.
+
+
+metadata
+Frame metadata. It must take one argument specifying metadata key.
+
+
+n, frame_num
+The frame number, starting from 0.
+
+
+pict_type
+A 1 character description of the current picture type.
+
+
+pts
+The timestamp of the current frame.
+It can take up to two arguments.
+
+The first argument is the format of the timestamp; it defaults to flt
+for seconds as a decimal number with microsecond accuracy; hms
stands
+for a formatted [-]HH:MM:SS.mmm timestamp with millisecond accuracy.
+
+The second argument is an offset added to the timestamp.
+
+
+
+
+
+
30.25.3 Examples# TOC
+
+
+
+
For more information about libfreetype, check:
+http://www.freetype.org/ .
+
+
For more information about fontconfig, check:
+http://freedesktop.org/software/fontconfig/fontconfig-user.html .
+
+
For more information about libfribidi, check:
+http://fribidi.org/ .
+
+
+
30.26 edgedetect# TOC
+
+
Detect and draw edges. The filter uses the Canny Edge Detection algorithm.
+
+
The filter accepts the following options:
+
+
+low
+high
+Set low and high threshold values used by the Canny thresholding
+algorithm.
+
+The high threshold selects the "strong" edge pixels, which are then
+connected through 8-connectivity with the "weak" edge pixels selected
+by the low threshold.
+
+low and high threshold values must be chosen in the range
+[0,1], and low should be less than or equal to high .
+
+Default value for low is 20/255
, and default value for high
+is 50/255
.
+
+
+mode
+Define the drawing mode.
+
+
+‘wires ’
+Draw white/gray wires on black background.
+
+
+‘colormix ’
+Mix the colors to create a paint/cartoon effect.
+
+
+
+Default value is wires .
+
+
+
+
+
30.26.1 Examples# TOC
+
+
+ Standard edge detection with custom values for the hysteresis thresholding:
+
+
edgedetect=low=0.1:high=0.4
+
+
+ Painting effect without thresholding:
+
+
edgedetect=mode=colormix:high=0
+
+
+
+
+
30.27 extractplanes# TOC
+
+
Extract color channel components from input video stream into
+separate grayscale video streams.
+
+
The filter accepts the following option:
+
+
+planes
+Set plane(s) to extract.
+
+Available values for planes are:
+
+‘y ’
+‘u ’
+‘v ’
+‘a ’
+‘r ’
+‘g ’
+‘b ’
+
+
+Choosing planes not available in the input will result in an error.
+That means you cannot select r
, g
, b
planes
+with y
, u
, v
planes at same time.
+
+
+
+
+
30.27.1 Examples# TOC
+
+
+ Extract luma, u and v color channel component from input video frame
+into 3 grayscale outputs:
+
+
ffmpeg -i video.avi -filter_complex 'extractplanes=y+u+v[y][u][v]' -map '[y]' y.avi -map '[u]' u.avi -map '[v]' v.avi
+
+
+
+
+
30.28 elbg# TOC
+
+
Apply a posterize effect using the ELBG (Enhanced LBG) algorithm.
+
+
For each input image, the filter will compute the optimal mapping from
+the input to the output given the codebook length, that is the number
+of distinct output colors.
+
+
This filter accepts the following options.
+
+
+codebook_length, l
+Set codebook length. The value must be a positive integer, and
+represents the number of distinct output colors. Default value is 256.
+
+
+nb_steps, n
+Set the maximum number of iterations to apply for computing the optimal
+mapping. The higher the value the better the result and the higher the
+computation time. Default value is 1.
+
+
+seed, s
+Set a random seed, must be an integer included between 0 and
+UINT32_MAX. If not specified, or if explicitly set to -1, the filter
+will try to use a good random seed on a best effort basis.
+
+
+
+
+
30.29 fade# TOC
+
+
Apply a fade-in/out effect to the input video.
+
+
It accepts the following parameters:
+
+
+type, t
+The effect type can be either "in" for a fade-in, or "out" for a fade-out
+effect.
+Default is in
.
+
+
+start_frame, s
+Specify the number of the frame to start applying the fade
+effect at. Default is 0.
+
+
+nb_frames, n
+The number of frames that the fade effect lasts. At the end of the
+fade-in effect, the output video will have the same intensity as the input video.
+At the end of the fade-out transition, the output video will be filled with the
+selected color .
+Default is 25.
+
+
+alpha
+If set to 1, fade only alpha channel, if one exists on the input.
+Default value is 0.
+
+
+start_time, st
+Specify the timestamp (in seconds) of the frame to start to apply the fade
+effect. If both start_frame and start_time are specified, the fade will start at
+whichever comes last. Default is 0.
+
+
+duration, d
+The number of seconds for which the fade effect has to last. At the end of the
+fade-in effect the output video will have the same intensity as the input video,
+at the end of the fade-out transition the output video will be filled with the
+selected color .
+If both duration and nb_frames are specified, duration is used. Default is 0.
+
+
+color, c
+Specify the color of the fade. Default is "black".
+
+
+
+
+
30.29.1 Examples# TOC
+
+
+
+
+
30.30 field# TOC
+
+
Extract a single field from an interlaced image using stride
+arithmetic to avoid wasting CPU time. The output frames are marked as
+non-interlaced.
+
+
The filter accepts the following options:
+
+
+type
+Specify whether to extract the top (if the value is 0
or
+top
) or the bottom field (if the value is 1
or
+bottom
).
+
+
+
+
+
30.31 fieldmatch# TOC
+
+
Field matching filter for inverse telecine. It is meant to reconstruct the
+progressive frames from a telecined stream. The filter does not drop duplicated
+frames, so to achieve a complete inverse telecine fieldmatch
needs to be
+followed by a decimation filter such as decimate in the filtergraph.
+
+
The separation of the field matching and the decimation is notably motivated by
+the possibility of inserting a de-interlacing filter fallback between the two.
+If the source has mixed telecined and real interlaced content,
+fieldmatch
will not be able to match fields for the interlaced parts.
+But these remaining combed frames will be marked as interlaced, and thus can be
+de-interlaced by a later filter such as yadif before decimation.
+
+
In addition to the various configuration options, fieldmatch
can take an
+optional second stream, activated through the ppsrc option. If
+enabled, the frames reconstruction will be based on the fields and frames from
+this second stream. This allows the first input to be pre-processed in order to
+help the various algorithms of the filter, while keeping the output lossless
+(assuming the fields are matched properly). Typically, a field-aware denoiser,
+or brightness/contrast adjustments can help.
+
+
Note that this filter uses the same algorithms as TIVTC/TFM (AviSynth project)
+and VIVTC/VFM (VapourSynth project). The latter is a light clone of TFM from
+which fieldmatch
is based on. While the semantic and usage are very
+close, some behaviour and options names can differ.
+
+
The decimate filter currently only works for constant frame rate input.
+Do not use fieldmatch
and decimate if your input has mixed
+telecined and progressive content with changing framerate.
+
+
The filter accepts the following options:
+
+
+order
+Specify the assumed field order of the input stream. Available values are:
+
+
+‘auto ’
+Auto detect parity (use FFmpeg’s internal parity value).
+
+‘bff ’
+Assume bottom field first.
+
+‘tff ’
+Assume top field first.
+
+
+
+Note that it is sometimes recommended not to trust the parity announced by the
+stream.
+
+Default value is auto .
+
+
+mode
+Set the matching mode or strategy to use. pc mode is the safest in the
+sense that it won’t risk creating jerkiness due to duplicate frames when
+possible, but if there are bad edits or blended fields it will end up
+outputting combed frames when a good match might actually exist. On the other
+hand, pcn_ub mode is the most risky in terms of creating jerkiness,
+but will almost always find a good frame if there is one. The other values are
+all somewhere in between pc and pcn_ub in terms of risking
+jerkiness and creating duplicate frames versus finding good matches in sections
+with bad edits, orphaned fields, blended fields, etc.
+
+More details about p/c/n/u/b are available in p/c/n/u/b meaning section.
+
+Available values are:
+
+
+‘pc ’
+2-way matching (p/c)
+
+‘pc_n ’
+2-way matching, and trying 3rd match if still combed (p/c + n)
+
+‘pc_u ’
+2-way matching, and trying 3rd match (same order) if still combed (p/c + u)
+
+‘pc_n_ub ’
+2-way matching, trying 3rd match if still combed, and trying 4th/5th matches if
+still combed (p/c + n + u/b)
+
+‘pcn ’
+3-way matching (p/c/n)
+
+‘pcn_ub ’
+3-way matching, and trying 4th/5th matches if all 3 of the original matches are
+detected as combed (p/c/n + u/b)
+
+
+
+The parentheses at the end indicate the matches that would be used for that
+mode assuming order =tff (and field on auto or
+top ).
+
+In terms of speed pc mode is by far the fastest and pcn_ub is
+the slowest.
+
+Default value is pc_n .
+
+
+ppsrc
+Mark the main input stream as a pre-processed input, and enable the secondary
+input stream as the clean source to pick the fields from. See the filter
+introduction for more details. It is similar to the clip2 feature from
+VFM/TFM.
+
+Default value is 0
(disabled).
+
+
+field
+Set the field to match from. It is recommended to set this to the same value as
+order unless you experience matching failures with that setting. In
+certain circumstances changing the field that is used to match from can have a
+large impact on matching performance. Available values are:
+
+
+‘auto ’
+Automatic (same value as order ).
+
+‘bottom ’
+Match from the bottom field.
+
+‘top ’
+Match from the top field.
+
+
+
+Default value is auto .
+
+
+mchroma
+Set whether or not chroma is included during the match comparisons. In most
+cases it is recommended to leave this enabled. You should set this to 0
+only if your clip has bad chroma problems such as heavy rainbowing or other
+artifacts. Setting this to 0
could also be used to speed things up at
+the cost of some accuracy.
+
+Default value is 1
.
+
+
+y0
+y1
+These define an exclusion band which excludes the lines between y0 and
+y1 from being included in the field matching decision. An exclusion
+band can be used to ignore subtitles, a logo, or other things that may
+interfere with the matching. y0 sets the starting scan line and
+y1 sets the ending line; all lines in between y0 and
+y1 (including y0 and y1 ) will be ignored. Setting
+y0 and y1 to the same value will disable the feature.
+y0 and y1 defaults to 0
.
+
+
+scthresh
+Set the scene change detection threshold as a percentage of maximum change on
+the luma plane. Good values are in the [8.0, 14.0]
range. Scene change
+detection is only relevant in case combmatch =sc . The range for
+scthresh is [0.0, 100.0]
.
+
+Default value is 12.0
.
+
+
+combmatch
+When combmatch is not none , fieldmatch
will take into
+account the combed scores of matches when deciding what match to use as the
+final match. Available values are:
+
+
+‘none ’
+No final matching based on combed scores.
+
+‘sc ’
+Combed scores are only used when a scene change is detected.
+
+‘full ’
+Use combed scores all the time.
+
+
+
+Default is sc .
+
+
+combdbg
+Force fieldmatch
to calculate the combed metrics for certain matches and
+print them. This setting is known as micout in TFM/VFM vocabulary.
+Available values are:
+
+
+‘none ’
+No forced calculation.
+
+‘pcn ’
+Force p/c/n calculations.
+
+‘pcnub ’
+Force p/c/n/u/b calculations.
+
+
+
+Default value is none .
+
+
+cthresh
+This is the area combing threshold used for combed frame detection. This
+essentially controls how "strong" or "visible" combing must be to be detected.
+Larger values mean combing must be more visible and smaller values mean combing
+can be less visible or strong and still be detected. Valid settings are from
+-1
(every pixel will be detected as combed) to 255
(no pixel will
+be detected as combed). This is basically a pixel difference value. A good
+range is [8, 12]
.
+
+Default value is 9
.
+
+
+chroma
+Sets whether or not chroma is considered in the combed frame decision. Only
+disable this if your source has chroma problems (rainbowing, etc.) that are
+causing problems for the combed frame detection with chroma enabled. Actually,
+using chroma =0 is usually more reliable, except for the case
+where there is chroma only combing in the source.
+
+Default value is 0
.
+
+
+blockx
+blocky
+Respectively set the x-axis and y-axis size of the window used during combed
+frame detection. This has to do with the size of the area in which
+combpel pixels are required to be detected as combed for a frame to be
+declared combed. See the combpel parameter description for more info.
+Possible values are any number that is a power of 2 starting at 4 and going up
+to 512.
+
+Default value is 16
.
+
+
+combpel
+The number of combed pixels inside any of the blocky by
+blockx size blocks on the frame for the frame to be detected as
+combed. While cthresh controls how "visible" the combing must be, this
+setting controls "how much" combing there must be in any localized area (a
+window defined by the blockx and blocky settings) on the
+frame. Minimum value is 0
and maximum is blocky x blockx
(at
+which point no frames will ever be detected as combed). This setting is known
+as MI in TFM/VFM vocabulary.
+
+Default value is 80
.
+
+
+
+
+
30.31.1 p/c/n/u/b meaning# TOC
+
+
+
30.31.1.1 p/c/n# TOC
+
+
We assume the following telecined stream:
+
+
+
Top fields: 1 2 2 3 4
+Bottom fields: 1 2 3 4 4
+
+
+
The numbers correspond to the progressive frame the fields relate to. Here, the
+first two frames are progressive, the 3rd and 4th are combed, and so on.
+
+
When fieldmatch
is configured to run a matching from bottom
+(field =bottom ) this is how this input stream get transformed:
+
+
+
Input stream:
+ T 1 2 2 3 4
+ B 1 2 3 4 4 <-- matching reference
+
+Matches: c c n n c
+
+Output stream:
+ T 1 2 3 4 4
+ B 1 2 3 4 4
+
+
+
As a result of the field matching, we can see that some frames get duplicated.
+To perform a complete inverse telecine, you need to rely on a decimation filter
+after this operation. See for instance the decimate filter.
+
+
The same operation now matching from top fields (field =top )
+looks like this:
+
+
+
Input stream:
+ T 1 2 2 3 4 <-- matching reference
+ B 1 2 3 4 4
+
+Matches: c c p p c
+
+Output stream:
+ T 1 2 2 3 4
+ B 1 2 2 3 4
+
+
+
In these examples, we can see what p , c and n mean;
+basically, they refer to the frame and field of the opposite parity:
+
+
+ p matches the field of the opposite parity in the previous frame
+ c matches the field of the opposite parity in the current frame
+ n matches the field of the opposite parity in the next frame
+
+
+
+
30.31.1.2 u/b# TOC
+
+
The u and b matching are a bit special in the sense that they match
+from the opposite parity flag. In the following examples, we assume that we are
+currently matching the 2nd frame (Top:2, bottom:2). According to the match, an
+’x’ is placed above and below each matched field.
+
+
With bottom matching (field =bottom ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 1 2 2 2
+ 2 2 2 1 3
+
+
+
With top matching (field =top ):
+
+
Match: c p n b u
+
+ x x x x x
+ Top 1 2 2 1 2 2 1 2 2 1 2 2 1 2 2
+ Bottom 1 2 3 1 2 3 1 2 3 1 2 3 1 2 3
+ x x x x x
+
+Output frames:
+ 2 2 2 1 2
+ 2 1 3 2 2
+
+
+
+
30.31.2 Examples# TOC
+
+
Simple IVTC of a top field first telecined stream:
+
+
fieldmatch=order=tff:combmatch=none, decimate
+
+
+
Advanced IVTC, with fallback on yadif for still combed frames:
+
+
fieldmatch=order=tff:combmatch=full, yadif=deint=interlaced, decimate
+
+
+
+
30.32 fieldorder# TOC
+
+
Transform the field order of the input video.
+
+
It accepts the following parameters:
+
+
+order
+The output field order. Valid values are tff for top field first or bff
+for bottom field first.
+
+
+
+
The default value is ‘tff ’.
+
+
The transformation is done by shifting the picture content up or down
+by one line, and filling the remaining line with appropriate picture content.
+This method is consistent with most broadcast field order converters.
+
+
If the input video is not flagged as being interlaced, or it is already
+flagged as being of the required output field order, then this filter does
+not alter the incoming video.
+
+
It is very useful when converting to or from PAL DV material,
+which is bottom field first.
+
+
For example:
+
+
ffmpeg -i in.vob -vf "fieldorder=bff" out.dv
+
+
+
+
30.33 fifo# TOC
+
+
Buffer input images and send them when they are requested.
+
+
It is mainly useful when auto-inserted by the libavfilter
+framework.
+
+
It does not take parameters.
+
+
+
30.34 format# TOC
+
+
Convert the input video to one of the specified pixel formats.
+Libavfilter will try to pick one that is suitable as input to
+the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
30.34.1 Examples# TOC
+
+
+
+
+
30.35 fps# TOC
+
+
Convert the video to specified constant frame rate by duplicating or dropping
+frames as necessary.
+
+
It accepts the following parameters:
+
+fps
+The desired output frame rate. The default is 25
.
+
+
+round
+Rounding method.
+
+Possible values are:
+
+zero
+zero round towards 0
+
+inf
+round away from 0
+
+down
+round towards -infinity
+
+up
+round towards +infinity
+
+near
+round to nearest
+
+
+The default is near
.
+
+
+start_time
+Assume the first PTS should be the given value, in seconds. This allows for
+padding/trimming at the start of stream. By default, no assumption is made
+about the first frame’s expected PTS, so no padding or trimming is done.
+For example, this could be set to 0 to pad the beginning with duplicates of
+the first frame if a video stream starts after the audio stream or to trim any
+frames with a negative PTS.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+fps [:round ].
+
+
See also the setpts filter.
+
+
+
30.35.1 Examples# TOC
+
+
+ A typical usage in order to set the fps to 25:
+
+
+ Sets the fps to 24, using abbreviation and rounding method to round to nearest:
+
+
fps=fps=film:round=near
+
+
+
+
+
30.36 framepack# TOC
+
+
Pack two different video streams into a stereoscopic video, setting proper
+metadata on supported codecs. The two views should have the same size and
+framerate and processing will stop when the shorter video ends. Please note
+that you may conveniently adjust view properties with the scale and
+fps filters.
+
+
It accepts the following parameters:
+
+format
+The desired packing format. Supported values are:
+
+
+sbs
+The views are next to each other (default).
+
+
+tab
+The views are on top of each other.
+
+
+lines
+The views are packed by line.
+
+
+columns
+The views are packed by column.
+
+
+frameseq
+The views are temporally interleaved.
+
+
+
+
+
+
+
+
Some examples:
+
+
+
# Convert left and right views into a frame-sequential video
+ffmpeg -i LEFT -i RIGHT -filter_complex framepack=frameseq OUTPUT
+
+# Convert views into a side-by-side video with the same output resolution as the input
+ffmpeg -i LEFT -i RIGHT -filter_complex [0:v]scale=w=iw/2[left],[1:v]scale=w=iw/2[right],[left][right]framepack=sbs OUTPUT
+
+
+
+
30.37 framestep# TOC
+
+
Select one frame every N-th frame.
+
+
This filter accepts the following option:
+
+step
+Select frame after every step
frames.
+Allowed values are positive integers higher than 0. Default value is 1
.
+
+
+
+
+
30.38 frei0r# TOC
+
+
Apply a frei0r effect to the input video.
+
+
To enable the compilation of this filter, you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the frei0r effect to load. If the environment variable
+FREI0R_PATH
is defined, the frei0r effect is searched for in each of the
+directories specified by the colon-separated list in FREI0R_PATH
.
+Otherwise, the standard frei0r paths are searched, in this order:
+HOME/.frei0r-1/lib/ , /usr/local/lib/frei0r-1/ ,
+/usr/lib/frei0r-1/ .
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r effect.
+
+
+
+
+
A frei0r effect parameter can be a boolean (its value is either
+"y" or "n"), a double, a color (specified as
+R /G /B , where R , G , and B are floating point
+numbers between 0.0 and 1.0, inclusive) or by a color description specified in the "Color"
+section in the ffmpeg-utils manual), a position (specified as X /Y , where
+X and Y are floating point numbers) and/or a string.
+
+
The number and types of parameters depend on the loaded effect. If an
+effect parameter is not specified, the default value is set.
+
+
+
30.38.1 Examples# TOC
+
+
+ Apply the distort0r effect, setting the first two double parameters:
+
+
frei0r=filter_name=distort0r:filter_params=0.5|0.01
+
+
+ Apply the colordistance effect, taking a color as the first parameter:
+
+
frei0r=colordistance:0.2/0.3/0.4
+frei0r=colordistance:violet
+frei0r=colordistance:0x112233
+
+
+ Apply the perspective effect, specifying the top left and top right image
+positions:
+
+
frei0r=perspective:0.2/0.2|0.8/0.2
+
+
+
+
For more information, see
+http://frei0r.dyne.org
+
+
+
30.39 fspp# TOC
+
+
Apply fast and simple postprocessing. It is a faster version of spp .
+
+
It splits (I)DCT into horizontal/vertical passes. Unlike the simple post-
+processing filter, one of them is performed once per block, not per pixel.
+This allows for much higher speed.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 4-5. Default value is 4
.
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range 0-63.
+If not set, the filter will use the QP from the video stream (if available).
+
+
+strength
+Set filter strength. It accepts an integer in range -15 to 32. Lower values mean
+more details but also more artifacts, while higher values make the image smoother
+but also blurrier. Default value is 0
− PSNR optimal.
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
+
30.40 geq# TOC
+
+
The filter accepts the following options:
+
+
+lum_expr, lum
+Set the luminance expression.
+
+cb_expr, cb
+Set the chrominance blue expression.
+
+cr_expr, cr
+Set the chrominance red expression.
+
+alpha_expr, a
+Set the alpha expression.
+
+red_expr, r
+Set the red expression.
+
+green_expr, g
+Set the green expression.
+
+blue_expr, b
+Set the blue expression.
+
+
+
+
The colorspace is selected according to the specified options. If one
+of the lum_expr , cb_expr , or cr_expr
+options is specified, the filter will automatically select a YCbCr
+colorspace. If one of the red_expr , green_expr , or
+blue_expr options is specified, it will select an RGB
+colorspace.
+
+
If one of the chrominance expression is not defined, it falls back on the other
+one. If no alpha expression is specified it will evaluate to opaque value.
+If none of chrominance expressions are specified, they will evaluate
+to the luminance expression.
+
+
The expressions can use the following variables and functions:
+
+
+N
+The sequential number of the filtered frame, starting from 0
.
+
+
+X
+Y
+The coordinates of the current sample.
+
+
+W
+H
+The width and height of the image.
+
+
+SW
+SH
+Width and height scale depending on the currently filtered plane. It is the
+ratio between the corresponding luma plane number of pixels and the current
+plane ones. E.g. for YUV4:2:0 the values are 1,1
for the luma plane, and
+0.5,0.5
for chroma planes.
+
+
+T
+Time of the current frame, expressed in seconds.
+
+
+p(x, y)
+Return the value of the pixel at location (x ,y ) of the current
+plane.
+
+
+lum(x, y)
+Return the value of the pixel at location (x ,y ) of the luminance
+plane.
+
+
+cb(x, y)
+Return the value of the pixel at location (x ,y ) of the
+blue-difference chroma plane. Return 0 if there is no such plane.
+
+
+cr(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red-difference chroma plane. Return 0 if there is no such plane.
+
+
+r(x, y)
+g(x, y)
+b(x, y)
+Return the value of the pixel at location (x ,y ) of the
+red/green/blue component. Return 0 if there is no such component.
+
+
+alpha(x, y)
+Return the value of the pixel at location (x ,y ) of the alpha
+plane. Return 0 if there is no such plane.
+
+
+
+
For functions, if x and y are outside the area, the value will be
+automatically clipped to the closer edge.
+
+
+
30.40.1 Examples# TOC
+
+
+ Flip the image horizontally:
+
+
+ Generate a bidimensional sine wave, with angle PI/3
and a
+wavelength of 100 pixels:
+
+
geq=128 + 100*sin(2*(PI/100)*(cos(PI/3)*(X-50*T) + sin(PI/3)*Y)):128:128
+
+
+ Generate a fancy enigmatic moving light:
+
+
nullsrc=s=256x256,geq=random(1)/hypot(X-cos(N*0.07)*W/2-W/2\,Y-sin(N*0.09)*H/2-H/2)^2*1000000*sin(N*0.02):128:128
+
+
+ Generate a quick emboss effect:
+
+
format=gray,geq=lum_expr='(p(X,Y)+(256-p(X-4,Y-4)))/2'
+
+
+ Modify RGB components depending on pixel position:
+
+
geq=r='X/W*r(X,Y)':g='(1-X/W)*g(X,Y)':b='(H-Y)/H*b(X,Y)'
+
+
+ Create a radial gradient that is the same size as the input (also see
+the vignette filter):
+
+
geq=lum=255*gauss((X/W-0.5)*3)*gauss((Y/H-0.5)*3)/gauss(0)/gauss(0),format=gray
+
+
+ Create a linear gradient to use as a mask for another filter, then
+compose with overlay . In this example the video will gradually
+become more blurry from the top to the bottom of the y-axis as defined
+by the linear gradient:
+
+
ffmpeg -i input.mp4 -filter_complex "geq=lum=255*(Y/H),format=gray[grad];[0:v]boxblur=4[blur];[blur][grad]alphamerge[alpha];[0:v][alpha]overlay" output.mp4
+
+
+
+
+
30.41 gradfun# TOC
+
+
Fix the banding artifacts that are sometimes introduced into nearly flat
+regions by truncation to 8bit color depth.
+Interpolate the gradients that should go where the bands are, and
+dither them.
+
+
It is designed for playback only. Do not use it prior to
+lossy compression, because compression tends to lose the dither and
+bring back the bands.
+
+
It accepts the following parameters:
+
+
+strength
+The maximum amount by which the filter will change any one pixel. This is also
+the threshold for detecting nearly flat regions. Acceptable values range from
+.51 to 64; the default value is 1.2. Out-of-range values will be clipped to the
+valid range.
+
+
+radius
+The neighborhood to fit the gradient to. A larger radius makes for smoother
+gradients, but also prevents the filter from modifying the pixels near detailed
+regions. Acceptable values are 8-32; the default value is 16. Out-of-range
+values will be clipped to the valid range.
+
+
+
+
+
Alternatively, the options can be specified as a flat string:
+strength [:radius ]
+
+
+
30.41.1 Examples# TOC
+
+
+ Apply the filter with a 3.5
strength and radius of 8
:
+
+
+ Specify radius, omitting the strength (which will fall-back to the default
+value):
+
+
+
+
+
+
30.42 haldclut# TOC
+
+
Apply a Hald CLUT to a video stream.
+
+
First input is the video stream to process, and second one is the Hald CLUT.
+The Hald CLUT input can be a simple picture or a complete video stream.
+
+
The filter accepts the following options:
+
+
+shortest
+Force termination when the shortest input terminates. Default is 0
.
+
+repeatlast
+Continue applying the last CLUT after the end of the stream. A value of
+0
disables the filter after the last frame of the CLUT is reached.
+Default is 1
.
+
+
+
+
haldclut
also has the same interpolation options as lut3d (both
+filters share the same internals).
+
+
More information about the Hald CLUT can be found on Eskil Steenberg’s website
+(Hald CLUT author) at http://www.quelsolaar.com/technology/clut.html .
+
+
+
30.42.1 Workflow examples# TOC
+
+
+
30.42.1.1 Hald CLUT video stream# TOC
+
+
Generate an identity Hald CLUT stream altered with various effects:
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "hue=H=2*PI*t:s=sin(2*PI*t)+1, curves=cross_process" -t 10 -c:v ffv1 clut.nut
+
+
+
Note: make sure you use a lossless codec.
+
+
Then use it with haldclut
to apply it on some random stream:
+
+
ffmpeg -f lavfi -i mandelbrot -i clut.nut -filter_complex '[0][1] haldclut' -t 20 mandelclut.mkv
+
+
+
The Hald CLUT will be applied to the 10 first seconds (duration of
+clut.nut ), then the latest picture of that CLUT stream will be applied
+to the remaining frames of the mandelbrot
stream.
+
+
+
30.42.1.2 Hald CLUT with preview# TOC
+
+
A Hald CLUT is supposed to be a squared image of Level*Level*Level
by
+Level*Level*Level
pixels. For a given Hald CLUT, FFmpeg will select the
+biggest possible square starting at the top left of the picture. The remaining
+padding pixels (bottom or right) will be ignored. This area can be used to add
+a preview of the Hald CLUT.
+
+
Typically, the following generated Hald CLUT will be supported by the
+haldclut
filter:
+
+
+
ffmpeg -f lavfi -i haldclutsrc =8 -vf "
+ pad=iw+320 [padded_clut];
+ smptebars=s=320x256, split [a][b];
+ [padded_clut][a] overlay=W-320:h, curves=color_negative [main];
+ [main][b] overlay=W-320" -frames:v 1 clut.png
+
+
+
It contains the original and a preview of the effect of the CLUT: SMPTE color
+bars are displayed on the right-top, and below the same color bars processed by
+the color changes.
+
+
Then, the effect of this Hald CLUT can be visualized with:
+
+
ffplay input.mkv -vf "movie=clut.png, [in] haldclut"
+
+
+
+
30.43 hflip# TOC
+
+
Flip the input video horizontally.
+
+
For example, to horizontally flip the input video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "hflip" out.avi
+
+
+
+
30.44 histeq# TOC
+
This filter applies a global color histogram equalization on a
+per-frame basis.
+
+
It can be used to correct video that has a compressed range of pixel
+intensities. The filter redistributes the pixel intensities to
+equalize their distribution across the intensity range. It may be
+viewed as an "automatically adjusting contrast filter". This filter is
+useful only for correcting degraded or poorly captured source
+video.
+
+
The filter accepts the following options:
+
+
+strength
+Determine the amount of equalization to be applied. As the strength
+is reduced, the distribution of pixel intensities more-and-more
+approaches that of the input frame. The value must be a float number
+in the range [0,1] and defaults to 0.200.
+
+
+intensity
+Set the maximum intensity that can be generated and scale the output
+values appropriately. The strength should be set as desired and then
+the intensity can be limited if needed to avoid washing-out. The value
+must be a float number in the range [0,1] and defaults to 0.210.
+
+
+antibanding
+Set the antibanding level. If enabled the filter will randomly vary
+the luminance of output pixels by a small amount to avoid banding of
+the histogram. Possible values are none
, weak
or
+strong
. It defaults to none
.
+
+
+
+
+
30.45 histogram# TOC
+
+
Compute and draw a color distribution histogram for the input video.
+
+
The computed histogram is a representation of the color component
+distribution in an image.
+
+
The filter accepts the following options:
+
+
+mode
+Set histogram mode.
+
+It accepts the following values:
+
+‘levels ’
+Standard histogram that displays the color components distribution in an
+image. Displays color graph for each color component. Shows distribution of
+the Y, U, V, A or R, G, B components, depending on input format, in the
+current frame. Below each graph a color component scale meter is shown.
+
+
+‘color ’
+Displays chroma values (U/V color placement) in a two dimensional
+graph (which is called a vectorscope). The brighter a pixel in the
+vectorscope, the more pixels of the input frame correspond to that pixel
+(i.e., more pixels have this chroma value). The V component is displayed on
+the horizontal (X) axis, with the leftmost side being V = 0 and the rightmost
+side being V = 255. The U component is displayed on the vertical (Y) axis,
+with the top representing U = 0 and the bottom representing U = 255.
+
+The position of a white pixel in the graph corresponds to the chroma value of
+a pixel of the input clip. The graph can therefore be used to read the hue
+(color flavor) and the saturation (the dominance of the hue in the color). As
+the hue of a color changes, it moves around the square. At the center of the
+square the saturation is zero, which means that the corresponding pixel has no
+color. If the amount of a specific color is increased (while leaving the other
+colors unchanged) the saturation increases, and the indicator moves towards
+the edge of the square.
+
+
+‘color2 ’
+Chroma values in vectorscope, similar as color
but actual chroma values
+are displayed.
+
+
+‘waveform ’
+Per row/column color component graph. In row mode, the graph on the left side
+represents color component value 0 and the right side represents value = 255.
+In column mode, the top side represents color component value = 0 and bottom
+side represents value = 255.
+
+
+Default value is levels
.
+
+
+level_height
+Set height of level in levels
. Default value is 200
.
+Allowed range is [50, 2048].
+
+
+scale_height
+Set height of color scale in levels
. Default value is 12
.
+Allowed range is [0, 40].
+
+
+step
+Set step for waveform
mode. Smaller values are useful to find out how
+many values of the same luminance are distributed across input rows/columns.
+Default value is 10
. Allowed range is [1, 255].
+
+
+waveform_mode
+Set mode for waveform
. Can be either row
, or column
.
+Default is row
.
+
+
+waveform_mirror
+Set mirroring mode for waveform
. 0
means unmirrored, 1
+means mirrored. In mirrored mode, higher values will be represented on the left
+side for row
mode and at the top for column
mode. Default is
+0
(unmirrored).
+
+
+display_mode
+Set display mode for waveform
and levels
.
+It accepts the following values:
+
+‘parade ’
+Display separate graph for the color components side by side in
+row
waveform mode or one below the other in column
waveform mode
+for waveform
histogram mode. For levels
histogram mode,
+per color component graphs are placed below each other.
+
+Using this display mode in waveform
histogram mode makes it easy to
+spot color casts in the highlights and shadows of an image, by comparing the
+contours of the top and the bottom graphs of each waveform. Since whites,
+grays, and blacks are characterized by exactly equal amounts of red, green,
+and blue, neutral areas of the picture should display three waveforms of
+roughly equal width/height. If not, the correction is easy to perform by
+making level adjustments to the three waveforms.
+
+
+‘overlay ’
+Presents information identical to that in the parade
, except
+that the graphs representing color components are superimposed directly
+over one another.
+
+This display mode in waveform
histogram mode makes it easier to spot
+relative differences or similarities in overlapping areas of the color
+components that are supposed to be identical, such as neutral whites, grays,
+or blacks.
+
+
+Default is parade
.
+
+
+levels_mode
+Set mode for levels
. Can be either linear
, or logarithmic
.
+Default is linear
.
+
+
+
+
+
30.45.1 Examples# TOC
+
+
+ Calculate and draw histogram:
+
+
ffplay -i input -vf histogram
+
+
+
+
+
+
30.46 hqdn3d# TOC
+
+
This is a high precision/quality 3d denoise filter. It aims to reduce
+image noise, producing smooth images and making still images really
+still. It should enhance compressibility.
+
+
It accepts the following optional parameters:
+
+
+luma_spatial
+A non-negative floating point number which specifies spatial luma strength.
+It defaults to 4.0.
+
+
+chroma_spatial
+A non-negative floating point number which specifies spatial chroma strength.
+It defaults to 3.0*luma_spatial /4.0.
+
+
+luma_tmp
+A floating point number which specifies luma temporal strength. It defaults to
+6.0*luma_spatial /4.0.
+
+
+chroma_tmp
+A floating point number which specifies chroma temporal strength. It defaults to
+luma_tmp *chroma_spatial /luma_spatial .
+
+
+
+
+
30.47 hqx# TOC
+
+
Apply a high-quality magnification filter designed for pixel art. This filter
+was originally created by Maxim Stepin.
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for hq2x
, 3
for
+hq3x
and 4
for hq4x
.
+Default is 3
.
+
+
+
+
+
30.48 hue# TOC
+
+
Modify the hue and/or the saturation of the input.
+
+
It accepts the following parameters:
+
+
+h
+Specify the hue angle as a number of degrees. It accepts an expression,
+and defaults to "0".
+
+
+s
+Specify the saturation in the [-10,10] range. It accepts an expression and
+defaults to "1".
+
+
+H
+Specify the hue angle as a number of radians. It accepts an
+expression, and defaults to "0".
+
+
+b
+Specify the brightness in the [-10,10] range. It accepts an expression and
+defaults to "0".
+
+
+
+
h and H are mutually exclusive, and can’t be
+specified at the same time.
+
+
The b , h , H and s option values are
+expressions containing the following constants:
+
+
+n
+frame count of the input frame starting from 0
+
+
+pts
+presentation timestamp of the input frame expressed in time base units
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+timestamp expressed in seconds, NAN if the input timestamp is unknown
+
+
+tb
+time base of the input video
+
+
+
+
+
30.48.1 Examples# TOC
+
+
+
+
+
30.48.2 Commands# TOC
+
+
This filter supports the following commands:
+
+b
+s
+h
+H
+Modify the hue and/or the saturation and/or brightness of the input video.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
30.49 idet# TOC
+
+
Detect video interlacing type.
+
+
This filter tries to detect if the input frames as interlaced, progressive,
+top or bottom field first. It will also try and detect fields that are
+repeated between adjacent frames (a sign of telecine).
+
+
Single frame detection considers only immediately adjacent frames when classifying each frame.
+Multiple frame detection incorporates the classification history of previous frames.
+
+
The filter will log these metadata values:
+
+
+single.current_frame
+Detected type of current frame using single-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+single.tff
+Cumulative number of frames detected as top field first using single-frame detection.
+
+
+multiple.tff
+Cumulative number of frames detected as top field first using multiple-frame detection.
+
+
+single.bff
+Cumulative number of frames detected as bottom field first using single-frame detection.
+
+
+multiple.current_frame
+Detected type of current frame using multiple-frame detection. One of:
+“tff” (top field first), “bff” (bottom field first),
+“progressive”, or “undetermined”
+
+
+multiple.bff
+Cumulative number of frames detected as bottom field first using multiple-frame detection.
+
+
+single.progressive
+Cumulative number of frames detected as progressive using single-frame detection.
+
+
+multiple.progressive
+Cumulative number of frames detected as progressive using multiple-frame detection.
+
+
+single.undetermined
+Cumulative number of frames that could not be classified using single-frame detection.
+
+
+multiple.undetermined
+Cumulative number of frames that could not be classified using multiple-frame detection.
+
+
+repeated.current_frame
+Which field in the current frame is repeated from the last. One of “neither”, “top”, or “bottom”.
+
+
+repeated.neither
+Cumulative number of frames with no repeated field.
+
+
+repeated.top
+Cumulative number of frames with the top field repeated from the previous frame’s top field.
+
+
+repeated.bottom
+Cumulative number of frames with the bottom field repeated from the previous frame’s bottom field.
+
+
+
+
The filter accepts the following options:
+
+
+intl_thres
+Set interlacing threshold.
+
+prog_thres
+Set progressive threshold.
+
+repeat_thres
+Threshold for repeated field detection.
+
+half_life
+Number of frames after which a given frame’s contribution to the
+statistics is halved (i.e., it contributes only 0.5 to it’s
+classification). The default of 0 means that all frames seen are given
+full weight of 1.0 forever.
+
+analyze_interlaced_flag
+When this is not 0 then idet will use the specified number of frames to determine
+if the interlaced flag is accurate; it will not count undetermined frames.
+If the flag is found to be accurate it will be used without any further
+computations; if it is found to be inaccurate it will be cleared without any
+further computations. This allows inserting the idet filter as a low computational
+method to clean up the interlaced flag.
+
+
+
+
+
30.50 il# TOC
+
+
Deinterleave or interleave fields.
+
+
+This filter allows one to process interlaced image fields without
+deinterlacing them. Deinterleaving splits the input frame into 2
+fields (so called half pictures). Odd lines are moved to the top
+half of the output image, even lines to the bottom half.
+You can process (filter) them independently and then re-interleave them.
+
+
The filter accepts the following options:
+
+
+luma_mode, l
+chroma_mode, c
+alpha_mode, a
+Available values for luma_mode , chroma_mode and
+alpha_mode are:
+
+
+‘none ’
+Do nothing.
+
+
+‘deinterleave, d ’
+Deinterleave fields, placing one above the other.
+
+
+‘interleave, i ’
+Interleave fields. Reverse the effect of deinterleaving.
+
+
+Default value is none
.
+
+
+luma_swap, ls
+chroma_swap, cs
+alpha_swap, as
+Swap luma/chroma/alpha fields. Exchange even & odd lines. Default value is 0
.
+
+
+
+
+
30.51 interlace# TOC
+
+
Simple interlacing filter from progressive contents. This interleaves upper (or
+lower) lines from odd frames with lower (or upper) lines from even frames,
+halving the frame rate and preserving image height.
+
+
+
Original Original New Frame
+ Frame 'j' Frame 'j+1' (tff)
+ ========== =========== ==================
+ Line 0 --------------------> Frame 'j' Line 0
+ Line 1 Line 1 ----> Frame 'j+1' Line 1
+ Line 2 ---------------------> Frame 'j' Line 2
+ Line 3 Line 3 ----> Frame 'j+1' Line 3
+ ... ... ...
+New Frame + 1 will be generated by Frame 'j+2' and Frame 'j+3' and so on
+
+
+
It accepts the following optional parameters:
+
+
+scan
+This determines whether the interlaced frame is taken from the even
+(tff - default) or odd (bff) lines of the progressive frame.
+
+
+lowpass
+Enable (default) or disable the vertical lowpass filter to avoid twitter
+interlacing and reduce moire patterns.
+
+
+
+
+
30.52 kerndeint# TOC
+
+
Deinterlace input video by applying Donald Graft’s adaptive kernel
+deinterlacing. It works on interlaced parts of a video to produce
+progressive frames.
+
+
The description of the accepted parameters follows.
+
+
+thresh
+Set the threshold which affects the filter’s tolerance when
+determining if a pixel line must be processed. It must be an integer
+in the range [0,255] and defaults to 10. A value of 0 will result in
+applying the process to every pixel.
+
+
+map
+Paint pixels exceeding the threshold value to white if set to 1.
+Default is 0.
+
+
+order
+Set the fields order. Swap fields if set to 1, leave fields alone if
+0. Default is 0.
+
+
+sharp
+Enable additional sharpening if set to 1. Default is 0.
+
+
+twoway
+Enable twoway sharpening if set to 1. Default is 0.
+
+
+
+
+
30.52.1 Examples# TOC
+
+
+ Apply default values:
+
+
kerndeint=thresh=10:map=0:order=0:sharp=0:twoway=0
+
+
+ Enable additional sharpening:
+
+
+ Paint processed pixels in white:
+
+
+
+
+
30.53 lenscorrection# TOC
+
+
Correct radial lens distortion
+
+
This filter can be used to correct for radial distortion as can result from the use
+of wide angle lenses, and thereby re-rectify the image. To find the right parameters
+one can use tools available for example as part of opencv or simply trial-and-error.
+To use opencv use the calibration sample (under samples/cpp) from the opencv sources
+and extract the k1 and k2 coefficients from the resulting matrix.
+
+
Note that effectively the same filter is available in the open-source tools Krita and
+Digikam from the KDE project.
+
+
In contrast to the vignette filter, which can also be used to compensate lens errors,
+this filter corrects the distortion of the image, whereas vignette corrects the
+brightness distribution, so you may want to use both filters together in certain
+cases, though you will have to take care of ordering, i.e. whether vignetting should
+be applied before or after lens correction.
+
+
+
30.53.1 Options# TOC
+
+
The filter accepts the following options:
+
+
+cx
+Relative x-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+width.
+
+cy
+Relative y-coordinate of the focal point of the image, and thereby the center of the
+distortion. This value has a range [0,1] and is expressed as fractions of the image
+height.
+
+k1
+Coefficient of the quadratic correction term. 0.5 means no correction.
+
+k2
+Coefficient of the double quadratic correction term. 0.5 means no correction.
+
+
+
+
The formula that generates the correction is:
+
+
r_src = r_tgt * (1 + k1 * (r_tgt / r_0 )^2 + k2 * (r_tgt / r_0 )^4)
+
+
+where r_0 is half of the image diagonal and r_src and r_tgt are the
+distances from the focal point in the source and target images, respectively.
+
+
+
30.54 lut3d# TOC
+
+
Apply a 3D LUT to an input video.
+
+
The filter accepts the following options:
+
+
+file
+Set the 3D LUT file name.
+
+Currently supported formats:
+
+‘3dl ’
+AfterEffects
+
+‘cube ’
+Iridas
+
+‘dat ’
+DaVinci
+
+‘m3d ’
+Pandora
+
+
+
+interp
+Select interpolation mode.
+
+Available values are:
+
+
+‘nearest ’
+Use values from the nearest defined point.
+
+‘trilinear ’
+Interpolate values using the 8 points defining a cube.
+
+‘tetrahedral ’
+Interpolate values using a tetrahedron.
+
+
+
+
+
+
+
30.55 lut, lutrgb, lutyuv# TOC
+
+
Compute a look-up table for binding each pixel component input value
+to an output value, and apply it to the input video.
+
+
lutyuv applies a lookup table to a YUV input video, lutrgb
+to an RGB input video.
+
+
These filters accept the following parameters:
+
+c0
+set first pixel component expression
+
+c1
+set second pixel component expression
+
+c2
+set third pixel component expression
+
+c3
+set fourth pixel component expression, corresponds to the alpha component
+
+
+r
+set red component expression
+
+g
+set green component expression
+
+b
+set blue component expression
+
+a
+alpha component expression
+
+
+y
+set Y/luminance component expression
+
+u
+set U/Cb component expression
+
+v
+set V/Cr component expression
+
+
+
+
Each of them specifies the expression to use for computing the lookup table for
+the corresponding pixel component values.
+
+
The exact component associated to each of the c* options depends on the
+format in input.
+
+
The lut filter requires either YUV or RGB pixel formats in input,
+lutrgb requires RGB pixel formats in input, and lutyuv requires YUV.
+
+
The expressions can contain the following constants and functions:
+
+
+w
+h
+The input width and height.
+
+
+val
+The input value for the pixel component.
+
+
+clipval
+The input value, clipped to the minval -maxval range.
+
+
+maxval
+The maximum value for the pixel component.
+
+
+minval
+The minimum value for the pixel component.
+
+
+negval
+The negated value for the pixel component value, clipped to the
+minval -maxval range; it corresponds to the expression
+"maxval-clipval+minval".
+
+
+clip(val)
+The computed value in val , clipped to the
+minval -maxval range.
+
+
+gammaval(gamma)
+The computed gamma correction value of the pixel component value,
+clipped to the minval -maxval range. It corresponds to the
+expression
+"pow((clipval-minval)/(maxval-minval)\,gamma )*(maxval-minval)+minval"
+
+
+
+
+
All expressions default to "val".
+
+
+
30.55.1 Examples# TOC
+
+
+
+
+
30.56 mergeplanes# TOC
+
+
Merge color channel components from several video streams.
+
+
The filter accepts up to 4 input streams, and merge selected input
+planes to the output video.
+
+
This filter accepts the following options:
+
+mapping
+Set input to output plane mapping. Default is 0
.
+
+The mappings is specified as a bitmap. It should be specified as a
+hexadecimal number in the form 0xAa[Bb[Cc[Dd]]]. ’Aa’ describes the
+mapping for the first plane of the output stream. ’A’ sets the number of
+the input stream to use (from 0 to 3), and ’a’ the plane number of the
+corresponding input to use (from 0 to 3). The rest of the mappings is
+similar, ’Bb’ describes the mapping for the output stream second
+plane, ’Cc’ describes the mapping for the output stream third plane and
+’Dd’ describes the mapping for the output stream fourth plane.
+
+
+format
+Set output pixel format. Default is yuva444p
.
+
+
+
+
+
30.56.1 Examples# TOC
+
+
+ Merge three gray video streams of same width and height into single video stream:
+
+
[a0][a1][a2]mergeplanes=0x001020:yuv444p
+
+
+ Merge 1st yuv444p stream and 2nd gray video stream into yuva444p video stream:
+
+
[a0][a1]mergeplanes=0x00010210:yuva444p
+
+
+ Swap Y and A plane in yuva444p stream:
+
+
format=yuva444p,mergeplanes=0x03010200:yuva444p
+
+
+ Swap U and V plane in yuv420p stream:
+
+
format=yuv420p,mergeplanes=0x000201:yuv420p
+
+
+ Cast a rgb24 clip to yuv444p:
+
+
format=rgb24,mergeplanes=0x000102:yuv444p
+
+
+
+
+
30.57 mcdeint# TOC
+
+
Apply motion-compensation deinterlacing.
+
+
It needs one field per frame as input and must thus be used together
+with yadif=1/3 or equivalent.
+
+
This filter accepts the following options:
+
+mode
+Set the deinterlacing mode.
+
+It accepts one of the following values:
+
+‘fast ’
+‘medium ’
+‘slow ’
+use iterative motion estimation
+
+‘extra_slow ’
+like ‘slow ’, but use multiple reference frames.
+
+
+Default value is ‘fast ’.
+
+
+parity
+Set the picture field parity assumed for the input video. It must be
+one of the following values:
+
+
+‘0, tff ’
+assume top field first
+
+‘1, bff ’
+assume bottom field first
+
+
+
+Default value is ‘bff ’.
+
+
+qp
+Set per-block quantization parameter (QP) used by the internal
+encoder.
+
+Higher values should result in a smoother motion vector field but less
+optimal individual vectors. Default value is 1.
+
+
+
+
+
30.58 mp# TOC
+
+
Apply an MPlayer filter to the input video.
+
+
This filter provides a wrapper around some of the filters of
+MPlayer/MEncoder.
+
+
This wrapper is considered experimental. Some of the wrapped filters
+may not work properly and we may drop support for them, as they will
+be implemented natively into FFmpeg. Thus you should avoid
+depending on them when writing portable scripts.
+
+
The filter accepts the parameters:
+filter_name [:=]filter_params
+
+
filter_name is the name of a supported MPlayer filter,
+filter_params is a string containing the parameters accepted by
+the named filter.
+
+
The list of the currently supported filters follows:
+
+eq2
+eq
+ilpack
+softpulldown
+
+
+
The parameter syntax and behavior for the listed filters are the same
+of the corresponding MPlayer filters. For detailed instructions check
+the "VIDEO FILTERS" section in the MPlayer manual.
+
+
+
30.58.1 Examples# TOC
+
+
+ Adjust gamma, brightness, contrast:
+
+
+
+
See also mplayer(1), http://www.mplayerhq.hu/ .
+
+
+
30.59 mpdecimate# TOC
+
+
Drop frames that do not differ greatly from the previous frame in
+order to reduce frame rate.
+
+
The main use of this filter is for very-low-bitrate encoding
+(e.g. streaming over dialup modem), but it could in theory be used for
+fixing movies that were inverse-telecined incorrectly.
+
+
A description of the accepted options follows.
+
+
+max
+Set the maximum number of consecutive frames which can be dropped (if
+positive), or the minimum interval between dropped frames (if
+negative). If the value is 0, the frame is dropped unregarding the
+number of previous sequentially dropped frames.
+
+Default value is 0.
+
+
+hi
+lo
+frac
+Set the dropping threshold values.
+
+Values for hi and lo are for 8x8 pixel blocks and
+represent actual pixel value differences, so a threshold of 64
+corresponds to 1 unit of difference for each pixel, or the same spread
+out differently over the block.
+
+A frame is a candidate for dropping if no 8x8 blocks differ by more
+than a threshold of hi , and if no more than frac blocks (1
+meaning the whole image) differ by more than a threshold of lo .
+
+Default value for hi is 64*12, default value for lo is
+64*5, and default value for frac is 0.33.
+
+
+
+
+
+
30.60 negate# TOC
+
+
Negate input video.
+
+
It accepts an integer in input; if non-zero it negates the
+alpha component (if available). The default value in input is 0.
+
+
+
30.61 noformat# TOC
+
+
Force libavfilter not to use any of the specified pixel formats for the
+input to the next filter.
+
+
It accepts the following parameters:
+
+pix_fmts
+A ’|’-separated list of pixel format names, such as
+"pix_fmts=yuv420p|monow|rgb24".
+
+
+
+
+
+
30.61.1 Examples# TOC
+
+
+ Force libavfilter to use a format different from yuv420p for the
+input to the vflip filter:
+
+
noformat=pix_fmts=yuv420p,vflip
+
+
+ Convert the input video to any of the formats not contained in the list:
+
+
noformat=yuv420p|yuv444p|yuv410p
+
+
+
+
+
30.62 noise# TOC
+
+
Add noise on video input frame.
+
+
The filter accepts the following options:
+
+
+all_seed
+c0_seed
+c1_seed
+c2_seed
+c3_seed
+Set noise seed for specific pixel component or all pixel components in case
+of all_seed . Default value is 123457
.
+
+
+all_strength, alls
+c0_strength, c0s
+c1_strength, c1s
+c2_strength, c2s
+c3_strength, c3s
+Set noise strength for specific pixel component or all pixel components in case
+all_strength . Default value is 0
. Allowed range is [0, 100].
+
+
+all_flags, allf
+c0_flags, c0f
+c1_flags, c1f
+c2_flags, c2f
+c3_flags, c3f
+Set pixel component flags or set flags for all components if all_flags .
+Available values for component flags are:
+
+‘a ’
+averaged temporal noise (smoother)
+
+‘p ’
+mix random noise with a (semi)regular pattern
+
+‘t ’
+temporal noise (noise pattern changes between frames)
+
+‘u ’
+uniform noise (gaussian otherwise)
+
+
+
+
+
+
+
30.62.1 Examples# TOC
+
+
Add temporal and uniform noise to input video:
+
+
noise=alls=20:allf=t+u
+
+
+
+
30.63 null# TOC
+
+
Pass the video source unchanged to the output.
+
+
+
30.64 ocv# TOC
+
+
Apply a video transform using libopencv.
+
+
To enable this filter, install the libopencv library and headers and
+configure FFmpeg with --enable-libopencv
.
+
+
It accepts the following parameters:
+
+
+filter_name
+The name of the libopencv filter to apply.
+
+
+filter_params
+The parameters to pass to the libopencv filter. If not specified, the default
+values are assumed.
+
+
+
+
+
Refer to the official libopencv documentation for more precise
+information:
+http://docs.opencv.org/master/modules/imgproc/doc/filtering.html
+
+
Several libopencv filters are supported; see the following subsections.
+
+
+
30.64.1 dilate# TOC
+
+
Dilate an image by using a specific structuring element.
+It corresponds to the libopencv function cvDilate
.
+
+
It accepts the parameters: struct_el |nb_iterations .
+
+
struct_el represents a structuring element, and has the syntax:
+cols xrows +anchor_x xanchor_y /shape
+
+
cols and rows represent the number of columns and rows of
+the structuring element, anchor_x and anchor_y the anchor
+point, and shape the shape for the structuring element. shape
+must be "rect", "cross", "ellipse", or "custom".
+
+
If the value for shape is "custom", it must be followed by a
+string of the form "=filename ". The file with name
+filename is assumed to represent a binary image, with each
+printable character corresponding to a bright pixel. When a custom
+shape is used, cols and rows are ignored, the number
+or columns and rows of the read file are assumed instead.
+
+
The default value for struct_el is "3x3+0x0/rect".
+
+
nb_iterations specifies the number of times the transform is
+applied to the image, and defaults to 1.
+
+
Some examples:
+
+
# Use the default values
+ocv=dilate
+
+# Dilate using a structuring element with a 5x5 cross, iterating two times
+ocv=filter_name=dilate:filter_params=5x5+2x2/cross|2
+
+# Read the shape from the file diamond.shape, iterating two times.
+# The file diamond.shape may contain a pattern of characters like this
+# *
+# ***
+# *****
+# ***
+# *
+# The specified columns and rows are ignored
+# but the anchor point coordinates are not
+ocv=dilate:0x0+2x2/custom=diamond.shape|2
+
+
+
+
30.64.2 erode# TOC
+
+
Erode an image by using a specific structuring element.
+It corresponds to the libopencv function cvErode
.
+
+
It accepts the parameters: struct_el :nb_iterations ,
+with the same syntax and semantics as the dilate filter.
+
+
+
30.64.3 smooth# TOC
+
+
Smooth the input video.
+
+
The filter takes the following parameters:
+type |param1 |param2 |param3 |param4 .
+
+
type is the type of smooth filter to apply, and must be one of
+the following values: "blur", "blur_no_scale", "median", "gaussian",
+or "bilateral". The default value is "gaussian".
+
+
+The meaning of param1 , param2 , param3 , and param4
+depends on the smooth type. param1 and
+param2 accept positive integer values or 0. param3 and
+param4 accept floating point values.
+
+
The default value for param1 is 3. The default value for the
+other parameters is 0.
+
+
These parameters correspond to the parameters assigned to the
+libopencv function cvSmooth
.
+
+
+
30.65 overlay# TOC
+
+
Overlay one video on top of another.
+
+
It takes two inputs and has one output. The first input is the "main"
+video on which the second input is overlaid.
+
+
It accepts the following parameters:
+
+
A description of the accepted options follows.
+
+
+x
+y
+Set the expression for the x and y coordinates of the overlaid video
+on the main video. Default value is "0" for both expressions. In case
+the expression is invalid, it is set to a huge value (meaning that the
+overlay will not be displayed within the output visible area).
+
+
+eof_action
+The action to take when EOF is encountered on the secondary input; it accepts
+one of the following values:
+
+
+repeat
+Repeat the last frame (the default).
+
+endall
+End both streams.
+
+pass
+Pass the main input through.
+
+
+
+
+eval
+Set when the expressions for x , and y are evaluated.
+
+It accepts the following values:
+
+‘init ’
+only evaluate expressions once during the filter initialization or
+when a command is processed
+
+
+‘frame ’
+evaluate expressions for each incoming frame
+
+
+
+Default value is ‘frame ’.
+
+
+shortest
+If set to 1, force the output to terminate when the shortest input
+terminates. Default value is 0.
+
+
+format
+Set the format for the output video.
+
+It accepts the following values:
+
+‘yuv420 ’
+force YUV420 output
+
+
+‘yuv422 ’
+force YUV422 output
+
+
+‘yuv444 ’
+force YUV444 output
+
+
+‘rgb ’
+force RGB output
+
+
+
+Default value is ‘yuv420 ’.
+
+
+rgb (deprecated)
+If set to 1, force the filter to accept inputs in the RGB
+color space. Default value is 0. This option is deprecated, use
+format instead.
+
+
+repeatlast
+If set to 1, force the filter to draw the last overlay frame over the
+main input until the end of the stream. A value of 0 disables this
+behavior. Default value is 1.
+
+
+
+
The x , and y expressions can contain the following
+parameters.
+
+
+main_w, W
+main_h, H
+The main input width and height.
+
+
+overlay_w, w
+overlay_h, h
+The overlay input width and height.
+
+
+x
+y
+The computed values for x and y . They are evaluated for
+each new frame.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values of the output
+format. For example for the pixel format "yuv422p" hsub is 2 and
+vsub is 1.
+
+
+n
+the number of input frame, starting from 0
+
+
+pos
+the position in the file of the input frame, NAN if unknown
+
+
+t
+The timestamp, expressed in seconds. It’s NAN if the input timestamp is unknown.
+
+
+
+
+
Note that the n , pos , t variables are available only
+when evaluation is done per frame , and will evaluate to NAN
+when eval is set to ‘init ’.
+
+
Be aware that frames are taken from each input video in timestamp
+order, hence, if their initial timestamps differ, it is a good idea
+to pass the two inputs through a setpts=PTS-STARTPTS filter to
+have them begin in the same zero timestamp, as the example for
+the movie filter does.
+
+
You can chain together more overlays but you should test the
+efficiency of such approach.
+
+
+
30.65.1 Commands# TOC
+
+
This filter supports the following commands:
+
+x
+y
+Modify the x and y of the overlay input.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
30.65.2 Examples# TOC
+
+
+
+
+
30.66 owdenoise# TOC
+
+
Apply Overcomplete Wavelet denoiser.
+
+
The filter accepts the following options:
+
+
+depth
+Set depth.
+
+Larger depth values will denoise lower frequency components more, but
+slow down filtering.
+
+Must be an int in the range 8-16, default is 8
.
+
+
+luma_strength, ls
+Set luma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+chroma_strength, cs
+Set chroma strength.
+
+Must be a double value in the range 0-1000, default is 1.0
.
+
+
+
+
+
30.67 pad# TOC
+
+
Add paddings to the input image, and place the original input at the
+provided x , y coordinates.
+
+
It accepts the following parameters:
+
+
+width, w
+height, h
+Specify an expression for the size of the output image with the
+paddings added. If the value for width or height is 0, the
+corresponding input size is used for the output.
+
+The width expression can reference the value set by the
+height expression, and vice versa.
+
+The default value of width and height is 0.
+
+
+x
+y
+Specify the offsets to place the input image at within the padded area,
+with respect to the top/left border of the output image.
+
+The x expression can reference the value set by the y
+expression, and vice versa.
+
+The default value of x and y is 0.
+
+
+color
+Specify the color of the padded area. For the syntax of this option,
+check the "Color" section in the ffmpeg-utils manual.
+
+The default value of color is "black".
+
+
+
+
The value for the width , height , x , and y
+options are expressions containing the following constants:
+
+
+in_w
+in_h
+The input video width and height.
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output width and height (the size of the padded area), as
+specified by the width and height expressions.
+
+
+ow
+oh
+These are the same as out_w and out_h .
+
+
+x
+y
+The x and y offsets as specified by the x and y
+expressions, or NAN if not yet specified.
+
+
+a
+same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+input display aspect ratio, it is the same as (iw / ih ) * sar
+
+
+hsub
+vsub
+The horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
30.67.1 Examples# TOC
+
+
+
+
+
30.68 perspective# TOC
+
+
Correct perspective of video not recorded perpendicular to the screen.
+
+
A description of the accepted parameters follows.
+
+
+x0
+y0
+x1
+y1
+x2
+y2
+x3
+y3
+Set coordinates expression for top left, top right, bottom left and bottom right corners.
+Default values are 0:0:W:0:0:H:W:H with which perspective will remain unchanged.
+If the sense option is set to source , then the specified points will be sent
+to the corners of the destination. If the sense option is set to
+destination , then the corners of the source will be sent to the specified
+coordinates.
+
+The expressions can use the following variables:
+
+
+W
+H
+the width and height of video frame.
+
+
+
+
+interpolation
+Set interpolation for perspective correction.
+
+It accepts the following values:
+
+‘linear ’
+‘cubic ’
+
+
+Default value is ‘linear ’.
+
+
+sense
+Set interpretation of coordinate options.
+
+It accepts the following values:
+
+‘0, source ’
+
+Send point in the source specified by the given coordinates to
+the corners of the destination.
+
+
+‘1, destination ’
+
+Send the corners of the source to the point in the destination specified
+by the given coordinates.
+
+Default value is ‘source ’.
+
+
+
+
+
+
+
30.69 phase# TOC
+
+
Delay interlaced video by one field time so that the field order changes.
+
+
The intended use is to fix PAL movies that have been captured with the
+opposite field order to the film-to-video transfer.
+
+
A description of the accepted parameters follows.
+
+
+mode
+Set phase mode.
+
+It accepts the following values:
+
+‘t ’
+Capture field order top-first, transfer bottom-first.
+Filter will delay the bottom field.
+
+
+‘b ’
+Capture field order bottom-first, transfer top-first.
+Filter will delay the top field.
+
+
+‘p ’
+Capture and transfer with the same field order. This mode only exists
+for the documentation of the other options to refer to, but if you
+actually select it, the filter will faithfully do nothing.
+
+
+‘a ’
+Capture field order determined automatically by field flags, transfer
+opposite.
+Filter selects among ‘t ’ and ‘b ’ modes on a frame by frame
+basis using field flags. If no field information is available,
+then this works just like ‘u ’.
+
+
+‘u ’
+Capture unknown or varying, transfer opposite.
+Filter selects among ‘t ’ and ‘b ’ on a frame by frame basis by
+analyzing the images and selecting the alternative that produces best
+match between the fields.
+
+
+‘T ’
+Capture top-first, transfer unknown or varying.
+Filter selects among ‘t ’ and ‘p ’ using image analysis.
+
+
+‘B ’
+Capture bottom-first, transfer unknown or varying.
+Filter selects among ‘b ’ and ‘p ’ using image analysis.
+
+
+‘A ’
+Capture determined by field flags, transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using field flags and
+image analysis. If no field information is available, then this works just
+like ‘U ’. This is the default mode.
+
+
+‘U ’
+Both capture and transfer unknown or varying.
+Filter selects among ‘t ’, ‘b ’ and ‘p ’ using image analysis only.
+
+
+
+
+
+
+
30.70 pixdesctest# TOC
+
+
Pixel format descriptor test filter, mainly useful for internal
+testing. The output video should be equal to the input video.
+
+
For example:
+
+
format=monow, pixdesctest
+
+
+
can be used to test the monowhite pixel format descriptor definition.
+
+
+
30.71 pp# TOC
+
+
Enable the specified chain of postprocessing subfilters using libpostproc. This
+library should be automatically selected with a GPL build (--enable-gpl
).
+Subfilters must be separated by ’/’ and can be disabled by prepending a ’-’.
+Each subfilter and some options have a short and a long name that can be used
+interchangeably, i.e. dr/dering are the same.
+
+
The filters accept the following options:
+
+
+subfilters
+Set postprocessing subfilters string.
+
+
+
+
All subfilters share common options to determine their scope:
+
+
+a/autoq
+Honor the quality commands for this subfilter.
+
+
+c/chrom
+Do chrominance filtering, too (default).
+
+
+y/nochrom
+Do luminance filtering only (no chrominance).
+
+
+n/noluma
+Do chrominance filtering only (no luminance).
+
+
+
+
These options can be appended after the subfilter name, separated by a ’|’.
+
+
Available subfilters are:
+
+
+hb/hdeblock[|difference[|flatness]]
+Horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+vb/vdeblock[|difference[|flatness]]
+Vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+ha/hadeblock[|difference[|flatness]]
+Accurate horizontal deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+va/vadeblock[|difference[|flatness]]
+Accurate vertical deblocking filter
+
+difference
+Difference factor where higher values mean more deblocking (default: 32
).
+
+flatness
+Flatness threshold where lower values mean more deblocking (default: 39
).
+
+
+
+
+
+
The horizontal and vertical deblocking filters share the difference and
+flatness values so you cannot set different horizontal and vertical
+thresholds.
+
+
+h1/x1hdeblock
+Experimental horizontal deblocking filter
+
+
+v1/x1vdeblock
+Experimental vertical deblocking filter
+
+
+dr/dering
+Deringing filter
+
+
+tn/tmpnoise[|threshold1[|threshold2[|threshold3]]], temporal noise reducer
+
+threshold1
+larger -> stronger filtering
+
+threshold2
+larger -> stronger filtering
+
+threshold3
+larger -> stronger filtering
+
+
+
+
+al/autolevels[:f/fullyrange], automatic brightness / contrast correction
+
+f/fullyrange
+Stretch luminance to 0-255
.
+
+
+
+
+lb/linblenddeint
+Linear blend deinterlacing filter that deinterlaces the given block by
+filtering all lines with a (1 2 1)
filter.
+
+
+li/linipoldeint
+Linear interpolating deinterlacing filter that deinterlaces the given block by
+linearly interpolating every second line.
+
+
+ci/cubicipoldeint
+Cubic interpolating deinterlacing filter deinterlaces the given block by
+cubically interpolating every second line.
+
+
+md/mediandeint
+Median deinterlacing filter that deinterlaces the given block by applying a
+median filter to every second line.
+
+
+fd/ffmpegdeint
+FFmpeg deinterlacing filter that deinterlaces the given block by filtering every
+second line with a (-1 4 2 4 -1)
filter.
+
+
+l5/lowpass5
+Vertically applied FIR lowpass deinterlacing filter that deinterlaces the given
+block by filtering all lines with a (-1 2 6 2 -1)
filter.
+
+
+fq/forceQuant[|quantizer]
+Overrides the quantizer table from the input with the constant quantizer you
+specify.
+
+quantizer
+Quantizer to use
+
+
+
+
+de/default
+Default pp filter combination (hb|a,vb|a,dr|a
)
+
+
+fa/fast
+Fast pp filter combination (h1|a,v1|a,dr|a
)
+
+
+ac
+High quality pp filter combination (ha|a|128|7,va|a,dr|a
)
+
+
+
+
+
30.71.1 Examples# TOC
+
+
+ Apply horizontal and vertical deblocking, deringing and automatic
+brightness/contrast:
+
+
+ Apply default filters without brightness/contrast correction:
+
+
+ Apply default filters and temporal denoiser:
+
+
pp=default/tmpnoise|1|2|3
+
+
+ Apply deblocking on luminance only, and switch vertical deblocking on or off
+automatically depending on available CPU time:
+
+
+
+
+
30.72 pp7# TOC
+
Apply Postprocessing filter 7. It is a variant of the spp filter,
+similar to spp = 6 with 7 point DCT, where only the center sample is
+used after IDCT.
+
+
The filter accepts the following options:
+
+
+qp
+Force a constant quantization parameter. It accepts an integer in range
+0 to 63. If not set, the filter will use the QP from the video stream
+(if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding.
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+‘medium ’
+Set medium thresholding (good results, default).
+
+
+
+
+
+
+
30.73 psnr# TOC
+
+
Obtain the average, maximum and minimum PSNR (Peak Signal to Noise
+Ratio) between two input videos.
+
+
This filter takes in input two input videos, the first input is
+considered the "main" source and is passed unchanged to the
+output. The second input is used as a "reference" video for computing
+the PSNR.
+
+
Both video inputs must have the same resolution and pixel format for
+this filter to work correctly. Also it assumes that both inputs
+have the same number of frames, which are compared one by one.
+
+
The obtained average PSNR is printed through the logging system.
+
+
The filter stores the accumulated MSE (mean squared error) of each
+frame, and at the end of the processing it is averaged across all frames
+equally, and the following formula is applied to obtain the PSNR:
+
+
+
PSNR = 10*log10(MAX^2/MSE)
+
+
+
Where MAX is the average of the maximum values of each component of the
+image.
+
+
The description of the accepted parameters follows.
+
+
+stats_file, f
+If specified the filter will use the named file to save the PSNR of
+each individual frame.
+
+
+
+
The file printed if stats_file is selected, contains a sequence of
+key/value pairs of the form key :value for each compared
+couple of frames.
+
+
A description of each shown parameter follows:
+
+
+n
+sequential number of the input frame, starting from 1
+
+
+mse_avg
+Mean Square Error pixel-by-pixel average difference of the compared
+frames, averaged over all the image components.
+
+
+mse_y, mse_u, mse_v, mse_r, mse_g, mse_b, mse_a
+Mean Square Error pixel-by-pixel average difference of the compared
+frames for the component specified by the suffix.
+
+
+psnr_y, psnr_u, psnr_v, psnr_r, psnr_g, psnr_b, psnr_a
+Peak Signal to Noise ratio of the compared frames for the component
+specified by the suffix.
+
+
+
+
For example:
+
+
movie=ref_movie.mpg, setpts=PTS-STARTPTS [main];
+[main][ref] psnr="stats_file=stats.log" [out]
+
+
+
On this example the input file being processed is compared with the
+reference file ref_movie.mpg . The PSNR of each individual frame
+is stored in stats.log .
+
+
+
30.74 pullup# TOC
+
+
Pulldown reversal (inverse telecine) filter, capable of handling mixed
+hard-telecine, 24000/1001 fps progressive, and 30000/1001 fps progressive
+content.
+
+
The pullup filter is designed to take advantage of future context in making
+its decisions. This filter is stateless in the sense that it does not lock
+onto a pattern to follow, but it instead looks forward to the following
+fields in order to identify matches and rebuild progressive frames.
+
+
To produce content with an even framerate, insert the fps filter after
+pullup: use fps=24000/1001 if the input frame rate is 29.97fps,
+fps=24 for 30fps and the (rare) telecined 25fps input.
+
+
The filter accepts the following options:
+
+
+jl
+jr
+jt
+jb
+These options set the amount of "junk" to ignore at the left, right, top, and
+bottom of the image, respectively. Left and right are in units of 8 pixels,
+while top and bottom are in units of 2 lines.
+The default is 8 pixels on each side.
+
+
+sb
+Set the strict breaks. Setting this option to 1 will reduce the chances of
+filter generating an occasional mismatched frame, but it may also cause an
+excessive number of frames to be dropped during high motion sequences.
+Conversely, setting it to -1 will make filter match fields more easily.
+This may help processing of video where there is slight blurring between
+the fields, but may also cause there to be interlaced frames in the output.
+Default value is 0
.
+
+
+mp
+Set the metric plane to use. It accepts the following values:
+
+‘l ’
+Use luma plane.
+
+
+‘u ’
+Use chroma blue plane.
+
+
+‘v ’
+Use chroma red plane.
+
+
+
+This option may be set to use chroma plane instead of the default luma plane
+for doing filter’s computations. This may improve accuracy on very clean
+source material, but more likely will decrease accuracy, especially if there
+is chroma noise (rainbow effect) or any grayscale video.
+The main purpose of setting mp to a chroma plane is to reduce CPU
+load and make pullup usable in realtime on slow machines.
+
+
+
+
For best results (without duplicated frames in the output file) it is
+necessary to change the output frame rate. For example, to inverse
+telecine NTSC input:
+
+
ffmpeg -i input -vf pullup -r 24000/1001 ...
+
+
+
+
30.75 qp# TOC
+
+
Change video quantization parameters (QP).
+
+
The filter accepts the following option:
+
+
+qp
+Set expression for quantization parameter.
+
+
+
+
The expression is evaluated through the eval API and can contain, among others,
+the following constants:
+
+
+known
+1 if index is not 129, 0 otherwise.
+
+
+qp
+Sequential index starting from -129 to 128.
+
+
+
+
+
30.75.1 Examples# TOC
+
+
+ Some equation like:
+
+
+
+
+
30.76 removelogo# TOC
+
+
Suppress a TV station logo, using an image file to determine which
+pixels comprise the logo. It works by filling in the pixels that
+comprise the logo with neighboring pixels.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filter bitmap file, which can be any image format supported by
+libavformat. The width and height of the image file must match those of the
+video stream being processed.
+
+
+
+
Pixels in the provided bitmap image with a value of zero are not
+considered part of the logo, non-zero pixels are considered part of
+the logo. If you use white (255) for the logo and black (0) for the
+rest, you will be safe. For making the filter bitmap, it is
+recommended to take a screen capture of a black frame with the logo
+visible, and then using a threshold filter followed by the erode
+filter once or twice.
+
+
If needed, little splotches can be fixed manually. Remember that if
+logo pixels are not covered, the filter quality will be much
+reduced. Marking too many pixels as part of the logo does not hurt as
+much, but it will increase the amount of blurring needed to cover over
+the image and will destroy more information than necessary, and extra
+pixels will slow things down on a large logo.
+
+
+
30.77 rotate# TOC
+
+
Rotate video by an arbitrary angle expressed in radians.
+
+
The filter accepts the following options:
+
+
A description of the optional parameters follows.
+
+angle, a
+Set an expression for the angle by which to rotate the input video
+clockwise, expressed as a number of radians. A negative value will
+result in a counter-clockwise rotation. By default it is set to "0".
+
+This expression is evaluated for each frame.
+
+
+out_w, ow
+Set the output width expression, default value is "iw".
+This expression is evaluated just once during configuration.
+
+
+out_h, oh
+Set the output height expression, default value is "ih".
+This expression is evaluated just once during configuration.
+
+
+bilinear
+Enable bilinear interpolation if set to 1, a value of 0 disables
+it. Default value is 1.
+
+
+fillcolor, c
+Set the color used to fill the output area not covered by the rotated
+image. For the general syntax of this option, check the "Color" section in the
+ffmpeg-utils manual. If the special value "none" is selected then no
+background is printed (useful for example if the background is never shown).
+
+Default value is "black".
+
+
+
+
The expressions for the angle and the output size can contain the
+following constants and functions:
+
+
+n
+sequential number of the input frame, starting from 0. It is always NAN
+before the first frame is filtered.
+
+
+t
+time in seconds of the input frame, it is set to 0 when the filter is
+configured. It is always NAN before the first frame is filtered.
+
+
+hsub
+vsub
+horizontal and vertical chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+in_w, iw
+in_h, ih
+the input video width and height
+
+
+out_w, ow
+out_h, oh
+the output width and height, that is the size of the padded area as
+specified by the width and height expressions
+
+
+rotw(a)
+roth(a)
+the minimal width/height required for completely containing the input
+video rotated by a radians.
+
+These are only available when computing the out_w and
+out_h expressions.
+
+
+
+
+
30.77.1 Examples# TOC
+
+
+ Rotate the input by PI/6 radians clockwise:
+
+
+ Rotate the input by PI/6 radians counter-clockwise:
+
+
+ Rotate the input by 45 degrees clockwise:
+
+
+ Apply a constant rotation with period T, starting from an angle of PI/3:
+
+
+ Make the input video rotation oscillating with a period of T
+seconds and an amplitude of A radians:
+
+
rotate=A*sin(2*PI/T*t)
+
+
+ Rotate the video, output size is chosen so that the whole rotating
+input video is always completely contained in the output:
+
+
rotate='2*PI*t:ow=hypot(iw,ih):oh=ow'
+
+
+ Rotate the video, reduce the output size so that no background is ever
+shown:
+
+
rotate=2*PI*t:ow='min(iw,ih)/sqrt(2)':oh=ow:c=none
+
+
+
+
+
30.77.2 Commands# TOC
+
+
The filter supports the following commands:
+
+
+a, angle
+Set the angle expression.
+The command accepts the same syntax of the corresponding option.
+
+If the specified expression is not valid, it is kept at its current
+value.
+
+
+
+
+
30.78 sab# TOC
+
+
Apply Shape Adaptive Blur.
+
+
The filter accepts the following options:
+
+
+luma_radius, lr
+Set luma blur filter strength, must be a value in range 0.1-4.0, default
+value is 1.0. A greater value will result in a more blurred image, and
+in slower processing.
+
+
+luma_pre_filter_radius, lpfr
+Set luma pre-filter radius, must be a value in the 0.1-2.0 range, default
+value is 1.0.
+
+
+luma_strength, ls
+Set luma maximum difference between pixels to still be considered, must
+be a value in the 0.1-100.0 range, default value is 1.0.
+
+
+chroma_radius, cr
+Set chroma blur filter strength, must be a value in range 0.1-4.0. A
+greater value will result in a more blurred image, and in slower
+processing.
+
+
+chroma_pre_filter_radius, cpfr
+Set chroma pre-filter radius, must be a value in the 0.1-2.0 range.
+
+
+chroma_strength, cs
+Set chroma maximum difference between pixels to still be considered,
+must be a value in the 0.1-100.0 range.
+
+
+
+
Each chroma option value, if not explicitly specified, is set to the
+corresponding luma option value.
+
+
+
30.79 scale# TOC
+
+
Scale (resize) the input video, using the libswscale library.
+
+
The scale filter forces the output display aspect ratio to be the same
+of the input, by changing the output sample aspect ratio.
+
+
If the input image format is different from the format requested by
+the next filter, the scale filter will convert the input to the
+requested format.
+
+
+
30.79.1 Options# TOC
+
The filter accepts the following options, or any of the options
+supported by the libswscale scaler.
+
+
See (ffmpeg-scaler)the ffmpeg-scaler manual for
+the complete list of scaler options.
+
+
+width, w
+height, h
+Set the output video dimension expression. Default value is the input
+dimension.
+
+If the value is 0, the input width is used for the output.
+
+If one of the values is -1, the scale filter will use a value that
+maintains the aspect ratio of the input image, calculated from the
+other specified dimension. If both of them are -1, the input size is
+used.
+
+If one of the values is -n with n > 1, the scale filter will also use a value
+that maintains the aspect ratio of the input image, calculated from the other
+specified dimension. After that it will, however, make sure that the calculated
+dimension is divisible by n and adjust the value if necessary.
+
+See below for the list of accepted constants for use in the dimension
+expression.
+
+
+interl
+Set the interlacing mode. It accepts the following values:
+
+
+‘1 ’
+Force interlaced aware scaling.
+
+
+‘0 ’
+Do not apply interlaced scaling.
+
+
+‘-1 ’
+Select interlaced aware scaling depending on whether the source frames
+are flagged as interlaced or not.
+
+
+
+Default value is ‘0 ’.
+
+
+flags
+Set libswscale scaling flags. See
+(ffmpeg-scaler)the ffmpeg-scaler manual for the
+complete list of values. If not explicitly specified the filter applies
+the default flags.
+
+
+size, s
+Set the video size. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+in_color_matrix
+out_color_matrix
+Set in/output YCbCr color space type.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder.
+
+If not specified, the color space type depends on the pixel format.
+
+Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘bt709 ’
+Format conforming to International Telecommunication Union (ITU)
+Recommendation BT.709.
+
+
+‘fcc ’
+Set color space conforming to the United States Federal Communications
+Commission (FCC) Code of Federal Regulations (CFR) Title 47 (2003) 73.682 (a).
+
+
+‘bt601 ’
+Set color space conforming to:
+
+
+ ITU Radiocommunication Sector (ITU-R) Recommendation BT.601
+
+ ITU-R Rec. BT.470-6 (1998) Systems B, B1, and G
+
+ Society of Motion Picture and Television Engineers (SMPTE) ST 170:2004
+
+
+
+
+‘smpte240m ’
+Set color space conforming to SMPTE ST 240:1999.
+
+
+
+
+in_range
+out_range
+Set in/output YCbCr sample range.
+
+This allows the autodetected value to be overridden as well as allows forcing
+a specific value used for the output and encoder. If not specified, the
+range depends on the pixel format. Possible values:
+
+
+‘auto ’
+Choose automatically.
+
+
+‘jpeg/full/pc ’
+Set full range (0-255 in case of 8-bit luma).
+
+
+‘mpeg/tv ’
+Set "MPEG" range (16-235 in case of 8-bit luma).
+
+
+
+
+force_original_aspect_ratio
+Enable decreasing or increasing output video width or height if necessary to
+keep the original aspect ratio. Possible values:
+
+
+‘disable ’
+Scale the video as specified and disable this feature.
+
+
+‘decrease ’
+The output video dimensions will automatically be decreased if needed.
+
+
+‘increase ’
+The output video dimensions will automatically be increased if needed.
+
+
+
+
+One useful instance of this option is that when you know a specific device’s
+maximum allowed resolution, you can use this to limit the output video to
+that, while retaining the aspect ratio. For example, device A allows
+1280x720 playback, and your video is 1920x800. Using this option (set it to
+decrease) and specifying 1280x720 to the command line makes the output
+1280x533.
+
+Please note that this is a different thing than specifying -1 for w
+or h , you still need to specify the output resolution for this option
+to work.
+
+
+
+
+
The values of the w and h options are expressions
+containing the following constants:
+
+
+in_w
+in_h
+The input width and height
+
+
+iw
+ih
+These are the same as in_w and in_h .
+
+
+out_w
+out_h
+The output (scaled) width and height
+
+
+ow
+oh
+These are the same as out_w and out_h
+
+
+a
+The same as iw / ih
+
+
+sar
+input sample aspect ratio
+
+
+dar
+The input display aspect ratio. Calculated from (iw / ih) * sar
.
+
+
+hsub
+vsub
+horizontal and vertical input chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+ohsub
+ovsub
+horizontal and vertical output chroma subsample values. For example for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
30.79.2 Examples# TOC
+
+
+
+
+
30.80 separatefields# TOC
+
+
The separatefields filter takes a frame-based video input and splits
+each frame into its component fields, producing a new half height clip
+with twice the frame rate and twice the frame count.
+
+
This filter uses field-dominance information in the frame to decide which
+of each pair of fields to place first in the output.
+If it gets it wrong, use the setfield filter before the separatefields filter.
+
+
+
30.81 setdar, setsar# TOC
+
+
The setdar
filter sets the Display Aspect Ratio for the filter
+output video.
+
+
This is done by changing the specified Sample (aka Pixel) Aspect
+Ratio, according to the following equation:
+
+
DAR = HORIZONTAL_RESOLUTION / VERTICAL_RESOLUTION * SAR
+
+
+
Keep in mind that the setdar
filter does not modify the pixel
+dimensions of the video frame. Also, the display aspect ratio set by
+this filter may be changed by later filters in the filterchain,
+e.g. in case of scaling or if another "setdar" or a "setsar" filter is
+applied.
+
+
The setsar
filter sets the Sample (aka Pixel) Aspect Ratio for
+the filter output video.
+
+
Note that as a consequence of the application of this filter, the
+output display aspect ratio will change according to the equation
+above.
+
+
Keep in mind that the sample aspect ratio set by the setsar
+filter may be changed by later filters in the filterchain, e.g. if
+another "setsar" or a "setdar" filter is applied.
+
+
It accepts the following parameters:
+
+
+r, ratio, dar (setdar
only), sar (setsar
only)
+Set the aspect ratio used by the filter.
+
+The parameter can be a floating point number string, an expression, or
+a string of the form num :den , where num and
+den are the numerator and denominator of the aspect ratio. If
+the parameter is not specified, it is assumed the value "0".
+In case the form "num :den " is used, the :
character
+should be escaped.
+
+
+max
+Set the maximum integer value to use for expressing numerator and
+denominator when reducing the expressed aspect ratio to a rational.
+Default value is 100
.
+
+
+
+
+
The parameter sar is an expression containing
+the following constants:
+
+
+E, PI, PHI
+These are approximated values for the mathematical constants e
+(Euler’s number), pi (Greek pi), and phi (the golden ratio).
+
+
+w, h
+The input width and height.
+
+
+a
+These are the same as w / h .
+
+
+sar
+The input sample aspect ratio.
+
+
+dar
+The input display aspect ratio. It is the same as
+(w / h ) * sar .
+
+
+hsub, vsub
+Horizontal and vertical chroma subsample values. For example, for the
+pixel format "yuv422p" hsub is 2 and vsub is 1.
+
+
+
+
+
30.81.1 Examples# TOC
+
+
+ To change the display aspect ratio to 16:9, specify one of the following:
+
+
setdar=dar=1.77777
+setdar=dar=16/9
+setdar=dar=1.77777
+
+
+ To change the sample aspect ratio to 10:11, specify:
+
+
+ To set a display aspect ratio of 16:9, and specify a maximum integer value of
+1000 in the aspect ratio reduction, use the command:
+
+
setdar=ratio=16/9:max=1000
+
+
+
+
+
+
30.82 setfield# TOC
+
+
Force field for the output video frame.
+
+
The setfield
filter marks the interlace type field for the
+output frames. It does not change the input frame, but only sets the
+corresponding property, which affects how the frame is treated by
+following filters (e.g. fieldorder
or yadif
).
+
+
The filter accepts the following options:
+
+
+mode
+Available values are:
+
+
+‘auto ’
+Keep the same field property.
+
+
+‘bff ’
+Mark the frame as bottom-field-first.
+
+
+‘tff ’
+Mark the frame as top-field-first.
+
+
+‘prog ’
+Mark the frame as progressive.
+
+
+
+
+
+
+
30.83 showinfo# TOC
+
+
Show a line containing various information for each input video frame.
+The input video is not modified.
+
+
The shown line contains a sequence of key/value pairs of the form
+key :value .
+
+
The following values are shown in the output:
+
+
+n
+The (sequential) number of the input frame, starting from 0.
+
+
+pts
+The Presentation TimeStamp of the input frame, expressed as a number of
+time base units. The time base unit depends on the filter input pad.
+
+
+pts_time
+The Presentation TimeStamp of the input frame, expressed as a number of
+seconds.
+
+
+pos
+The position of the frame in the input stream, or -1 if this information is
+unavailable and/or meaningless (for example in case of synthetic video).
+
+
+fmt
+The pixel format name.
+
+
+sar
+The sample aspect ratio of the input frame, expressed in the form
+num /den .
+
+
+s
+The size of the input frame. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual.
+
+
+i
+The type of interlaced mode ("P" for "progressive", "T" for top field first, "B"
+for bottom field first).
+
+
+iskey
+This is 1 if the frame is a key frame, 0 otherwise.
+
+
+type
+The picture type of the input frame ("I" for an I-frame, "P" for a
+P-frame, "B" for a B-frame, or "?" for an unknown type).
+Also refer to the documentation of the AVPictureType
enum and of
+the av_get_picture_type_char
function defined in
+libavutil/avutil.h .
+
+
+checksum
+The Adler-32 checksum (printed in hexadecimal) of all the planes of the input frame.
+
+
+plane_checksum
+The Adler-32 checksum (printed in hexadecimal) of each plane of the input frame,
+expressed in the form "[c0 c1 c2 c3 ]".
+
+
+
+
+
30.84 shuffleplanes# TOC
+
+
Reorder and/or duplicate video planes.
+
+
It accepts the following parameters:
+
+
+map0
+The index of the input plane to be used as the first output plane.
+
+
+map1
+The index of the input plane to be used as the second output plane.
+
+
+map2
+The index of the input plane to be used as the third output plane.
+
+
+map3
+The index of the input plane to be used as the fourth output plane.
+
+
+
+
+
The first plane has the index 0. The default is to keep the input unchanged.
+
+
Swap the second and third planes of the input:
+
+
ffmpeg -i INPUT -vf shuffleplanes=0:2:1:3 OUTPUT
+
+
+
+
30.85 signalstats# TOC
+
Evaluate various visual metrics that assist in determining issues associated
+with the digitization of analog video media.
+
+
By default the filter will log these metadata values:
+
+
+YMIN
+Display the minimal Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+YLOW
+Display the Y value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YAVG
+Display the average Y value within the input frame. Expressed in range of
+[0-255].
+
+
+YHIGH
+Display the Y value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+YMAX
+Display the maximum Y value contained within the input frame. Expressed in
+range of [0-255].
+
+
+UMIN
+Display the minimal U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+ULOW
+Display the U value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UAVG
+Display the average U value within the input frame. Expressed in range of
+[0-255].
+
+
+UHIGH
+Display the U value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+UMAX
+Display the maximum U value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VMIN
+Display the minimal V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+VLOW
+Display the V value at the 10% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VAVG
+Display the average V value within the input frame. Expressed in range of
+[0-255].
+
+
+VHIGH
+Display the V value at the 90% percentile within the input frame. Expressed in
+range of [0-255].
+
+
+VMAX
+Display the maximum V value contained within the input frame. Expressed in
+range of [0-255].
+
+
+SATMIN
+Display the minimal saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATLOW
+Display the saturation value at the 10% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATAVG
+Display the average saturation value within the input frame. Expressed in range
+of [0-~181.02].
+
+
+SATHIGH
+Display the saturation value at the 90% percentile within the input frame.
+Expressed in range of [0-~181.02].
+
+
+SATMAX
+Display the maximum saturation value contained within the input frame.
+Expressed in range of [0-~181.02].
+
+
+HUEMED
+Display the median value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+HUEAVG
+Display the average value for hue within the input frame. Expressed in range of
+[0-360].
+
+
+YDIF
+Display the average of sample value difference between all values of the Y
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+UDIF
+Display the average of sample value difference between all values of the U
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+VDIF
+Display the average of sample value difference between all values of the V
+plane in the current frame and corresponding values of the previous input frame.
+Expressed in range of [0-255].
+
+
+
+
The filter accepts the following options:
+
+
+stat
+out
+
+stat specify an additional form of image analysis.
+out output video with the specified type of pixel highlighted.
+
+Both options accept the following values:
+
+
+‘tout ’
+Identify temporal outliers pixels. A temporal outlier is a pixel
+unlike the neighboring pixels of the same field. Examples of temporal outliers
+include the results of video dropouts, head clogs, or tape tracking issues.
+
+
+‘vrep ’
+Identify vertical line repetition . Vertical line repetition includes
+similar rows of pixels within a frame. In born-digital video vertical line
+repetition is common, but this pattern is uncommon in video digitized from an
+analog source. When it occurs in video that results from the digitization of an
+analog source it can indicate concealment from a dropout compensator.
+
+
+‘brng ’
+Identify pixels that fall outside of legal broadcast range.
+
+
+
+
+color, c
+Set the highlight color for the out option. The default color is
+yellow.
+
+
+
+
+
30.85.1 Examples# TOC
+
+
+
+
+
30.86 smartblur# TOC
+
+
Blur the input video without impacting the outlines.
+
+
It accepts the following options:
+
+
+luma_radius, lr
+Set the luma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+luma_strength, ls
+Set the luma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+luma_threshold, lt
+Set the luma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+chroma_radius, cr
+Set the chroma radius. The option value must be a float number in
+the range [0.1,5.0] that specifies the variance of the gaussian filter
+used to blur the image (slower if larger). Default value is 1.0.
+
+
+chroma_strength, cs
+Set the chroma strength. The option value must be a float number
+in the range [-1.0,1.0] that configures the blurring. A value included
+in [0.0,1.0] will blur the image whereas a value included in
+[-1.0,0.0] will sharpen the image. Default value is 1.0.
+
+
+chroma_threshold, ct
+Set the chroma threshold used as a coefficient to determine
+whether a pixel should be blurred or not. The option value must be an
+integer in the range [-30,30]. A value of 0 will filter all the image,
+a value included in [0,30] will filter flat areas and a value included
+in [-30,0] will filter edges. Default value is 0.
+
+
+
+
If a chroma option is not explicitly set, the corresponding luma value
+is set.
+
+
+
30.87 stereo3d# TOC
+
+
Convert between different stereoscopic image formats.
+
+
The filter accepts the following options:
+
+
+in
+Set stereoscopic image format of input.
+
+Available values for input image formats are:
+
+‘sbsl ’
+side by side parallel (left eye left, right eye right)
+
+
+‘sbsr ’
+side by side crosseye (right eye left, left eye right)
+
+
+‘sbs2l ’
+side by side parallel with half width resolution
+(left eye left, right eye right)
+
+
+‘sbs2r ’
+side by side crosseye with half width resolution
+(right eye left, left eye right)
+
+
+‘abl ’
+above-below (left eye above, right eye below)
+
+
+‘abr ’
+above-below (right eye above, left eye below)
+
+
+‘ab2l ’
+above-below with half height resolution
+(left eye above, right eye below)
+
+
+‘ab2r ’
+above-below with half height resolution
+(right eye above, left eye below)
+
+
+‘al ’
+alternating frames (left eye first, right eye second)
+
+
+‘ar ’
+alternating frames (right eye first, left eye second)
+
+Default value is ‘sbsl ’.
+
+
+
+
+out
+Set stereoscopic image format of output.
+
+Available values for output image formats are all the input formats as well as:
+
+‘arbg ’
+anaglyph red/blue gray
+(red filter on left eye, blue filter on right eye)
+
+
+‘argg ’
+anaglyph red/green gray
+(red filter on left eye, green filter on right eye)
+
+
+‘arcg ’
+anaglyph red/cyan gray
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arch ’
+anaglyph red/cyan half colored
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcc ’
+anaglyph red/cyan color
+(red filter on left eye, cyan filter on right eye)
+
+
+‘arcd ’
+anaglyph red/cyan color optimized with the least squares projection of dubois
+(red filter on left eye, cyan filter on right eye)
+
+
+‘agmg ’
+anaglyph green/magenta gray
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmh ’
+anaglyph green/magenta half colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmc ’
+anaglyph green/magenta colored
+(green filter on left eye, magenta filter on right eye)
+
+
+‘agmd ’
+anaglyph green/magenta color optimized with the least squares projection of dubois
+(green filter on left eye, magenta filter on right eye)
+
+
+‘aybg ’
+anaglyph yellow/blue gray
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybh ’
+anaglyph yellow/blue half colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybc ’
+anaglyph yellow/blue colored
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘aybd ’
+anaglyph yellow/blue color optimized with the least squares projection of dubois
+(yellow filter on left eye, blue filter on right eye)
+
+
+‘irl ’
+interleaved rows (left eye has top row, right eye starts on next row)
+
+
+‘irr ’
+interleaved rows (right eye has top row, left eye starts on next row)
+
+
+‘ml ’
+mono output (left eye only)
+
+
+‘mr ’
+mono output (right eye only)
+
+
+
+Default value is ‘arcd ’.
+
+
+
+
+
30.87.1 Examples# TOC
+
+
+ Convert input video from side by side parallel to anaglyph yellow/blue dubois:
+
+
+ Convert input video from above below (left eye above, right eye below) to side by side crosseye.
+
+
+
+
+
30.88 spp# TOC
+
+
Apply a simple postprocessing filter that compresses and decompresses the image
+at several (or - in the case of quality level 6
- all) shifts
+and averages the results.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-6. If set to 0
, the filter will have no
+effect. A value of 6
means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+mode
+Set thresholding mode. Available modes are:
+
+
+‘hard ’
+Set hard thresholding (default).
+
+‘soft ’
+Set soft thresholding (better de-ringing effect, but likely blurrier).
+
+
+
+
+use_bframe_qp
+Enable the use of the QP from the B-Frames if set to 1
. Using this
+option may cause flicker since the B-Frames have often larger QP. Default is
+0
(not enabled).
+
+
+
+
+
30.89 subtitles# TOC
+
+
Draw subtitles on top of input video using the libass library.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libass
. This filter also requires a build with libavcodec and
+libavformat to convert the passed subtitles file to ASS (Advanced Substation
+Alpha) subtitles format.
+
+
The filter accepts the following options:
+
+
+filename, f
+Set the filename of the subtitle file to read. It must be specified.
+
+
+original_size
+Specify the size of the original video, the video for which the ASS file
+was composed. For the syntax of this option, check the "Video size" section in
+the ffmpeg-utils manual. Due to a misdesign in ASS aspect ratio arithmetic,
+this is necessary to correctly scale the fonts if the aspect ratio has been
+changed.
+
+
+charenc
+Set subtitles input character encoding. subtitles
filter only. Only
+useful if not UTF-8.
+
+
+stream_index, si
+Set subtitles stream index. subtitles
filter only.
+
+
+
+
If the first key is not specified, it is assumed that the first value
+specifies the filename .
+
+
For example, to render the file sub.srt on top of the input
+video, use the command:
+
+
+
which is equivalent to:
+
+
subtitles=filename=sub.srt
+
+
+
To render the default subtitles stream from file video.mkv , use:
+
+
+
To render the second subtitles stream from that file, use:
+
+
subtitles=video.mkv:si=1
+
+
+
+
30.90 super2xsai# TOC
+
+
Scale the input by 2x and smooth using the Super2xSaI (Scale and
+Interpolate) pixel art scaling algorithm.
+
+
Useful for enlarging pixel art images without reducing sharpness.
+
+
+
30.91 swapuv# TOC
+
Swap U & V planes.
+
+
+
30.92 telecine# TOC
+
+
Apply telecine process to the video.
+
+
This filter accepts the following options:
+
+
+first_field
+
+‘top, t ’
+top field first
+
+‘bottom, b ’
+bottom field first
+The default value is top
.
+
+
+
+
+pattern
+A string of numbers representing the pulldown pattern you wish to apply.
+The default value is 23
.
+
+
+
+
+
Some typical patterns:
+
+NTSC output (30i):
+27.5p: 32222
+24p: 23 (classic)
+24p: 2332 (preferred)
+20p: 33
+18p: 334
+16p: 3444
+
+PAL output (25i):
+27.5p: 12222
+24p: 222222222223 ("Euro pulldown")
+16.67p: 33
+16p: 33333334
+
+
+
+
30.93 thumbnail# TOC
+
Select the most representative frame in a given sequence of consecutive frames.
+
+
The filter accepts the following options:
+
+
+n
+Set the frames batch size to analyze; in a set of n frames, the filter
+will pick one of them, and then handle the next batch of n frames until
+the end. Default is 100
.
+
+
+
+
Since the filter keeps track of the whole frames sequence, a bigger n
+value will result in a higher memory usage, so a high value is not recommended.
+
+
+
30.93.1 Examples# TOC
+
+
+ Extract one picture each 50 frames:
+
+
+ Complete example of a thumbnail creation with ffmpeg
:
+
+
ffmpeg -i in.avi -vf thumbnail,scale=300:200 -frames:v 1 out.png
+
+
+
+
+
30.94 tile# TOC
+
+
Tile several successive frames together.
+
+
The filter accepts the following options:
+
+
+layout
+Set the grid size (i.e. the number of lines and columns). For the syntax of
+this option, check the "Video size" section in the ffmpeg-utils manual.
+
+
+nb_frames
+Set the maximum number of frames to render in the given area. It must be less
+than or equal to w xh . The default value is 0
, meaning all
+the area will be used.
+
+
+margin
+Set the outer border margin in pixels.
+
+
+padding
+Set the inner border thickness (i.e. the number of pixels between frames). For
+more advanced padding options (such as having different values for the edges),
+refer to the pad video filter.
+
+
+color
+Specify the color of the unused area. For the syntax of this option, check the
+"Color" section in the ffmpeg-utils manual. The default value of color
+is "black".
+
+
+
+
+
30.94.1 Examples# TOC
+
+
+
+
+
30.95 tinterlace# TOC
+
+
Perform various types of temporal field interlacing.
+
+
Frames are counted starting from 1, so the first input frame is
+considered odd.
+
+
The filter accepts the following options:
+
+
+mode
+Specify the mode of the interlacing. This option can also be specified
+as a value alone. See below for a list of values for this option.
+
+Available values are:
+
+
+‘merge, 0 ’
+Move odd frames into the upper field, even into the lower field,
+generating a double height frame at half frame rate.
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+‘drop_odd, 1 ’
+Only output even frames, odd frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+ 22222 44444
+ 22222 44444
+ 22222 44444
+ 22222 44444
+
+
+
+‘drop_even, 2 ’
+Only output odd frames, even frames are dropped, generating a frame with
+unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 33333
+11111 33333
+11111 33333
+11111 33333
+
+
+
+‘pad, 3 ’
+Expand each frame to full height, but pad alternate lines with black,
+generating a frame with double height at the same input frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+11111 22222 33333 44444
+
+Output:
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+11111 ..... 33333 .....
+..... 22222 ..... 44444
+
+
+
+
+‘interleave_top, 4 ’
+Interleave the upper field from odd frames with the lower field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+
+Output:
+11111 33333
+22222 44444
+11111 33333
+22222 44444
+
+
+
+
+‘interleave_bottom, 5 ’
+Interleave the lower field from odd frames with the upper field from
+even frames, generating a frame with unchanged height at half frame rate.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+11111 22222<- 33333 44444<-
+11111<- 22222 33333<- 44444
+
+Output:
+22222 44444
+11111 33333
+22222 44444
+11111 33333
+
+
+
+
+‘interlacex2, 6 ’
+Double frame rate with unchanged height. Frames are inserted each
+containing the second temporal field from the previous input frame and
+the first temporal field from the next input frame. This mode relies on
+the top_field_first flag. Useful for interlaced video displays with no
+field synchronisation.
+
+
+
------> time
+Input:
+Frame 1 Frame 2 Frame 3 Frame 4
+
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+11111 22222 33333 44444
+ 11111 22222 33333 44444
+
+Output:
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+11111 22222 22222 33333 33333 44444 44444
+ 11111 11111 22222 22222 33333 33333 44444
+
+
+
+
+
+
+Numeric values are deprecated but are accepted for backward
+compatibility reasons.
+
+Default mode is merge
.
+
+
+flags
+Specify flags influencing the filter process.
+
+Available value for flags is:
+
+
+low_pass_filter, vlfp
+Enable vertical low-pass filtering in the filter.
+Vertical low-pass filtering is required when creating an interlaced
+destination from a progressive source which contains high-frequency
+vertical detail. Filtering will reduce interlace ’twitter’ and Moire
+patterning.
+
+Vertical low-pass filtering can only be enabled for mode
+interleave_top and interleave_bottom .
+
+
+
+
+
+
+
+
30.96 transpose# TOC
+
+
Transpose rows with columns in the input video and optionally flip it.
+
+
It accepts the following parameters:
+
+
+dir
+Specify the transposition direction.
+
+Can assume the following values:
+
+‘0, 4, cclock_flip ’
+Rotate by 90 degrees counterclockwise and vertically flip (default), that is:
+
+
L.R L.l
+. . -> . .
+l.r R.r
+
+
+
+‘1, 5, clock ’
+Rotate by 90 degrees clockwise, that is:
+
+
L.R l.L
+. . -> . .
+l.r r.R
+
+
+
+‘2, 6, cclock ’
+Rotate by 90 degrees counterclockwise, that is:
+
+
L.R R.r
+. . -> . .
+l.r L.l
+
+
+
+‘3, 7, clock_flip ’
+Rotate by 90 degrees clockwise and vertically flip, that is:
+
+
L.R r.R
+. . -> . .
+l.r l.L
+
+
+
+
+For values between 4-7, the transposition is only done if the input
+video geometry is portrait and not landscape. These values are
+deprecated, the passthrough
option should be used instead.
+
+Numerical values are deprecated, and should be dropped in favor of
+symbolic constants.
+
+
+passthrough
+Do not apply the transposition if the input geometry matches the one
+specified by the specified value. It accepts the following values:
+
+‘none ’
+Always apply transposition.
+
+‘portrait ’
+Preserve portrait geometry (when height >= width ).
+
+‘landscape ’
+Preserve landscape geometry (when width >= height ).
+
+
+
+Default value is none
.
+
+
+
+
For example to rotate by 90 degrees clockwise and preserve portrait
+layout:
+
+
transpose=dir=1:passthrough=portrait
+
+
+
The command above can also be specified as:
+
+
+
+
30.97 trim# TOC
+
Trim the input so that the output contains one continuous subpart of the input.
+
+
It accepts the following parameters:
+
+start
+Specify the time of the start of the kept section, i.e. the frame with the
+timestamp start will be the first frame in the output.
+
+
+end
+Specify the time of the first frame that will be dropped, i.e. the frame
+immediately preceding the one with the timestamp end will be the last
+frame in the output.
+
+
+start_pts
+This is the same as start , except this option sets the start timestamp
+in timebase units instead of seconds.
+
+
+end_pts
+This is the same as end , except this option sets the end timestamp
+in timebase units instead of seconds.
+
+
+duration
+The maximum duration of the output in seconds.
+
+
+start_frame
+The number of the first frame that should be passed to the output.
+
+
+end_frame
+The number of the first frame that should be dropped.
+
+
+
+
start , end , and duration are expressed as time
+duration specifications; see
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+
Note that the first two sets of the start/end options and the duration
+option look at the frame timestamp, while the _frame variants simply count the
+frames that pass through the filter. Also note that this filter does not modify
+the timestamps. If you wish for the output timestamps to start at zero, insert a
+setpts filter after the trim filter.
+
+
If multiple start or end options are set, this filter tries to be greedy and
+keep all the frames that match at least one of the specified constraints. To keep
+only the part that matches all the constraints at once, chain multiple trim
+filters.
+
+
The defaults are such that all the input is kept. So it is possible to set e.g.
+just the end values to keep everything before the specified time.
+
+
Examples:
+
+ Drop everything except the second minute of input:
+
+
ffmpeg -i INPUT -vf trim=60:120
+
+
+ Keep only the first second:
+
+
ffmpeg -i INPUT -vf trim=duration=1
+
+
+
+
+
+
+
30.98 unsharp# TOC
+
+
Sharpen or blur the input video.
+
+
It accepts the following parameters:
+
+
+luma_msize_x, lx
+Set the luma matrix horizontal size. It must be an odd integer between
+3 and 63. The default value is 5.
+
+
+luma_msize_y, ly
+Set the luma matrix vertical size. It must be an odd integer between 3
+and 63. The default value is 5.
+
+
+luma_amount, la
+Set the luma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 1.0.
+
+
+chroma_msize_x, cx
+Set the chroma matrix horizontal size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_msize_y, cy
+Set the chroma matrix vertical size. It must be an odd integer
+between 3 and 63. The default value is 5.
+
+
+chroma_amount, ca
+Set the chroma effect strength. It must be a floating point number, reasonable
+values lay between -1.5 and 1.5.
+
+Negative values will blur the input video, while positive values will
+sharpen it, a value of zero will disable the effect.
+
+Default value is 0.0.
+
+
+opencl
+If set to 1, specify using OpenCL capabilities, only available if
+FFmpeg was configured with --enable-opencl
. Default value is 0.
+
+
+
+
+
All parameters are optional and default to the equivalent of the
+string ’5:5:1.0:5:5:0.0’.
+
+
+
30.98.1 Examples# TOC
+
+
+ Apply strong luma sharpen effect:
+
+
unsharp=luma_msize_x=7:luma_msize_y=7:luma_amount=2.5
+
+
+ Apply a strong blur of both luma and chroma parameters:
+
+
+
+
+
30.99 uspp# TOC
+
+
Apply ultra slow/simple postprocessing filter that compresses and decompresses
+the image at several (or - in the case of quality level 8
- all)
+shifts and averages the results.
+
+
The way this differs from the behavior of spp is that uspp actually encodes &
+decodes each case with libavcodec Snow, whereas spp uses a simplified intra only 8x8
+DCT similar to MJPEG.
+
+
The filter accepts the following options:
+
+
+quality
+Set quality. This option defines the number of levels for averaging. It accepts
+an integer in the range 0-8. If set to 0
, the filter will have no
+effect. A value of 8
means the highest quality. For each increment of
+that value the speed drops by a factor of approximately 2. Default value is
+3
.
+
+
+qp
+Force a constant quantization parameter. If not set, the filter will use the QP
+from the video stream (if available).
+
+
+
+
+
30.100 vidstabdetect# TOC
+
+
Analyze video stabilization/deshaking. Perform pass 1 of 2, see
+vidstabtransform for pass 2.
+
+
This filter generates a file with relative translation and rotation
+transform information about subsequent frames, which is then used by
+the vidstabtransform filter.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
This filter accepts the following options:
+
+
+result
+Set the path to the file used to write the transforms information.
+Default value is transforms.trf .
+
+
+shakiness
+Set how shaky the video is and how quick the camera is. It accepts an
+integer in the range 1-10, a value of 1 means little shakiness, a
+value of 10 means strong shakiness. Default value is 5.
+
+
+accuracy
+Set the accuracy of the detection process. It must be a value in the
+range 1-15. A value of 1 means low accuracy, a value of 15 means high
+accuracy. Default value is 15.
+
+
+stepsize
+Set stepsize of the search process. The region around minimum is
+scanned with 1 pixel resolution. Default value is 6.
+
+
+mincontrast
+Set minimum contrast. Below this value a local measurement field is
+discarded. Must be a floating point value in the range 0-1. Default
+value is 0.3.
+
+
+tripod
+Set reference frame number for tripod mode.
+
+If enabled, the motion of the frames is compared to a reference frame
+in the filtered stream, identified by the specified number. The idea
+is to compensate all movements in a more-or-less static scene and keep
+the camera view absolutely still.
+
+If set to 0, it is disabled. The frames are counted starting from 1.
+
+
+show
+Show fields and transforms in the resulting frames. It accepts an
+integer in the range 0-2. Default value is 0, which disables any
+visualization.
+
+
+
+
+
30.100.1 Examples# TOC
+
+
+ Use default values:
+
+
+ Analyze strongly shaky movie and put the results in file
+mytransforms.trf :
+
+
vidstabdetect=shakiness=10:accuracy=15:result="mytransforms.trf"
+
+
+ Visualize the result of internal transformations in the resulting
+video:
+
+
+ Analyze a video with medium shakiness using ffmpeg
:
+
+
ffmpeg -i input -vf vidstabdetect=shakiness=5:show=1 dummy.avi
+
+
+
+
+
30.101 vidstabtransform# TOC
+
+
Video stabilization/deshaking: pass 2 of 2,
+see vidstabdetect for pass 1.
+
+
Read a file with transform information for each frame and
+apply/compensate them. Together with the vidstabdetect
+filter this can be used to deshake videos. See also
+http://public.hronopik.de/vid.stab . It is important to also use
+the unsharp filter, see below.
+
+
To enable compilation of this filter you need to configure FFmpeg with
+--enable-libvidstab
.
+
+
+
30.101.1 Options# TOC
+
+
+input
+Set path to the file used to read the transforms. Default value is
+transforms.trf .
+
+
+smoothing
+Set the number of frames (value*2 + 1) used for lowpass filtering the
+camera movements. Default value is 10.
+
+For example a number of 10 means that 21 frames are used (10 in the
+past and 10 in the future) to smooth out the motion in the video. A
+larger value leads to a smoother video, but limits the acceleration of
+the camera (pan/tilt movements). 0 is a special case where a static
+camera is simulated.
+
+
+optalgo
+Set the camera path optimization algorithm.
+
+Accepted values are:
+
+‘gauss ’
+gaussian kernel low-pass filter on camera motion (default)
+
+‘avg ’
+averaging on transformations
+
+
+
+
+maxshift
+Set maximal number of pixels to translate frames. Default value is -1,
+meaning no limit.
+
+
+maxangle
+Set maximal angle in radians (degree*PI/180) to rotate frames. Default
+value is -1, meaning no limit.
+
+
+crop
+Specify how to deal with borders that may be visible due to movement
+compensation.
+
+Available values are:
+
+‘keep ’
+keep image information from previous frame (default)
+
+‘black ’
+fill the border black
+
+
+
+
+invert
+Invert transforms if set to 1. Default value is 0.
+
+
+relative
+Consider transforms as relative to previous frame if set to 1,
+absolute if set to 0. Default value is 0.
+
+
+zoom
+Set percentage to zoom. A positive value will result in a zoom-in
+effect, a negative value in a zoom-out effect. Default value is 0 (no
+zoom).
+
+
+optzoom
+Set optimal zooming to avoid borders.
+
+Accepted values are:
+
+‘0 ’
+disabled
+
+‘1 ’
+optimal static zoom value is determined (only very strong movements
+will lead to visible borders) (default)
+
+‘2 ’
+optimal adaptive zoom value is determined (no borders will be
+visible), see zoomspeed
+
+
+
+Note that the value given at zoom is added to the one calculated here.
+
+
+zoomspeed
+Set percent to zoom maximally each frame (enabled when
+optzoom is set to 2). Range is from 0 to 5, default value is
+0.25.
+
+
+interpol
+Specify type of interpolation.
+
+Available values are:
+
+‘no ’
+no interpolation
+
+‘linear ’
+linear only horizontal
+
+‘bilinear ’
+linear in both directions (default)
+
+‘bicubic ’
+cubic in both directions (slow)
+
+
+
+
+tripod
+Enable virtual tripod mode if set to 1, which is equivalent to
+relative=0:smoothing=0
. Default value is 0.
+
+Use also tripod
option of vidstabdetect .
+
+
+debug
+Increase log verbosity if set to 1. Also the detected global motions
+are written to the temporary file global_motions.trf . Default
+value is 0.
+
+
+
+
+
30.101.2 Examples# TOC
+
+
+
+
+
30.102 vflip# TOC
+
+
Flip the input video vertically.
+
+
For example, to vertically flip a video with ffmpeg
:
+
+
ffmpeg -i in.avi -vf "vflip" out.avi
+
+
+
+
30.103 vignette# TOC
+
+
Make or reverse a natural vignetting effect.
+
+
The filter accepts the following options:
+
+
+angle, a
+Set lens angle expression as a number of radians.
+
+The value is clipped in the [0,PI/2]
range.
+
+Default value: "PI/5"
+
+
+x0
+y0
+Set center coordinates expressions. Respectively "w/2"
and "h/2"
+by default.
+
+
+mode
+Set forward/backward mode.
+
+Available modes are:
+
+‘forward ’
+The larger the distance from the central point, the darker the image becomes.
+
+
+‘backward ’
+The larger the distance from the central point, the brighter the image becomes.
+This can be used to reverse a vignette effect, though there is no automatic
+detection to extract the lens angle and other settings (yet). It can
+also be used to create a burning effect.
+
+
+
+Default value is ‘forward ’.
+
+
+eval
+Set evaluation mode for the expressions (angle , x0 , y0 ).
+
+It accepts the following values:
+
+‘init ’
+Evaluate expressions only once during the filter initialization.
+
+
+‘frame ’
+Evaluate expressions for each incoming frame. This is way slower than the
+‘init ’ mode since it requires all the scalers to be re-computed, but it
+allows advanced dynamic expressions.
+
+
+
+Default value is ‘init ’.
+
+
+dither
+Set dithering to reduce the circular banding effects. Default is 1
+(enabled).
+
+
+aspect
+Set vignette aspect. This setting allows one to adjust the shape of the vignette.
+Setting this value to the SAR of the input will make a rectangular vignetting
+following the dimensions of the video.
+
+Default is 1/1
.
+
+
+
+
+
30.103.1 Expressions# TOC
+
+
The alpha , x0 and y0 expressions can contain the
+following parameters.
+
+
+w
+h
+input width and height
+
+
+n
+the number of the input frame, starting from 0
+
+
+pts
+the PTS (Presentation TimeStamp) time of the filtered video frame, expressed in
+TB units, NAN if undefined
+
+
+r
+frame rate of the input video, NAN if the input frame rate is unknown
+
+
+t
+the PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in seconds, NAN if undefined
+
+
+tb
+time base of the input video
+
+
+
+
+
+
30.103.2 Examples# TOC
+
+
+ Apply simple strong vignetting effect:
+
+
+ Make a flickering vignetting:
+
+
vignette='PI/4+random(1)*PI/50':eval=frame
+
+
+
+
+
+
30.104 w3fdif# TOC
+
+
Deinterlace the input video ("w3fdif" stands for "Weston 3 Field
+Deinterlacing Filter").
+
+
Based on the process described by Martin Weston for BBC R&D, and
+implemented based on the de-interlace algorithm written by Jim
+Easterbrook for BBC R&D, the Weston 3 field deinterlacing filter
+uses filter coefficients calculated by BBC R&D.
+
+
There are two sets of filter coefficients, so called "simple":
+and "complex". Which set of filter coefficients is used can
+be set by passing an optional parameter:
+
+
+filter
+Set the interlacing filter coefficients. Accepts one of the following values:
+
+
+‘simple ’
+Simple filter coefficient set.
+
+‘complex ’
+More-complex filter coefficient set.
+
+
+Default value is ‘complex ’.
+
+
+deint
+Specify which frames to deinterlace. Accepts one of the following values:
+
+
+‘all ’
+Deinterlace all frames,
+
+‘interlaced ’
+Only deinterlace frames marked as interlaced.
+
+
+
+Default value is ‘all ’.
+
+
+
+
+
30.105 xbr# TOC
+
Apply the xBR high-quality magnification filter which is designed for pixel
+art. It follows a set of edge-detection rules, see
+http://www.libretro.com/forums/viewtopic.php?f=6&t=134 .
+
+
It accepts the following option:
+
+
+n
+Set the scaling dimension: 2
for 2xBR
, 3
for
+3xBR
and 4
for 4xBR
.
+Default is 3
.
+
+
+
+
+
30.106 yadif# TOC
+
+
Deinterlace the input video ("yadif" means "yet another deinterlacing
+filter").
+
+
It accepts the following parameters:
+
+
+
+mode
+The interlacing mode to adopt. It accepts one of the following values:
+
+
+0, send_frame
+Output one frame for each frame.
+
+1, send_field
+Output one frame for each field.
+
+2, send_frame_nospatial
+Like send_frame
, but it skips the spatial interlacing check.
+
+3, send_field_nospatial
+Like send_field
, but it skips the spatial interlacing check.
+
+
+
+The default value is send_frame
.
+
+
+parity
+The picture field parity assumed for the input interlaced video. It accepts one
+of the following values:
+
+
+0, tff
+Assume the top field is first.
+
+1, bff
+Assume the bottom field is first.
+
+-1, auto
+Enable automatic detection of field parity.
+
+
+
+The default value is auto
.
+If the interlacing is unknown or the decoder does not export this information,
+top field first will be assumed.
+
+
+deint
+Specify which frames to deinterlace. Accept one of the following
+values:
+
+
+0, all
+Deinterlace all frames.
+
+1, interlaced
+Only deinterlace frames marked as interlaced.
+
+
+
+The default value is all
.
+
+
+
+
+
30.107 zoompan# TOC
+
+
Apply Zoom & Pan effect.
+
+
This filter accepts the following options:
+
+
+zoom, z
+Set the zoom expression. Default is 1.
+
+
+x
+y
+Set the x and y expression. Default is 0.
+
+
+d
+Set the duration expression in number of frames.
+This sets for how many frames the effect will last for a
+single input image.
+
+
+s
+Set the output image size, default is ’hd720’.
+
+
+
+
Each expression can contain the following constants:
+
+
+in_w, iw
+Input width.
+
+
+in_h, ih
+Input height.
+
+
+out_w, ow
+Output width.
+
+
+out_h, oh
+Output height.
+
+
+in
+Input frame count.
+
+
+on
+Output frame count.
+
+
+x
+y
+Last calculated ’x’ and ’y’ position from ’x’ and ’y’ expression
+for current input frame.
+
+
+px
+py
+’x’ and ’y’ of last output frame of previous input frame or 0 when there was
+not yet such frame (first input frame).
+
+
+zoom
+Last calculated zoom from ’z’ expression for current input frame.
+
+
+pzoom
+Last calculated zoom of last output frame of previous input frame.
+
+
+duration
+Number of output frames for current input frame. Calculated from ’d’ expression
+for each input frame.
+
+
+pduration
+number of output frames created for previous input frame
+
+
+a
+Rational number: input width / input height
+
+
+sar
+sample aspect ratio
+
+
+dar
+display aspect ratio
+
+
+
+
+
+
30.107.1 Examples# TOC
+
+
+ Zoom-in up to 1.5 and pan at same time to some spot near center of picture:
+
+
zoompan=z='min(zoom+0.0015,1.5)':d=700:x='if(gte(zoom,1.5),x,x+1/a)':y='if(gte(zoom,1.5),y,y+1)':s=640x360
+
+
+
+
+
+
31 Video Sources# TOC
+
+
Below is a description of the currently available video sources.
+
+
+
31.1 buffer# TOC
+
+
Buffer video frames, and make them available to the filter chain.
+
+
This source is mainly intended for a programmatic use, in particular
+through the interface defined in libavfilter/vsrc_buffer.h .
+
+
It accepts the following parameters:
+
+
+video_size
+Specify the size (width and height) of the buffered video frames. For the
+syntax of this option, check the "Video size" section in the ffmpeg-utils
+manual.
+
+
+width
+The input video width.
+
+
+height
+The input video height.
+
+
+pix_fmt
+A string representing the pixel format of the buffered video frames.
+It may be a number corresponding to a pixel format, or a pixel format
+name.
+
+
+time_base
+Specify the timebase assumed by the timestamps of the buffered frames.
+
+
+frame_rate
+Specify the frame rate expected for the video stream.
+
+
+pixel_aspect, sar
+The sample (pixel) aspect ratio of the input video.
+
+
+sws_param
+Specify the optional parameters to be used for the scale filter which
+is automatically inserted when an input change is detected in the
+input size or format.
+
+
+
+
For example:
+
+
buffer=width=320:height=240:pix_fmt=yuv410p:time_base=1/24:sar=1
+
+
+
will instruct the source to accept video frames with size 320x240 and
+with format "yuv410p", assuming 1/24 as the timestamps timebase and
+square pixels (1:1 sample aspect ratio).
+Since the pixel format with name "yuv410p" corresponds to the number 6
+(check the enum AVPixelFormat definition in libavutil/pixfmt.h ),
+this example corresponds to:
+
+
buffer=size=320x240:pixfmt=6:time_base=1/24:pixel_aspect=1/1
+
+
+
Alternatively, the options can be specified as a flat string, but this
+syntax is deprecated:
+
+
width :height :pix_fmt :time_base.num :time_base.den :pixel_aspect.num :pixel_aspect.den [:sws_param ]
+
+
+
31.2 cellauto# TOC
+
+
Create a pattern generated by an elementary cellular automaton.
+
+
The initial state of the cellular automaton can be defined through the
+filename , and pattern options. If such options are
+not specified an initial state is created randomly.
+
+
At each new frame a new row in the video is filled with the result of
+the cellular automaton next generation. The behavior when the whole
+frame is filled is defined by the scroll option.
+
+
This source accepts the following options:
+
+
+filename, f
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified file.
+In the file, each non-whitespace character is considered an alive
+cell, a newline will terminate the row, and further characters in the
+file will be ignored.
+
+
+pattern, p
+Read the initial cellular automaton state, i.e. the starting row, from
+the specified string.
+
+Each non-whitespace character in the string is considered an alive
+cell, a newline will terminate the row, and further characters in the
+string will be ignored.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial cellular automaton row. It
+is a floating point number value ranging from 0 to 1, defaults to
+1/PHI.
+
+This option is ignored when a file or a pattern is specified.
+
+
+random_seed, seed
+Set the seed for filling randomly the initial row, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the cellular automaton rule, it is a number ranging from 0 to 255.
+Default value is 110.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual.
+
+If filename or pattern is specified, the size is set
+by default to the width of the specified initial state row, and the
+height is set to width * PHI.
+
+If size is set, it must contain the width of the specified
+pattern string, and the specified pattern will be centered in the
+larger row.
+
+If a filename or a pattern string is not specified, the size value
+defaults to "320x518" (used for a randomly generated initial state).
+
+
+scroll
+If set to 1, scroll the output upward when all the rows in the output
+have been already filled. If set to 0, the new generated row will be
+written over the top row just after the bottom row is filled.
+Defaults to 1.
+
+
+start_full, full
+If set to 1, completely fill the output with generated rows before
+outputting the first frame.
+This is the default behavior, for disabling set the value to 0.
+
+
+stitch
+If set to 1, stitch the left and right row edges together.
+This is the default behavior, for disabling set the value to 0.
+
+
+
+
+
31.2.1 Examples# TOC
+
+
+ Read the initial state from pattern , and specify an output of
+size 200x400.
+
+
cellauto=f=pattern:s=200x400
+
+
+ Generate a random initial row with a width of 200 cells, with a fill
+ratio of 2/3:
+
+
cellauto=ratio=2/3:s=200x200
+
+
+ Create a pattern generated by rule 18 starting by a single alive cell
+centered on an initial row with width 100:
+
+
cellauto=p=@:s=100x400:full=0:rule=18
+
+
+ Specify a more elaborated initial pattern:
+
+
cellauto=p='@@ @ @@':s=100x400:full=0:rule=18
+
+
+
+
+
+
31.3 mandelbrot# TOC
+
+
Generate a Mandelbrot set fractal, and progressively zoom towards the
+point specified with start_x and start_y .
+
+
This source accepts the following options:
+
+
+end_pts
+Set the terminal pts value. Default value is 400.
+
+
+end_scale
+Set the terminal scale value.
+Must be a floating point value. Default value is 0.3.
+
+
+inner
+Set the inner coloring mode, that is the algorithm used to draw the
+Mandelbrot fractal internal region.
+
+It shall assume one of the following values:
+
+black
+Set black mode.
+
+convergence
+Show time until convergence.
+
+mincol
+Set color based on point closest to the origin of the iterations.
+
+period
+Set period mode.
+
+
+
+Default value is mincol .
+
+
+bailout
+Set the bailout value. Default value is 10.0.
+
+
+maxiter
+Set the maximum of iterations performed by the rendering
+algorithm. Default value is 7189.
+
+
+outer
+Set outer coloring mode.
+It shall assume one of following values:
+
+iteration_count
+Set iteration count mode.
+
+normalized_iteration_count
+set normalized iteration count mode.
+
+
+Default value is normalized_iteration_count .
+
+
+rate, r
+Set frame rate, expressed as number of frames per second. Default
+value is "25".
+
+
+size, s
+Set frame size. For the syntax of this option, check the "Video
+size" section in the ffmpeg-utils manual. Default value is "640x480".
+
+
+start_scale
+Set the initial scale value. Default value is 3.0.
+
+
+start_x
+Set the initial x position. Must be a floating point value between
+-100 and 100. Default value is -0.743643887037158704752191506114774.
+
+
+start_y
+Set the initial y position. Must be a floating point value between
+-100 and 100. Default value is -0.131825904205311970493132056385139.
+
+
+
+
+
31.4 mptestsrc# TOC
+
+
Generate various test patterns, as generated by the MPlayer test filter.
+
+
The size of the generated video is fixed, and is 256x256.
+This source is useful in particular for testing encoding features.
+
+
This source accepts the following options:
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+test, t
+
+Set the number or the name of the test to perform. Supported tests are:
+
+dc_luma
+dc_chroma
+freq_luma
+freq_chroma
+amp_luma
+amp_chroma
+cbp
+mv
+ring1
+ring2
+all
+
+
+Default value is "all", which will cycle through the list of all tests.
+
+
+
+
Some examples:
+
+
+
will generate a "dc_luma" test pattern.
+
+
+
31.5 frei0r_src# TOC
+
+
Provide a frei0r source.
+
+
To enable compilation of this filter you need to install the frei0r
+header and configure FFmpeg with --enable-frei0r
.
+
+
This source accepts the following parameters:
+
+
+size
+The size of the video to generate. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+
+framerate
+The framerate of the generated video. It may be a string of the form
+num /den or a frame rate abbreviation.
+
+
+filter_name
+The name to the frei0r source to load. For more information regarding frei0r and
+how to set the parameters, read the frei0r section in the video filters
+documentation.
+
+
+filter_params
+A ’|’-separated list of parameters to pass to the frei0r source.
+
+
+
+
+
For example, to generate a frei0r partik0l source with size 200x200
+and frame rate 10 which is overlaid on the overlay filter main input:
+
+
frei0r_src=size=200x200:framerate=10:filter_name=partik0l:filter_params=1234 [overlay]; [in][overlay] overlay
+
+
+
+
31.6 life# TOC
+
+
Generate a life pattern.
+
+
This source is based on a generalization of John Conway’s life game.
+
+
The sourced input represents a life grid, each pixel represents a cell
+which can be in one of two possible states, alive or dead. Every cell
+interacts with its eight neighbours, which are the cells that are
+horizontally, vertically, or diagonally adjacent.
+
+
At each interaction the grid evolves according to the adopted rule,
+which specifies the number of neighbor alive cells which will make a
+cell stay alive or born. The rule option allows one to specify
+the rule to adopt.
+
+
This source accepts the following options:
+
+
+filename, f
+Set the file from which to read the initial grid state. In the file,
+each non-whitespace character is considered an alive cell, and newline
+is used to delimit the end of each row.
+
+If this option is not specified, the initial grid is generated
+randomly.
+
+
+rate, r
+Set the video rate, that is the number of frames generated per second.
+Default is 25.
+
+
+random_fill_ratio, ratio
+Set the random fill ratio for the initial random grid. It is a
+floating point number value ranging from 0 to 1, defaults to 1/PHI.
+It is ignored when a file is specified.
+
+
+random_seed, seed
+Set the seed for filling the initial random grid, must be an integer
+included between 0 and UINT32_MAX. If not specified, or if explicitly
+set to -1, the filter will try to use a good random seed on a best
+effort basis.
+
+
+rule
+Set the life rule.
+
+A rule can be specified with a code of the kind "SNS /BNB ",
+where NS and NB are sequences of numbers in the range 0-8,
+NS specifies the number of alive neighbor cells which make a
+live cell stay alive, and NB the number of alive neighbor cells
+which make a dead cell become alive (i.e. to be "born").
+"s" and "b" can be used in place of "S" and "B", respectively.
+
+Alternatively a rule can be specified by an 18-bits integer. The 9
+high order bits are used to encode the next cell state if it is alive
+for each number of neighbor alive cells, the low order bits specify
+the rule for "borning" new cells. Higher order bits encode for an
+higher number of neighbor cells.
+For example the number 6153 = (12<<9)+9
specifies a stay alive
+rule of 12 and a born rule of 9, which corresponds to "S23/B03".
+
+Default value is "S23/B3", which is the original Conway’s game of life
+rule, and will keep a cell alive if it has 2 or 3 neighbor alive
+cells, and will born a new cell if there are three alive cells around
+a dead cell.
+
+
+size, s
+Set the size of the output video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual.
+
+If filename is specified, the size is set by default to the
+same size of the input file. If size is set, it must contain
+the size specified in the input file, and the initial grid defined in
+that file is centered in the larger resulting area.
+
+If a filename is not specified, the size value defaults to "320x240"
+(used for a randomly generated initial grid).
+
+
+stitch
+If set to 1, stitch the left and right grid edges together, and the
+top and bottom edges also. Defaults to 1.
+
+
+mold
+Set cell mold speed. If set, a dead cell will go from death_color to
+mold_color with a step of mold . mold can have a
+value from 0 to 255.
+
+
+life_color
+Set the color of living (or new born) cells.
+
+
+death_color
+Set the color of dead cells. If mold is set, this is the first color
+used to represent a dead cell.
+
+
+mold_color
+Set mold color, for definitely dead and moldy cells.
+
+For the syntax of these 3 color options, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+
+
+
31.6.1 Examples# TOC
+
+
+ Read a grid from pattern , and center it on a grid of size
+300x300 pixels:
+
+
life=f=pattern:s=300x300
+
+
+ Generate a random grid of size 200x200, with a fill ratio of 2/3:
+
+
life=ratio=2/3:s=200x200
+
+
+ Specify a custom rule for evolving a randomly generated grid:
+
+
+ Full example with slow death effect (mold) using ffplay
:
+
+
ffplay -f lavfi life=s=300x200:mold=10:r=60:ratio=0.1:death_color=#C83232:life_color=#00ff00,scale=1200:800:flags=16
+
+
+
+
+
31.7 color, haldclutsrc, nullsrc, rgbtestsrc, smptebars, smptehdbars, testsrc# TOC
+
+
The color
source provides a uniformly colored input.
+
+
The haldclutsrc
source provides an identity Hald CLUT. See also
+haldclut filter.
+
+
The nullsrc
source returns unprocessed video frames. It is
+mainly useful to be employed in analysis / debugging tools, or as the
+source for filters which ignore the input data.
+
+
The rgbtestsrc
source generates an RGB test pattern useful for
+detecting RGB vs BGR issues. You should see a red, green and blue
+stripe from top to bottom.
+
+
The smptebars
source generates a color bars pattern, based on
+the SMPTE Engineering Guideline EG 1-1990.
+
+
The smptehdbars
source generates a color bars pattern, based on
+the SMPTE RP 219-2002.
+
+
The testsrc
source generates a test video pattern, showing a
+color pattern, a scrolling gradient and a timestamp. This is mainly
+intended for testing purposes.
+
+
The sources accept the following parameters:
+
+
+color, c
+Specify the color of the source, only available in the color
+source. For the syntax of this option, check the "Color" section in the
+ffmpeg-utils manual.
+
+
+level
+Specify the level of the Hald CLUT, only available in the haldclutsrc
+source. A level of N
generates a picture of N*N*N
by N*N*N
+pixels to be used as identity matrix for 3D lookup tables. Each component is
+coded on a 1/(N*N)
scale.
+
+
+size, s
+Specify the size of the sourced video. For the syntax of this option, check the
+"Video size" section in the ffmpeg-utils manual. The default value is
+"320x240".
+
+This option is not available with the haldclutsrc
filter.
+
+
+rate, r
+Specify the frame rate of the sourced video, as the number of frames
+generated per second. It has to be a string in the format
+frame_rate_num /frame_rate_den , an integer number, a floating point
+number or a valid video frame rate abbreviation. The default value is
+"25".
+
+
+sar
+Set the sample aspect ratio of the sourced video.
+
+
+duration, d
+Set the duration of the sourced video. See
+(ffmpeg-utils)the Time duration section in the ffmpeg-utils(1) manual
+for the accepted syntax.
+
+If not specified, or the expressed duration is negative, the video is
+supposed to be generated forever.
+
+
+decimals, n
+Set the number of decimals to show in the timestamp, only available in the
+testsrc
source.
+
+The displayed timestamp value will correspond to the original
+timestamp value multiplied by the power of 10 of the specified
+value. Default value is 0.
+
+
+
+
For example the following:
+
+
testsrc=duration=5.3:size=qcif:rate=10
+
+
+
will generate a video with a duration of 5.3 seconds, with size
+176x144 and a frame rate of 10 frames per second.
+
+
The following graph description will generate a red source
+with an opacity of 0.2, with size "qcif" and a frame rate of 10
+frames per second.
+
+
color=c=red@0.2:s=qcif:r=10
+
+
+
If the input content is to be ignored, nullsrc
can be used. The
+following command generates noise in the luminance plane by employing
+the geq
filter:
+
+
nullsrc=s=256x256, geq=random(1)*255:128:128
+
+
+
+
31.7.1 Commands# TOC
+
+
The color
source supports the following commands:
+
+
+c, color
+Set the color of the created image. Accepts the same syntax of the
+corresponding color option.
+
+
+
+
+
+
32 Video Sinks# TOC
+
+
Below is a description of the currently available video sinks.
+
+
+
32.1 buffersink# TOC
+
+
Buffer video frames, and make them available to the end of the filter
+graph.
+
+
This sink is mainly intended for programmatic use, in particular
+through the interface defined in libavfilter/buffersink.h
+or the options system.
+
+
It accepts a pointer to an AVBufferSinkContext structure, which
+defines the incoming buffers’ formats, to be passed as the opaque
+parameter to avfilter_init_filter
for initialization.
+
+
+
32.2 nullsink# TOC
+
+
Null video sink: do absolutely nothing with the input video. It is
+mainly useful as a template and for use in analysis / debugging
+tools.
+
+
+
+
33 Multimedia Filters# TOC
+
+
Below is a description of the currently available multimedia filters.
+
+
+
33.1 avectorscope# TOC
+
+
Convert input audio to a video output, representing the audio vector
+scope.
+
+
The filter is used to measure the difference between channels of stereo
+audio stream. A monoaural signal, consisting of identical left and right
+signals, results in a straight vertical line. Any stereo separation is visible
+as a deviation from this line, creating a Lissajous figure.
+If a straight horizontal line appears (or a deviation from it), this
+indicates that the left and right channels are out of phase.
+
+
The filter accepts the following options:
+
+
+mode, m
+Set the vectorscope mode.
+
+Available values are:
+
+‘lissajous ’
+Lissajous rotated by 45 degrees.
+
+
+‘lissajous_xy ’
+Same as above but not rotated.
+
+
+
+Default value is ‘lissajous ’.
+
+
+size, s
+Set the video size for the output. For the syntax of this option, check the "Video size"
+section in the ffmpeg-utils manual. Default value is 400x400
.
+
+
+rate, r
+Set the output frame rate. Default value is 25
.
+
+
+rc
+gc
+bc
+Specify the red, green and blue contrast. Default values are 40
, 160
and 80
.
+Allowed range is [0, 255]
.
+
+
+rf
+gf
+bf
+Specify the red, green and blue fade. Default values are 15
, 10
and 5
.
+Allowed range is [0, 255]
.
+
+
+zoom
+Set the zoom factor. Default value is 1
. Allowed range is [1, 10]
.
+
+
+
+
+
33.1.1 Examples# TOC
+
+
+ Complete example using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] avectorscope=zoom=1.3:rc=2:gc=200:bc=10:rf=1:gf=8:bf=7 [out0]'
+
+
+
+
+
33.2 concat# TOC
+
+
Concatenate audio and video streams, joining them together one after the
+other.
+
+
The filter works on segments of synchronized video and audio streams. All
+segments must have the same number of streams of each type, and that will
+also be the number of streams at output.
+
+
The filter accepts the following options:
+
+
+n
+Set the number of segments. Default is 2.
+
+
+v
+Set the number of output video streams, that is also the number of video
+streams in each segment. Default is 1.
+
+
+a
+Set the number of output audio streams, that is also the number of audio
+streams in each segment. Default is 0.
+
+
+unsafe
+Activate unsafe mode: do not fail if segments have a different format.
+
+
+
+
+
The filter has v +a outputs: first v video outputs, then
+a audio outputs.
+
+
+There are n x (v + a) inputs: first the inputs for the first
+segment, in the same order as the outputs, then the inputs for the second
+segment, etc.
+
+
Related streams do not always have exactly the same duration, for various
+reasons including codec frame size or sloppy authoring. For that reason,
+related synchronized streams (e.g. a video and its audio track) should be
+concatenated at once. The concat filter will use the duration of the longest
+stream in each segment (except the last one), and if necessary pad shorter
+audio streams with silence.
+
+
For this filter to work correctly, all segments must start at timestamp 0.
+
+
All corresponding streams must have the same parameters in all segments; the
+filtering system will automatically select a common pixel format for video
+streams, and a common sample format, sample rate and channel layout for
+audio streams, but other settings, such as resolution, must be converted
+explicitly by the user.
+
+
Different frame rates are acceptable but will result in variable frame rate
+at output; be sure to configure the output file to handle it.
+
+
+
33.2.1 Examples# TOC
+
+
+
+
+
33.3 ebur128# TOC
+
+
EBU R128 scanner filter. This filter takes an audio stream as input and outputs
+it unchanged. By default, it logs a message at a frequency of 10Hz with the
+Momentary loudness (identified by M
), Short-term loudness (S
),
+Integrated loudness (I
) and Loudness Range (LRA
).
+
+
The filter also has a video output (see the video option) with a real
+time graph to observe the loudness evolution. The graphic contains the logged
+message mentioned above, so it is not printed anymore when this option is set,
+unless the verbose logging is set. The main graphing area contains the
+short-term loudness (3 seconds of analysis), and the gauge on the right is for
+the momentary loudness (400 milliseconds).
+
+
More information about the Loudness Recommendation EBU R128 on
+http://tech.ebu.ch/loudness .
+
+
The filter accepts the following options:
+
+
+video
+Activate the video output. The audio stream is passed unchanged whether this
+option is set or not. The video stream will be the first output stream if
+activated. Default is 0
.
+
+
+size
+Set the video size. This option is for video only. For the syntax of this
+option, check the "Video size" section in the ffmpeg-utils manual. Default
+and minimum resolution is 640x480
.
+
+
+meter
+Set the EBU scale meter. Default is 9
. Common values are 9
and
+18
, respectively for EBU scale meter +9 and EBU scale meter +18. Any
+other integer value between this range is allowed.
+
+
+metadata
+Set metadata injection. If set to 1
, the audio input will be segmented
+into 100ms output frames, each of them containing various loudness information
+in metadata. All the metadata keys are prefixed with lavfi.r128.
.
+
+Default is 0
.
+
+
+framelog
+Force the frame logging level.
+
+Available values are:
+
+‘info ’
+information logging level
+
+‘verbose ’
+verbose logging level
+
+
+
+By default, the logging level is set to info . If the video or
+the metadata options are set, it switches to verbose .
+
+
+peak
+Set peak mode(s).
+
+Available modes can be cumulated (the option is a flag
type). Possible
+values are:
+
+‘none ’
+Disable any peak mode (default).
+
+‘sample ’
+Enable sample-peak mode.
+
+Simple peak mode looking for the higher sample value. It logs a message
+for sample-peak (identified by SPK
).
+
+‘true ’
+Enable true-peak mode.
+
+If enabled, the peak lookup is done on an over-sampled version of the input
+stream for better peak accuracy. It logs a message for true-peak
+(identified by TPK
) and true-peak per frame (identified by FTPK
).
+This mode requires a build with libswresample
.
+
+
+
+
+
+
+
+
33.3.1 Examples# TOC
+
+
+ Real-time graph using ffplay
, with a EBU scale meter +18:
+
+
ffplay -f lavfi -i "amovie=input.mp3,ebur128=video=1:meter=18 [out0][out1]"
+
+
+ Run an analysis with ffmpeg
:
+
+
ffmpeg -nostats -i input.mp3 -filter_complex ebur128 -f null -
+
+
+
+
+
33.4 interleave, ainterleave# TOC
+
+
Temporally interleave frames from several inputs.
+
+
interleave
works with video inputs, ainterleave
with audio.
+
+
These filters read frames from several inputs and send the oldest
+queued frame to the output.
+
+
+Input streams must have well defined, monotonically increasing frame
+timestamp values.
+
+
In order to submit one frame to output, these filters need to enqueue
+at least one frame for each input, so they cannot work in case one
+input is not yet terminated and will not receive incoming frames.
+
+
For example consider the case when one input is a select
filter
+which always drop input frames. The interleave
filter will keep
+reading from that input, but it will never be able to send new frames
+to output until the input will send an end-of-stream signal.
+
+
Also, depending on inputs synchronization, the filters will drop
+frames in case one input receives more frames than the other ones, and
+the queue is already filled.
+
+
These filters accept the following options:
+
+
+nb_inputs, n
+Set the number of different inputs, it is 2 by default.
+
+
+
+
+
33.4.1 Examples# TOC
+
+
+ Interleave frames belonging to different streams using ffmpeg
:
+
+
ffmpeg -i bambi.avi -i pr0n.mkv -filter_complex "[0:v][1:v] interleave" out.avi
+
+
+ Add flickering blur effect:
+
+
select='if(gt(random(0), 0.2), 1, 2)':n=2 [tmp], boxblur=2:2, [tmp] interleave
+
+
+
+
+
33.5 perms, aperms# TOC
+
+
Set read/write permissions for the output frames.
+
+
These filters are mainly aimed at developers to test direct path in the
+following filter in the filtergraph.
+
+
The filters accept the following options:
+
+
+mode
+Select the permissions mode.
+
+It accepts the following values:
+
+‘none ’
+Do nothing. This is the default.
+
+‘ro ’
+Set all the output frames read-only.
+
+‘rw ’
+Set all the output frames directly writable.
+
+‘toggle ’
+Make the frame read-only if writable, and writable if read-only.
+
+‘random ’
+Set each output frame read-only or writable randomly.
+
+
+
+
+seed
+Set the seed for the random mode, must be an integer included between
+0
and UINT32_MAX
. If not specified, or if explicitly set to
+-1
, the filter will try to use a good random seed on a best effort
+basis.
+
+
+
+
Note: in case of auto-inserted filter between the permission filter and the
+following one, the permission might not be received as expected in that
+following filter. Inserting a format or aformat filter before the
+perms/aperms filter can avoid this problem.
+
+
+
33.6 select, aselect# TOC
+
+
Select frames to pass in output.
+
+
This filter accepts the following options:
+
+
+expr, e
+Set expression, which is evaluated for each input frame.
+
+If the expression is evaluated to zero, the frame is discarded.
+
+If the evaluation result is negative or NaN, the frame is sent to the
+first output; otherwise it is sent to the output with index
+ceil(val)-1
, assuming that the input index starts from 0.
+
+For example a value of 1.2
corresponds to the output with index
+ceil(1.2)-1 = 2-1 = 1
, that is the second output.
+
+
+outputs, n
+Set the number of outputs. The output to which to send the selected
+frame is based on the result of the evaluation. Default value is 1.
+
+
+
+
The expression can contain the following constants:
+
+
+n
+The (sequential) number of the filtered frame, starting from 0.
+
+
+selected_n
+The (sequential) number of the selected frame, starting from 0.
+
+
+prev_selected_n
+The sequential number of the last selected frame. It’s NAN if undefined.
+
+
+TB
+The timebase of the input timestamps.
+
+
+pts
+The PTS (Presentation TimeStamp) of the filtered video frame,
+expressed in TB units. It’s NAN if undefined.
+
+
+t
+The PTS of the filtered video frame,
+expressed in seconds. It’s NAN if undefined.
+
+
+prev_pts
+The PTS of the previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_pts
+The PTS of the last previously filtered video frame. It’s NAN if undefined.
+
+
+prev_selected_t
+The PTS of the last previously selected video frame. It’s NAN if undefined.
+
+
+start_pts
+The PTS of the first video frame in the video. It’s NAN if undefined.
+
+
+start_t
+The time of the first video frame in the video. It’s NAN if undefined.
+
+
+pict_type (video only)
+The type of the filtered frame. It can assume one of the following
+values:
+
+I
+P
+B
+S
+SI
+SP
+BI
+
+
+
+interlace_type (video only)
+The frame interlace type. It can assume one of the following values:
+
+PROGRESSIVE
+The frame is progressive (not interlaced).
+
+TOPFIRST
+The frame is top-field-first.
+
+BOTTOMFIRST
+The frame is bottom-field-first.
+
+
+
+
+consumed_sample_n (audio only)
+the number of selected samples before the current frame
+
+
+samples_n (audio only)
+the number of samples in the current frame
+
+
+sample_rate (audio only)
+the input sample rate
+
+
+key
+This is 1 if the filtered frame is a key-frame, 0 otherwise.
+
+
+pos
+the position in the file of the filtered frame, -1 if the information
+is not available (e.g. for synthetic video)
+
+
+scene (video only)
+value between 0 and 1 to indicate a new scene; a low value reflects a low
+probability for the current frame to introduce a new scene, while a higher
+value means the current frame is more likely to be one (see the example below)
+
+
+
+
+
The default value of the select expression is "1".
+
+
+
33.6.1 Examples# TOC
+
+
+
+
+
33.7 sendcmd, asendcmd# TOC
+
+
Send commands to filters in the filtergraph.
+
+
These filters read commands to be sent to other filters in the
+filtergraph.
+
+
sendcmd
must be inserted between two video filters,
+asendcmd
must be inserted between two audio filters, but apart
+from that they act the same way.
+
+
The specification of commands can be provided in the filter arguments
+with the commands option, or in a file specified by the
+filename option.
+
+
These filters accept the following options:
+
+commands, c
+Set the commands to be read and sent to the other filters.
+
+filename, f
+Set the filename of the commands to be read and sent to the other
+filters.
+
+
+
+
+
33.7.1 Commands syntax# TOC
+
+
A commands description consists of a sequence of interval
+specifications, comprising a list of commands to be executed when a
+particular event related to that interval occurs. The occurring event
+is typically the current frame time entering or leaving a given time
+interval.
+
+
An interval is specified by the following syntax:
+
+
+
The time interval is specified by the START and END times.
+END is optional and defaults to the maximum time.
+
+
The current frame time is considered within the specified interval if
+it is included in the interval [START , END ), that is when
+the time is greater or equal to START and is lesser than
+END .
+
+
COMMANDS consists of a sequence of one or more command
+specifications, separated by ",", relating to that interval. The
+syntax of a command specification is given by:
+
+
[FLAGS ] TARGET COMMAND ARG
+
+
+
FLAGS is optional and specifies the type of events relating to
+the time interval which enable sending the specified command, and must
+be a non-null sequence of identifier flags separated by "+" or "|" and
+enclosed between "[" and "]".
+
+
The following flags are recognized:
+
+enter
+The command is sent when the current frame timestamp enters the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was not in the given interval, and the
+current is.
+
+
+leave
+The command is sent when the current frame timestamp leaves the
+specified interval. In other words, the command is sent when the
+previous frame timestamp was in the given interval, and the
+current is not.
+
+
+
+
If FLAGS is not specified, a default value of [enter]
is
+assumed.
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional list of argument for
+the given COMMAND .
+
+
Between one interval specification and another, whitespaces, or
+sequences of characters starting with #
until the end of line,
+are ignored and can be used to annotate comments.
+
+
A simplified BNF description of the commands specification syntax
+follows:
+
+
COMMAND_FLAG ::= "enter" | "leave"
+COMMAND_FLAGS ::= COMMAND_FLAG [(+|"|")COMMAND_FLAG ]
+COMMAND ::= ["[" COMMAND_FLAGS "]"] TARGET COMMAND [ARG ]
+COMMANDS ::= COMMAND [,COMMANDS ]
+INTERVAL ::= START [-END ] COMMANDS
+INTERVALS ::= INTERVAL [;INTERVALS ]
+
+
+
+
33.7.2 Examples# TOC
+
+
+
+
+
33.8 setpts, asetpts# TOC
+
+
Change the PTS (presentation timestamp) of the input frames.
+
+
setpts
works on video frames, asetpts
on audio frames.
+
+
This filter accepts the following options:
+
+
+expr
+The expression which is evaluated for each frame to construct its timestamp.
+
+
+
+
+
The expression is evaluated through the eval API and can contain the following
+constants:
+
+
+FRAME_RATE
+frame rate, only defined for constant frame-rate video
+
+
+PTS
+The presentation timestamp in input
+
+
+N
+The count of the input frame for video or the number of consumed samples,
+not including the current frame for audio, starting from 0.
+
+
+NB_CONSUMED_SAMPLES
+The number of consumed samples, not including the current frame (only
+audio)
+
+
+NB_SAMPLES, S
+The number of samples in the current frame (only audio)
+
+
+SAMPLE_RATE, SR
+The audio sample rate.
+
+
+STARTPTS
+The PTS of the first frame.
+
+
+STARTT
+the time in seconds of the first frame
+
+
+INTERLACED
+State whether the current frame is interlaced.
+
+
+T
+the time in seconds of the current frame
+
+
+POS
+original position in the file of the frame, or undefined if undefined
+for the current frame
+
+
+PREV_INPTS
+The previous input PTS.
+
+
+PREV_INT
+previous input time in seconds
+
+
+PREV_OUTPTS
+The previous output PTS.
+
+
+PREV_OUTT
+previous output time in seconds
+
+
+RTCTIME
+The wallclock (RTC) time in microseconds. This is deprecated, use time(0)
+instead.
+
+
+RTCSTART
+The wallclock (RTC) time at the start of the movie in microseconds.
+
+
+TB
+The timebase of the input timestamps.
+
+
+
+
+
+
33.8.1 Examples# TOC
+
+
+ Start counting PTS from zero
+
+
+ Apply fast motion effect:
+
+
+ Apply slow motion effect:
+
+
+ Set fixed rate of 25 frames per second:
+
+
+ Set fixed rate 25 fps with some jitter:
+
+
setpts='1/(25*TB) * (N + 0.05 * sin(N*2*PI/25))'
+
+
+ Apply an offset of 10 seconds to the input PTS:
+
+
+ Generate timestamps from a "live source" and rebase onto the current timebase:
+
+
setpts='(RTCTIME - RTCSTART) / (TB * 1000000)'
+
+
+ Generate timestamps by counting samples:
+
+
+
+
+
+
33.9 settb, asettb# TOC
+
+
Set the timebase to use for the output frames timestamps.
+It is mainly useful for testing timebase configuration.
+
+
It accepts the following parameters:
+
+
+expr, tb
+The expression which is evaluated into the output timebase.
+
+
+
+
+
The value for tb is an arithmetic expression representing a
+rational. The expression can contain the constants "AVTB" (the default
+timebase), "intb" (the input timebase) and "sr" (the sample rate,
+audio only). Default value is "intb".
+
+
+
33.9.1 Examples# TOC
+
+
+ Set the timebase to 1/25:
+
+
+ Set the timebase to 1/10:
+
+
+ Set the timebase to 1001/1000:
+
+
+ Set the timebase to 2*intb:
+
+
+ Set the default timebase value:
+
+
+
+
+
33.10 showcqt# TOC
+
Convert input audio to a video output representing
+frequency spectrum logarithmically (using constant Q transform with
+Brown-Puckette algorithm), with musical tone scale, from E0 to D#10 (10 octaves).
+
+
The filter accepts the following options:
+
+
+volume
+Specify transform volume (multiplier) expression. The expression can contain
+variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+a_weighting(f)
+A-weighting of equal loudness
+
+b_weighting(f)
+B-weighting of equal loudness
+
+c_weighting(f)
+C-weighting of equal loudness
+
+
+Default value is 16
.
+
+
+tlength
+Specify transform length expression. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+Default value is 384/f*tc/(384/f+tc)
.
+
+
+timeclamp
+Specify the transform timeclamp. At low frequency, there is trade-off between
+accuracy in time domain and frequency domain. If timeclamp is lower,
+event in time domain is represented more accurately (such as fast bass drum),
+otherwise event in frequency domain is represented more accurately
+(such as bass guitar). Acceptable value is [0.1, 1.0]. Default value is 0.17
.
+
+
+coeffclamp
+Specify the transform coeffclamp. If coeffclamp is lower, transform is
+more accurate, otherwise transform is faster. Acceptable value is [0.1, 10.0].
+Default value is 1.0
.
+
+
+gamma
+Specify gamma. Lower gamma makes the spectrum more contrasted, higher gamma
+gives the spectrum a wider range. Acceptable value is [1.0, 7.0].
+Default value is 3.0
.
+
+
+fontfile
+Specify font file for use with freetype. If not specified, use embedded font.
+
+
+fontcolor
+Specify font color expression. This is arithmetic expression that should return
+integer value 0xRRGGBB. The expression can contain variables:
+
+frequency, freq, f
+the frequency where transform is evaluated
+
+timeclamp, tc
+value of timeclamp option
+
+
+and functions:
+
+midi(f)
+midi number of frequency f, some midi numbers: E0(16), C1(24), C2(36), A4(69)
+
+r(x), g(x), b(x)
+red, green, and blue value of intensity x
+
+
+Default value is st(0, (midi(f)-59.5)/12);
+st(1, if(between(ld(0),0,1), 0.5-0.5*cos(2*PI*ld(0)), 0));
+r(1-ld(1)) + b(ld(1))
+
+
+fullhd
+If set to 1 (the default), the video size is 1920x1080 (full HD),
+if set to 0, the video size is 960x540. Use this option to make CPU usage lower.
+
+
+fps
+Specify video fps. Default value is 25
.
+
+
+count
+Specify number of transform per frame, so there are fps*count transforms
+per second. Note that audio data rate must be divisible by fps*count.
+Default value is 6
.
+
+
+
+
+
+
33.10.1 Examples# TOC
+
+
+ Playing audio while showing the spectrum:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with frame rate 30 fps:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fps=30:count=5 [out0]'
+
+
+ Playing at 960x540 and lower CPU usage:
+
+
ffplay -f lavfi 'amovie=a.mp3, asplit [a][out1]; [a] showcqt=fullhd=0:count=3 [out0]'
+
+
+ A1 and its harmonics: A1, A2, (near)E3, A3:
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt [out0]'
+
+
+ Same as above, but with more accuracy in frequency domain (and slower):
+
+
ffplay -f lavfi 'aevalsrc=0.1*sin(2*PI*55*t)+0.1*sin(4*PI*55*t)+0.1*sin(6*PI*55*t)+0.1*sin(8*PI*55*t),
+ asplit[a][out1]; [a] showcqt=timeclamp=0.5 [out0]'
+
+
+ B-weighting of equal loudness
+
+
volume=16*b_weighting(f)
+
+
+ Lower Q factor
+
+
tlength=100/f*tc/(100/f+tc)
+
+
+ Custom fontcolor, C-note is colored green, others are colored blue
+
+
fontcolor='if(mod(floor(midi(f)+0.5),12), 0x0000FF, g(1))'
+
+
+
+
+
+
33.11 showspectrum# TOC
+
+
Convert input audio to a video output, representing the audio frequency
+spectrum.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value is
+640x512
.
+
+
+slide
+Specify how the spectrum should slide along the window.
+
+It accepts the following values:
+
+‘replace ’
+the samples start again on the left when they reach the right
+
+‘scroll ’
+the samples scroll from right to left
+
+‘fullframe ’
+frames are only produced when the samples reach the right
+
+
+
+Default value is replace
.
+
+
+mode
+Specify display mode.
+
+It accepts the following values:
+
+‘combined ’
+all channels are displayed in the same row
+
+‘separate ’
+all channels are displayed in separate rows
+
+
+
+Default value is ‘combined ’.
+
+
+color
+Specify display color mode.
+
+It accepts the following values:
+
+‘channel ’
+each channel is displayed in a separate color
+
+‘intensity ’
+each channel is displayed using the same color scheme
+
+
+
+Default value is ‘channel ’.
+
+
+scale
+Specify scale used for calculating intensity color values.
+
+It accepts the following values:
+
+‘lin ’
+linear
+
+‘sqrt ’
+square root, default
+
+‘cbrt ’
+cubic root
+
+‘log ’
+logarithmic
+
+
+
+Default value is ‘sqrt ’.
+
+
+saturation
+Set saturation modifier for displayed colors. Negative values provide
+alternative color scheme. 0
is no saturation at all.
+Saturation must be in [-10.0, 10.0] range.
+Default value is 1
.
+
+
+win_func
+Set window function.
+
+It accepts the following values:
+
+‘none ’
+No samples pre-processing (do not expect this to be faster)
+
+‘hann ’
+Hann window
+
+‘hamming ’
+Hamming window
+
+‘blackman ’
+Blackman window
+
+
+
+Default value is hann
.
+
+
+
+
The usage is very similar to the showwaves filter; see the examples in that
+section.
+
+
+
33.11.1 Examples# TOC
+
+
+ Large window with logarithmic color scaling:
+
+
showspectrum=s=1280x480:scale=log
+
+
+ Complete example for a colored and sliding spectrum per channel using ffplay
:
+
+
ffplay -f lavfi 'amovie=input.mp3, asplit [a][out1];
+ [a] showspectrum=mode=separate:color=intensity:slide=1:scale=cbrt [out0]'
+
+
+
+
+
33.12 showwaves# TOC
+
+
Convert input audio to a video output, representing the samples waves.
+
+
The filter accepts the following options:
+
+
+size, s
+Specify the video size for the output. For the syntax of this option, check
+the "Video size" section in the ffmpeg-utils manual. Default value
+is "600x240".
+
+
+mode
+Set display mode.
+
+Available values are:
+
+‘point ’
+Draw a point for each sample.
+
+
+‘line ’
+Draw a vertical line for each sample.
+
+
+‘p2p ’
+Draw a point for each sample and a line between them.
+
+
+‘cline ’
+Draw a centered vertical line for each sample.
+
+
+
+Default value is point
.
+
+
+n
+Set the number of samples which are printed on the same column. A
+larger value will decrease the frame rate. Must be a positive
+integer. This option can be set only if the value for rate
+is not explicitly specified.
+
+
+rate, r
+Set the (approximate) output frame rate. This is done by setting the
+option n . Default value is "25".
+
+
+split_channels
+Set if channels should be drawn separately or overlap. Default value is 0.
+
+
+
+
+
+
33.12.1 Examples# TOC
+
+
+ Output the input file audio and the corresponding video representation
+at the same time:
+
+
amovie=a.mp3,asplit[out0],showwaves[out1]
+
+
+ Create a synthetic signal and show it with showwaves, forcing a
+frame rate of 30 frames per second:
+
+
aevalsrc=sin(1*2*PI*t)*sin(880*2*PI*t):cos(2*PI*200*t),asplit[out0],showwaves=r=30[out1]
+
+
+
+
+
33.13 split, asplit# TOC
+
+
Split input into several identical outputs.
+
+
asplit
works with audio input, split
with video.
+
+
The filter accepts a single parameter which specifies the number of outputs. If
+unspecified, it defaults to 2.
+
+
+
33.13.1 Examples# TOC
+
+
+ Create two separate outputs from the same input:
+
+
[in] split [out0][out1]
+
+
+ To create 3 or more outputs, you need to specify the number of
+outputs, like in:
+
+
[in] asplit=3 [out0][out1][out2]
+
+
+ Create two separate outputs from the same input, one cropped and
+one padded:
+
+
[in] split [splitout1][splitout2];
+[splitout1] crop=100:100:0:0 [cropout];
+[splitout2] pad=200:200:100:100 [padout];
+
+
+ Create 5 copies of the input audio with ffmpeg
:
+
+
ffmpeg -i INPUT -filter_complex asplit=5 OUTPUT
+
+
+
+
+
33.14 zmq, azmq# TOC
+
+
Receive commands sent through a libzmq client, and forward them to
+filters in the filtergraph.
+
+
zmq
and azmq
work as a pass-through filters. zmq
+must be inserted between two video filters, azmq
between two
+audio filters.
+
+
To enable these filters you need to install the libzmq library and
+headers and configure FFmpeg with --enable-libzmq
.
+
+
For more information about libzmq see:
+http://www.zeromq.org/
+
+
The zmq
and azmq
filters work as a libzmq server, which
+receives messages sent through a network interface defined by the
+bind_address option.
+
+
The received message must be in the form:
+
+
+
TARGET specifies the target of the command, usually the name of
+the filter class or a specific filter instance name.
+
+
COMMAND specifies the name of the command for the target filter.
+
+
ARG is optional and specifies the optional argument list for the
+given COMMAND .
+
+
Upon reception, the message is processed and the corresponding command
+is injected into the filtergraph. Depending on the result, the filter
+will send a reply to the client, adopting the format:
+
+
ERROR_CODE ERROR_REASON
+MESSAGE
+
+
+
MESSAGE is optional.
+
+
+
33.14.1 Examples# TOC
+
+
Look at tools/zmqsend for an example of a zmq client which can
+be used to send commands processed by these filters.
+
+
Consider the following filtergraph generated by ffplay
+
+
ffplay -dumpgraph 1 -f lavfi "
+color=s=100x100:c=red [l];
+color=s=100x100:c=blue [r];
+nullsrc=s=200x100, zmq [bg];
+[bg][l] overlay [bg+l];
+[bg+l][r] overlay=x=100 "
+
+
+
To change the color of the left side of the video, the following
+command can be used:
+
+
echo Parsed_color_0 c yellow | tools/zmqsend
+
+
+
To change the right side:
+
+
echo Parsed_color_1 c pink | tools/zmqsend
+
+
+
+
+
34 Multimedia Sources# TOC
+
+
Below is a description of the currently available multimedia sources.
+
+
+
34.1 amovie# TOC
+
+
This is the same as movie source, except it selects an audio
+stream by default.
+
+
+
34.2 movie# TOC
+
+
Read audio and/or video stream(s) from a movie container.
+
+
It accepts the following parameters:
+
+
+filename
+The name of the resource to read (not necessarily a file; it can also be a
+device or a stream accessed through some protocol).
+
+
+format_name, f
+Specifies the format assumed for the movie to read, and can be either
+the name of a container or an input device. If not specified, the
+format is guessed from movie_name or by probing.
+
+
+seek_point, sp
+Specifies the seek point in seconds. The frames will be output
+starting from this seek point. The parameter is evaluated with
+av_strtod
, so the numerical value may be suffixed by an IS
+postfix. The default value is "0".
+
+
+streams, s
+Specifies the streams to read. Several streams can be specified,
+separated by "+". The source will then have as many outputs, in the
+same order. The syntax is explained in the “Stream specifiers”
+section in the ffmpeg manual. Two special names, "dv" and "da" specify
+respectively the default (best suited) video and audio stream. Default
+is "dv", or "da" if the filter is called as "amovie".
+
+
+stream_index, si
+Specifies the index of the video stream to read. If the value is -1,
+the most suitable video stream will be automatically selected. The default
+value is "-1". Deprecated. If the filter is called "amovie", it will select
+audio instead of video.
+
+
+loop
+Specifies how many times to read the stream in sequence.
+If the value is less than 1, the stream will be read again and again.
+Default value is "1".
+
+Note that when the movie is looped the source timestamps are not
+changed, so it will generate non monotonically increasing timestamps.
+
+
+
+
It allows overlaying a second video on top of the main input of
+a filtergraph, as shown in this graph:
+
+
input -----------> deltapts0 --> overlay --> output
+ ^
+ |
+movie --> scale--> deltapts1 -------+
+
+
+
34.2.1 Examples# TOC
+
+
+ Skip 3.2 seconds from the start of the AVI file in.avi, and overlay it
+on top of the input labelled "in":
+
+
movie=in.avi:seek_point=3.2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read from a video4linux2 device, and overlay it on top of the input
+labelled "in":
+
+
movie=/dev/video0:f=video4linux2, scale=180:-1, setpts=PTS-STARTPTS [over];
+[in] setpts=PTS-STARTPTS [main];
+[main][over] overlay=16:16 [out]
+
+
+ Read the first video stream and the audio stream with id 0x81 from
+dvd.vob; the video is connected to the pad named "video" and the audio is
+connected to the pad named "audio":
+
+
movie=dvd.vob:s=v:0+#0x81 [video] [audio]
+
+
+
+
+
+
35 See Also# TOC
+
+
ffprobe ,
+ffmpeg , ffplay , ffserver ,
+ffmpeg-utils ,
+ffmpeg-scaler ,
+ffmpeg-resampler ,
+ffmpeg-codecs ,
+ffmpeg-bitstream-filters ,
+ffmpeg-formats ,
+ffmpeg-devices ,
+ffmpeg-protocols ,
+ffmpeg-filters
+
+
+
+
36 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/ffprobe.html b/Externals/ffmpeg/dev/doc/ffprobe.html
new file mode 100644
index 0000000000..1e10bf8cf5
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/ffprobe.html
@@ -0,0 +1,1113 @@
+
+
+
+
+
+
+ ffprobe Documentation
+
+
+
+
+
+
+
+
+ ffprobe Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Synopsis# TOC
+
+
ffprobe [options ] [input_file ]
+
+
+
2 Description# TOC
+
+
ffprobe gathers information from multimedia streams and prints it in
+human- and machine-readable fashion.
+
+
For example it can be used to check the format of the container used
+by a multimedia stream and the format and type of each media stream
+contained in it.
+
+
If a filename is specified in input, ffprobe will try to open and
+probe the file content. If the file cannot be opened or recognized as
+a multimedia file, a positive exit code is returned.
+
+
ffprobe may be employed both as a standalone application or in
+combination with a textual filter, which may perform more
+sophisticated processing, e.g. statistical processing or plotting.
+
+
Options are used to list some of the formats supported by ffprobe or
+for specifying which information to display, and for setting how
+ffprobe will show it.
+
+
ffprobe output is designed to be easily parsable by a textual filter,
+and consists of one or more sections of a form defined by the selected
+writer, which is specified by the print_format option.
+
+
Sections may contain other nested sections, and are identified by a
+name (which may be shared by other sections), and a unique
+name. See the output of sections .
+
+
Metadata tags stored in the container or in the streams are recognized
+and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
+section.
+
+
+
+
3 Options# TOC
+
+
All the numerical options, if not specified otherwise, accept a string
+representing a number as input, which may be followed by one of the SI
+unit prefixes, for example: ’K’, ’M’, or ’G’.
+
+
If ’i’ is appended to the SI unit prefix, the complete prefix will be
+interpreted as a unit prefix for binary multiples, which are based on
+powers of 1024 instead of powers of 1000. Appending ’B’ to the SI unit
+prefix multiplies the value by 8. This allows using, for example:
+’KB’, ’MiB’, ’G’ and ’B’ as number suffixes.
+
+
Options which do not take arguments are boolean options, and set the
+corresponding value to true. They can be set to false by prefixing
+the option name with "no". For example using "-nofoo"
+will set the boolean option with name "foo" to false.
+
+
+
3.1 Stream specifiers# TOC
+
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
+are used to precisely specify which stream(s) a given option belongs to.
+
+
A stream specifier is a string generally appended to the option name and
+separated from it by a colon. E.g. -codec:a:1 ac3
contains the
+a:1
stream specifier, which matches the second audio stream. Therefore, it
+would select the ac3 codec for the second audio stream.
+
+
A stream specifier can match several streams, so that the option is applied to all
+of them. E.g. the stream specifier in -b:a 128k
matches all audio
+streams.
+
+
An empty stream specifier matches all streams. For example, -codec copy
+or -codec: copy
would copy all the streams without reencoding.
+
+
Possible forms of stream specifiers are:
+
+stream_index
+Matches the stream with this index. E.g. -threads:1 4
would set the
+thread count for the second stream to 4.
+
+stream_type [:stream_index ]
+stream_type is one of following: ’v’ for video, ’a’ for audio, ’s’ for subtitle,
+’d’ for data, and ’t’ for attachments. If stream_index is given, then it matches
+stream number stream_index of this type. Otherwise, it matches all
+streams of this type.
+
+p:program_id [:stream_index ]
+If stream_index is given, then it matches the stream with number stream_index
+in the program with the id program_id . Otherwise, it matches all streams in the
+program.
+
+#stream_id or i:stream_id
+Match the stream by stream id (e.g. PID in MPEG-TS container).
+
+m:key [:value ]
+Matches streams with the metadata tag key having the specified value. If
+value is not given, matches streams that contain the given tag with any
+value.
+
+Note that in ffmpeg
, matching by metadata will only work properly for
+input files.
+
+
+
+
+
3.2 Generic options# TOC
+
+
These options are shared amongst the ff* tools.
+
+
+-L
+Show license.
+
+
+-h, -?, -help, --help [arg ]
+Show help. An optional parameter may be specified to print help about a specific
+item. If no argument is specified, only basic (non advanced) tool
+options are shown.
+
+Possible values of arg are:
+
+long
+Print advanced tool options in addition to the basic tool options.
+
+
+full
+Print complete list of options, including shared and private options
+for encoders, decoders, demuxers, muxers, filters, etc.
+
+
+decoder=decoder_name
+Print detailed information about the decoder named decoder_name . Use the
+-decoders option to get a list of all decoders.
+
+
+encoder=encoder_name
+Print detailed information about the encoder named encoder_name . Use the
+-encoders option to get a list of all encoders.
+
+
+demuxer=demuxer_name
+Print detailed information about the demuxer named demuxer_name . Use the
+-formats option to get a list of all demuxers and muxers.
+
+
+muxer=muxer_name
+Print detailed information about the muxer named muxer_name . Use the
+-formats option to get a list of all muxers and demuxers.
+
+
+filter=filter_name
+Print detailed information about the filter name filter_name . Use the
+-filters option to get a list of all filters.
+
+
+
+
+-version
+Show version.
+
+
+-formats
+Show available formats (including devices).
+
+
+-devices
+Show available devices.
+
+
+-codecs
+Show all codecs known to libavcodec.
+
+Note that the term ’codec’ is used throughout this documentation as a shortcut
+for what is more correctly called a media bitstream format.
+
+
+-decoders
+Show available decoders.
+
+
+-encoders
+Show all available encoders.
+
+
+-bsfs
+Show available bitstream filters.
+
+
+-protocols
+Show available protocols.
+
+
+-filters
+Show available libavfilter filters.
+
+
+-pix_fmts
+Show available pixel formats.
+
+
+-sample_fmts
+Show available sample formats.
+
+
+-layouts
+Show channel names and standard channel layouts.
+
+
+-colors
+Show recognized color names.
+
+
+-sources device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sources of the input device.
+Some devices may provide system-dependent source names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sources pulse,server=192.168.0.4
+
+
+
+-sinks device [,opt1 =val1 [,opt2 =val2 ]...]
+Show autodetected sinks of the output device.
+Some devices may provide system-dependent sink names that cannot be autodetected.
+The returned list cannot be assumed to be always complete.
+
+
ffmpeg -sinks pulse,server=192.168.0.4
+
+
+
+-loglevel [repeat+]loglevel | -v [repeat+]loglevel
+Set the logging level used by the library.
+Adding "repeat+" indicates that repeated log output should not be compressed
+to the first line and the "Last message repeated n times" line will be
+omitted. "repeat" can also be used alone.
+If "repeat" is used alone, and with no prior loglevel set, the default
+loglevel will be used. If multiple loglevel parameters are given, using
+’repeat’ will not change the loglevel.
+loglevel is a string or a number containing one of the following values:
+
+‘quiet, -8 ’
+Show nothing at all; be silent.
+
+‘panic, 0 ’
+Only show fatal errors which could lead the process to crash, such as
+an assert failure. This is not currently used for anything.
+
+‘fatal, 8 ’
+Only show fatal errors. These are errors after which the process absolutely
+cannot continue.
+
+‘error, 16 ’
+Show all errors, including ones which can be recovered from.
+
+‘warning, 24 ’
+Show all warnings and errors. Any message related to possibly
+incorrect or unexpected events will be shown.
+
+‘info, 32 ’
+Show informative messages during processing. This is in addition to
+warnings and errors. This is the default value.
+
+‘verbose, 40 ’
+Same as info
, except more verbose.
+
+‘debug, 48 ’
+Show everything, including debugging information.
+
+
+
+By default the program logs to stderr, if coloring is supported by the
+terminal, colors are used to mark errors and warnings. Log coloring
+can be disabled setting the environment variable
+AV_LOG_FORCE_NOCOLOR
or NO_COLOR
, or can be forced setting
+the environment variable AV_LOG_FORCE_COLOR
.
+The use of the environment variable NO_COLOR
is deprecated and
+will be dropped in a following FFmpeg version.
+
+
+-report
+Dump full command line and console output to a file named
+program -YYYYMMDD -HHMMSS .log
in the current
+directory.
+This file can be useful for bug reports.
+It also implies -loglevel verbose
.
+
+Setting the environment variable FFREPORT
to any value has the
+same effect. If the value is a ’:’-separated key=value sequence, these
+options will affect the report; option values must be escaped if they
+contain special characters or the options delimiter ’:’ (see the
+“Quoting and escaping” section in the ffmpeg-utils manual).
+
+The following options are recognized:
+
+file
+set the file name to use for the report; %p
is expanded to the name
+of the program, %t
is expanded to a timestamp, %%
is expanded
+to a plain %
+
+level
+set the log verbosity level using a numerical value (see -loglevel
).
+
+
+
+For example, to output a report to a file named ffreport.log
+using a log level of 32
(alias for log level info
):
+
+
+
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
+
+
+Errors in parsing the environment variable are not fatal, and will not
+appear in the report.
+
+
+-hide_banner
+Suppress printing banner.
+
+All FFmpeg tools will normally show a copyright notice, build options
+and library versions. This option can be used to suppress printing
+this information.
+
+
+-cpuflags flags (global )
+Allows setting and clearing cpu flags. This option is intended
+for testing. Do not use it unless you know what you’re doing.
+
+
ffmpeg -cpuflags -sse+mmx ...
+ffmpeg -cpuflags mmx ...
+ffmpeg -cpuflags 0 ...
+
+Possible flags for this option are:
+
+‘x86 ’
+
+‘mmx ’
+‘mmxext ’
+‘sse ’
+‘sse2 ’
+‘sse2slow ’
+‘sse3 ’
+‘sse3slow ’
+‘ssse3 ’
+‘atom ’
+‘sse4.1 ’
+‘sse4.2 ’
+‘avx ’
+‘xop ’
+‘fma4 ’
+‘3dnow ’
+‘3dnowext ’
+‘cmov ’
+
+
+‘ARM ’
+
+‘armv5te ’
+‘armv6 ’
+‘armv6t2 ’
+‘vfp ’
+‘vfpv3 ’
+‘neon ’
+
+
+‘PowerPC ’
+
+‘altivec ’
+
+
+‘Specific Processors ’
+
+‘pentium2 ’
+‘pentium3 ’
+‘pentium4 ’
+‘k6 ’
+‘k62 ’
+‘athlon ’
+‘athlonxp ’
+‘k8 ’
+
+
+
+
+
+-opencl_bench
+Benchmark all available OpenCL devices and show the results. This option
+is only available when FFmpeg has been compiled with --enable-opencl
.
+
+
+-opencl_options options (global )
+Set OpenCL environment options. This option is only available when
+FFmpeg has been compiled with --enable-opencl
.
+
+options must be a list of key =value option pairs
+separated by ’:’. See the “OpenCL Options” section in the
+ffmpeg-utils manual for the list of supported options.
+
+
+
+
+
3.3 AVOptions# TOC
+
+
These options are provided directly by the libavformat, libavdevice and
+libavcodec libraries. To see the list of available AVOptions, use the
+-help option. They are separated into two categories:
+
+generic
+These options can be set for any container, codec or device. Generic options
+are listed under AVFormatContext options for containers/devices and under
+AVCodecContext options for codecs.
+
+private
+These options are specific to the given container, device or codec. Private
+options are listed under their corresponding containers/devices/codecs.
+
+
+
+
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
+an MP3 file, use the id3v2_version private option of the MP3
+muxer:
+
+
ffmpeg -i input.flac -id3v2_version 3 out.mp3
+
+
+
All codec AVOptions are per-stream, and thus a stream specifier
+should be attached to them.
+
+
Note: the -nooption syntax cannot be used for boolean
+AVOptions, use -option 0 /-option 1 .
+
+
Note: the old undocumented way of specifying per-stream AVOptions by
+prepending v/a/s to the options name is now obsolete and will be
+removed soon.
+
+
+
3.4 Main options# TOC
+
+
+-f format
+Force format to use.
+
+
+-unit
+Show the unit of the displayed values.
+
+
+-prefix
+Use SI prefixes for the displayed values.
+Unless the "-byte_binary_prefix" option is used all the prefixes
+are decimal.
+
+
+-byte_binary_prefix
+Force the use of binary prefixes for byte values.
+
+
+-sexagesimal
+Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
+
+
+-pretty
+Prettify the format of the displayed values, it corresponds to the
+options "-unit -prefix -byte_binary_prefix -sexagesimal".
+
+
+-of, -print_format writer_name [=writer_options ]
+Set the output printing format.
+
+writer_name specifies the name of the writer, and
+writer_options specifies the options to be passed to the writer.
+
+For example for printing the output in JSON format, specify:
+
+
+For more details on the available output printing formats, see the
+Writers section below.
+
+
+-sections
+Print sections structure and section information, and exit. The output
+is not meant to be parsed by a machine.
+
+
+-select_streams stream_specifier
+Select only the streams specified by stream_specifier . This
+option affects only the options related to streams
+(e.g. show_streams
, show_packets
, etc.).
+
+For example to show only audio streams, you can use the command:
+
+
ffprobe -show_streams -select_streams a INPUT
+
+
+To show only video packets belonging to the video stream with index 1:
+
+
ffprobe -show_packets -select_streams v:1 INPUT
+
+
+
+-show_data
+Show payload data, as a hexadecimal and ASCII dump. Coupled with
+-show_packets , it will dump the packets’ data. Coupled with
+-show_streams , it will dump the codec extradata.
+
+The dump is printed as the "data" field. It may contain newlines.
+
+
+-show_data_hash algorithm
+Show a hash of payload data, for packets with -show_packets and for
+codec extradata with -show_streams .
+
+
+-show_error
+Show information about the error found when trying to probe the input.
+
+The error information is printed within a section with name "ERROR".
+
+
+-show_format
+Show information about the container format of the input multimedia
+stream.
+
+All the container format information is printed within a section with
+name "FORMAT".
+
+
+-show_format_entry name
+Like -show_format , but only prints the specified entry of the
+container format information, rather than all. This option may be given more
+than once, then all specified entries will be shown.
+
+This option is deprecated, use show_entries
instead.
+
+
+-show_entries section_entries
+Set list of entries to show.
+
+Entries are specified according to the following
+syntax. section_entries contains a list of section entries
+separated by :
. Each section entry is composed by a section
+name (or unique name), optionally followed by a list of entries local
+to that section, separated by ,
.
+
+If section name is specified but is followed by no =
, all
+entries are printed to output, together with all the contained
+sections. Otherwise only the entries specified in the local section
+entries list are printed. In particular, if =
is specified but
+the list of local entries is empty, then no entries will be shown for
+that section.
+
+Note that the order of specification of the local section entries is
+not honored in the output, and the usual display order will be
+retained.
+
+The formal syntax is given by:
+
+
LOCAL_SECTION_ENTRIES ::= SECTION_ENTRY_NAME [,LOCAL_SECTION_ENTRIES ]
+SECTION_ENTRY ::= SECTION_NAME [=[LOCAL_SECTION_ENTRIES ]]
+SECTION_ENTRIES ::= SECTION_ENTRY [:SECTION_ENTRIES ]
+
+
+For example, to show only the index and type of each stream, and the PTS
+time, duration time, and stream index of the packets, you can specify
+the argument:
+
+
packet=pts_time,duration_time,stream_index : stream=index,codec_type
+
+
+To show all the entries in the section "format", but only the codec
+type in the section "stream", specify the argument:
+
+
format : stream=codec_type
+
+
+To show all the tags in the stream and format sections:
+
+
stream_tags : format_tags
+
+
+To show only the title
tag (if available) in the stream
+sections:
+
+
+
+-show_packets
+Show information about each packet contained in the input multimedia
+stream.
+
+The information for each single packet is printed within a dedicated
+section with name "PACKET".
+
+
+-show_frames
+Show information about each frame and subtitle contained in the input
+multimedia stream.
+
+The information for each single frame is printed within a dedicated
+section with name "FRAME" or "SUBTITLE".
+
+
+-show_streams
+Show information about each media stream contained in the input
+multimedia stream.
+
+Each media stream information is printed within a dedicated section
+with name "STREAM".
+
+
+-show_programs
+Show information about programs and their streams contained in the input
+multimedia stream.
+
+Each media stream information is printed within a dedicated section
+with name "PROGRAM_STREAM".
+
+
+-show_chapters
+Show information about chapters stored in the format.
+
+Each chapter is printed within a dedicated section with name "CHAPTER".
+
+
+-count_frames
+Count the number of frames per stream and report it in the
+corresponding stream section.
+
+
+-count_packets
+Count the number of packets per stream and report it in the
+corresponding stream section.
+
+
+-read_intervals read_intervals
+
+Read only the specified intervals. read_intervals must be a
+sequence of interval specifications separated by ",".
+ffprobe
will seek to the interval starting point, and will
+continue reading from that.
+
+Each interval is specified by two optional parts, separated by "%".
+
+The first part specifies the interval start position. It is
+interpreted as an absolute position, or as a relative offset from the
+current position if it is preceded by the "+" character. If this first
+part is not specified, no seeking will be performed when reading this
+interval.
+
+The second part specifies the interval end position. It is interpreted
+as an absolute position, or as a relative offset from the current
+position if it is preceded by the "+" character. If the offset
+specification starts with "#", it is interpreted as the number of
+packets to read (not including the flushing packets) from the interval
+start. If no second part is specified, the program will read until the
+end of the input.
+
+Note that seeking is not accurate, thus the actual interval start
+point may be different from the specified position. Also, when an
+interval duration is specified, the absolute end time will be computed
+by adding the duration to the interval start point found by seeking
+the file, rather than to the specified start value.
+
+The formal syntax is given by:
+
+
INTERVAL ::= [START |+START_OFFSET ][%[END |+END_OFFSET ]]
+INTERVALS ::= INTERVAL [,INTERVALS ]
+
+
+A few examples follow.
+
+ Seek to time 10, read packets until 20 seconds after the found seek
+point, then seek to position 01:30
(1 minute and thirty
+seconds) and read packets until position 01:45
.
+
+
+ Read only 42 packets after seeking to position 01:23
:
+
+
+ Read only the first 20 seconds from the start:
+
+
+ Read from the start until position 02:30
:
+
+
+
+
+-show_private_data, -private
+Show private data, that is data depending on the format of the
+particular shown element.
+This option is enabled by default, but you may need to disable it
+for specific uses, for example when creating XSD-compliant XML output.
+
+
+-show_program_version
+Show information related to program version.
+
+Version information is printed within a section with name
+"PROGRAM_VERSION".
+
+
+-show_library_versions
+Show information related to library versions.
+
+Version information for each library is printed within a section with
+name "LIBRARY_VERSION".
+
+
+-show_versions
+Show information related to program and library versions. This is the
+equivalent of setting both -show_program_version and
+-show_library_versions options.
+
+
+-show_pixel_formats
+Show information about all pixel formats supported by FFmpeg.
+
+Pixel format information for each format is printed within a section
+with name "PIXEL_FORMAT".
+
+
+-bitexact
+Force bitexact output, useful to produce output which is not dependent
+on the specific build.
+
+
+-i input_file
+Read input_file .
+
+
+
+
+
+
4 Writers# TOC
+
+
A writer defines the output format adopted by ffprobe
, and will be
+used for printing all the parts of the output.
+
+
A writer may accept one or more arguments, which specify the options
+to adopt. The options are specified as a list of key =value
+pairs, separated by ":".
+
+
All writers support the following options:
+
+
+string_validation, sv
+Set string validation mode.
+
+The following values are accepted.
+
+‘fail ’
+The writer will fail immediately in case an invalid string (UTF-8)
+sequence or code point is found in the input. This is especially
+useful to validate input metadata.
+
+
+‘ignore ’
+Any validation error will be ignored. This will result in possibly
+broken output, especially with the json or xml writer.
+
+
+‘replace ’
+The writer will substitute invalid UTF-8 sequences or code points with
+the string specified with the string_validation_replacement .
+
+
+
+Default value is ‘replace ’.
+
+
+string_validation_replacement, svr
+Set replacement string to use in case string_validation is
+set to ‘replace ’.
+
+In case the option is not specified, the writer will assume the empty
+string, that is it will remove the invalid sequences from the input
+strings.
+
+
+
+
A description of the currently available writers follows.
+
+
+
4.1 default# TOC
+
Default format.
+
+
Print each section in the form:
+
+
[SECTION]
+key1=val1
+...
+keyN=valN
+[/SECTION]
+
+
+
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
+PROGRAM_STREAM section, and are prefixed by the string "TAG:".
+
+
A description of the accepted options follows.
+
+
+nokey, nk
+If set to 1 specify not to print the key of each field. Default value
+is 0.
+
+
+noprint_wrappers, nw
+If set to 1 specify not to print the section header and footer.
+Default value is 0.
+
+
+
+
+
4.2 compact, csv# TOC
+
Compact and CSV format.
+
+
The csv
writer is equivalent to compact
, but supports
+different defaults.
+
+
Each section is printed on a single line.
+If no option is specified, the output has the form:
+
+
section|key1=val1| ... |keyN=valN
+
+
+
Metadata tags are printed in the corresponding "format" or "stream"
+section. A metadata tag key, if printed, is prefixed by the string
+"tag:".
+
+
The description of the accepted options follows.
+
+
+item_sep, s
+Specify the character to use for separating fields in the output line.
+It must be a single printable character, it is "|" by default ("," for
+the csv
writer).
+
+
+nokey, nk
+If set to 1 specify not to print the key of each field. Its default
+value is 0 (1 for the csv
writer).
+
+
+escape, e
+Set the escape mode to use, default to "c" ("csv" for the csv
+writer).
+
+It can assume one of the following values:
+
+c
+Perform C-like escaping. Strings containing a newline (’\n’), carriage
+return (’\r’), a tab (’\t’), a form feed (’\f’), the escaping
+character (’\’) or the item separator character SEP are escaped using C-like fashioned
+escaping, so that a newline is converted to the sequence "\n", a
+carriage return to "\r", ’\’ to "\\" and the separator SEP is
+converted to "\SEP ".
+
+
+csv
+Perform CSV-like escaping, as described in RFC4180. Strings
+containing a newline (’\n’), a carriage return (’\r’), a double quote
+(’"’), or SEP are enclosed in double-quotes.
+
+
+none
+Perform no escaping.
+
+
+
+
+print_section, p
+Print the section name at the beginning of each line if the value is
+1
, disable it with value set to 0
. Default value is
+1
.
+
+
+
+
+
+
4.3 flat# TOC
+
Flat format.
+
+
A free-form output where each line contains an explicit key=value, such as
+"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
+directly embedded in sh scripts as long as the separator character is an
+alphanumeric character or an underscore (see sep_char option).
+
+
The description of the accepted options follows.
+
+
+sep_char, s
+Separator character used to separate the chapter, the section name, IDs and
+potential tags in the printed field key.
+
+Default value is ’.’.
+
+
+hierarchical, h
+Specify if the section name specification should be hierarchical. If
+set to 1, and if there is more than one section in the current
+chapter, the section name will be prefixed by the name of the
+chapter. A value of 0 will disable this behavior.
+
+Default value is 1.
+
+
+
+
+
+
INI format output.
+
+
Print output in an INI based format.
+
+
The following conventions are adopted:
+
+
+ all key and values are UTF-8
+ ’.’ is the subgroup separator
+ newline, ’\t’, ’\f’, ’\b’ and the following characters are escaped
+ ’\’ is the escape character
+ ’#’ is the comment indicator
+ ’=’ is the key/value separator
+ ’:’ is not used but usually parsed as key/value separator
+
+
+
This writer accepts options as a list of key =value pairs,
+separated by ":".
+
+
The description of the accepted options follows.
+
+
+hierarchical, h
+Specify if the section name specification should be hierarchical. If
+set to 1, and if there is more than one section in the current
+chapter, the section name will be prefixed by the name of the
+chapter. A value of 0 will disable this behavior.
+
+Default value is 1.
+
+
+
+
+
4.5 json# TOC
+
JSON based format.
+
+
Each section is printed using JSON notation.
+
+
The description of the accepted options follows.
+
+
+compact, c
+If set to 1 enable compact output, that is each section will be
+printed on a single line. Default value is 0.
+
+
+
+
For more information about JSON, see http://www.json.org/ .
+
+
+
+
XML based format.
+
+
The XML output is described in the XML schema description file
+ffprobe.xsd installed in the FFmpeg datadir.
+
+
An updated version of the schema can be retrieved at the url
+http://www.ffmpeg.org/schema/ffprobe.xsd , which redirects to the
+latest schema committed into the FFmpeg development source code tree.
+
+
Note that the output issued will be compliant to the
+ffprobe.xsd schema only when no special global output options
+(unit , prefix , byte_binary_prefix ,
+sexagesimal etc.) are specified.
+
+
The description of the accepted options follows.
+
+
+fully_qualified, q
+If set to 1 specify if the output should be fully qualified. Default
+value is 0.
+This is required for generating an XML file which can be validated
+through an XSD file.
+
+
+xsd_compliant, x
+If set to 1 perform more checks for ensuring that the output is XSD
+compliant. Default value is 0.
+This option automatically sets fully_qualified to 1.
+
+
+
+
For more information about the XML format, see
+http://www.w3.org/XML/ .
+
+
+
5 Timecode# TOC
+
+
ffprobe
supports Timecode extraction:
+
+
+ MPEG1/2 timecode is extracted from the GOP, and is available in the video
+stream details (-show_streams , see timecode ).
+
+ MOV timecode is extracted from tmcd track, so is available in the tmcd
+stream metadata (-show_streams , see TAG:timecode ).
+
+ DV, GXF and AVI timecodes are available in format metadata
+(-show_format , see TAG:timecode ).
+
+
+
+
+
+
6 See Also# TOC
+
+
ffprobe-all ,
+ffmpeg , ffplay , ffserver ,
+ffmpeg-utils ,
+ffmpeg-scaler ,
+ffmpeg-resampler ,
+ffmpeg-codecs ,
+ffmpeg-bitstream-filters ,
+ffmpeg-formats ,
+ffmpeg-devices ,
+ffmpeg-protocols ,
+ffmpeg-filters
+
+
+
+
7 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/general.html b/Externals/ffmpeg/dev/doc/general.html
new file mode 100644
index 0000000000..df45ecabca
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/general.html
@@ -0,0 +1,986 @@
+
+
+
+
+
+
+ General Documentation
+
+
+
+
+
+
+
+
+ General Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 External libraries# TOC
+
+
FFmpeg can be hooked up with a number of external libraries to add support
+for more formats. None of them are used by default, their use has to be
+explicitly requested by passing the appropriate flags to
+./configure
.
+
+
+
1.1 OpenJPEG# TOC
+
+
FFmpeg can use the OpenJPEG libraries for encoding/decoding J2K videos. Go to
+http://www.openjpeg.org/ to get the libraries and follow the installation
+instructions. To enable using OpenJPEG in FFmpeg, pass --enable-libopenjpeg
to
+./configure .
+
+
+
+
1.2 OpenCORE, VisualOn, and Fraunhofer libraries# TOC
+
+
Spun off Google Android sources, OpenCore, VisualOn and Fraunhofer
+libraries provide encoders for a number of audio codecs.
+
+
+
OpenCORE and VisualOn libraries are under the Apache License 2.0
+(see http://www.apache.org/licenses/LICENSE-2.0 for details), which is
+incompatible to the LGPL version 2.1 and GPL version 2. You have to
+upgrade FFmpeg’s license to LGPL version 3 (or if you have enabled
+GPL components, GPL version 3) by passing --enable-version3
to configure in
+order to use it.
+
+
The Fraunhofer AAC library is licensed under a license incompatible to the GPL
+and is not known to be compatible to the LGPL. Therefore, you have to pass
+--enable-nonfree
to configure to use it.
+
+
+
1.2.1 OpenCORE AMR# TOC
+
+
FFmpeg can make use of the OpenCORE libraries for AMR-NB
+decoding/encoding and AMR-WB decoding.
+
+
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
+instructions for installing the libraries.
+Then pass --enable-libopencore-amrnb
and/or
+--enable-libopencore-amrwb
to configure to enable them.
+
+
+
1.2.2 VisualOn AAC encoder library# TOC
+
+
FFmpeg can make use of the VisualOn AACenc library for AAC encoding.
+
+
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
+instructions for installing the library.
+Then pass --enable-libvo-aacenc
to configure to enable it.
+
+
+
1.2.3 VisualOn AMR-WB encoder library# TOC
+
+
FFmpeg can make use of the VisualOn AMR-WBenc library for AMR-WB encoding.
+
+
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
+instructions for installing the library.
+Then pass --enable-libvo-amrwbenc
to configure to enable it.
+
+
+
1.2.4 Fraunhofer AAC library# TOC
+
+
FFmpeg can make use of the Fraunhofer AAC library for AAC encoding.
+
+
Go to http://sourceforge.net/projects/opencore-amr/ and follow the
+instructions for installing the library.
+Then pass --enable-libfdk-aac
to configure to enable it.
+
+
+
1.3 LAME# TOC
+
+
FFmpeg can make use of the LAME library for MP3 encoding.
+
+
Go to http://lame.sourceforge.net/ and follow the
+instructions for installing the library.
+Then pass --enable-libmp3lame
to configure to enable it.
+
+
+
1.4 TwoLAME# TOC
+
+
FFmpeg can make use of the TwoLAME library for MP2 encoding.
+
+
Go to http://www.twolame.org/ and follow the
+instructions for installing the library.
+Then pass --enable-libtwolame
to configure to enable it.
+
+
+
1.5 libvpx# TOC
+
+
FFmpeg can make use of the libvpx library for VP8/VP9 encoding.
+
+
Go to http://www.webmproject.org/ and follow the instructions for
+installing the library. Then pass --enable-libvpx
to configure to
+enable it.
+
+
+
1.6 libwavpack# TOC
+
+
FFmpeg can make use of the libwavpack library for WavPack encoding.
+
+
Go to http://www.wavpack.com/ and follow the instructions for
+installing the library. Then pass --enable-libwavpack
to configure to
+enable it.
+
+
+
1.7 OpenH264# TOC
+
+
FFmpeg can make use of the OpenH264 library for H.264 encoding.
+
+
Go to http://www.openh264.org/ and follow the instructions for
+installing the library. Then pass --enable-libopenh264
to configure to
+enable it.
+
+
+
1.8 x264# TOC
+
+
FFmpeg can make use of the x264 library for H.264 encoding.
+
+
Go to http://www.videolan.org/developers/x264.html and follow the
+instructions for installing the library. Then pass --enable-libx264
to
+configure to enable it.
+
+
+
+
1.9 x265# TOC
+
+
FFmpeg can make use of the x265 library for HEVC encoding.
+
+
Go to http://x265.org/developers.html and follow the instructions
+for installing the library. Then pass --enable-libx265
to configure
+to enable it.
+
+
+
+
1.10 libilbc# TOC
+
+
iLBC is a narrowband speech codec that has been made freely available
+by Google as part of the WebRTC project. libilbc is a packaging friendly
+copy of the iLBC codec. FFmpeg can make use of the libilbc library for
+iLBC encoding and decoding.
+
+
Go to https://github.com/TimothyGu/libilbc and follow the instructions for
+installing the library. Then pass --enable-libilbc
to configure to
+enable it.
+
+
+
1.11 libzvbi# TOC
+
+
libzvbi is a VBI decoding library which can be used by FFmpeg to decode DVB
+teletext pages and DVB teletext subtitles.
+
+
Go to http://sourceforge.net/projects/zapping/ and follow the instructions for
+installing the library. Then pass --enable-libzvbi
to configure to
+enable it.
+
+
+
+
1.12 AviSynth# TOC
+
+
FFmpeg can read AviSynth scripts as input. To enable support, pass
+--enable-avisynth
to configure. The correct headers are
+included in compat/avisynth/, which allows the user to enable support
+without needing to search for these headers themselves.
+
+
For Windows, supported AviSynth variants are
+AviSynth 2.5 or 2.6 for 32-bit builds and
+AviSynth+ 0.1 for 32-bit and 64-bit builds.
+
+
For Linux and OS X, the supported AviSynth variant is
+AvxSynth .
+
+
+
AviSynth and AvxSynth are loaded dynamically. Distributors can build FFmpeg
+with --enable-avisynth
, and the binaries will work regardless of the
+end user having AviSynth or AvxSynth installed - they’ll only need to be
+installed to use AviSynth scripts (obviously).
+
+
+
+
2 Supported File Formats, Codecs or Features# TOC
+
+
You can use the -formats
and -codecs
options to have an exhaustive list.
+
+
+
2.1 File Formats# TOC
+
+
FFmpeg supports the following file formats through the libavformat
+library:
+
+
+Name Encoding Decoding Comments
+4xm X 4X Technologies format, used in some games.
+8088flex TMV X
+ACT Voice X contains G.729 audio
+Adobe Filmstrip X X
+Audio IFF (AIFF) X X
+American Laser Games MM X Multimedia format used in games like Mad Dog McCree.
+3GPP AMR X X
+Amazing Studio Packed Animation File X Multimedia format used in game Heart Of Darkness.
+Apple HTTP Live Streaming X
+Artworx Data Format X
+ADP X Audio format used on the Nintendo Gamecube.
+AFC X Audio format used on the Nintendo Gamecube.
+ASF X X
+AST X X Audio format used on the Nintendo Wii.
+AVI X X
+AviSynth X
+AVR X Audio format used on Mac.
+AVS X Multimedia format used by the Creature Shock game.
+Beam Software SIFF X Audio and video format used in some games by Beam Software.
+Bethesda Softworks VID X Used in some games from Bethesda Softworks.
+Binary text X
+Bink X Multimedia format used by many games.
+Bitmap Brothers JV X Used in Z and Z95 games.
+Brute Force & Ignorance X Used in the game Flash Traffic: City of Angels.
+BRSTM X Audio format used on the Nintendo Wii.
+BWF X X
+CRI ADX X X Audio-only format used in console video games.
+Discworld II BMV X
+Interplay C93 X Used in the game Cyberia from Interplay.
+Delphine Software International CIN X Multimedia format used by Delphine Software games.
+CD+G X Video format used by CD+G karaoke disks
+Phantom Cine X
+Commodore CDXL X Amiga CD video format
+Core Audio Format X X Apple Core Audio Format
+CRC testing format X
+Creative Voice X X Created for the Sound Blaster Pro.
+CRYO APC X Audio format used in some games by CRYO Interactive Entertainment.
+D-Cinema audio X X
+Deluxe Paint Animation X
+DFA X This format is used in Chronomaster game
+DSD Stream File (DSF) X
+DV video X X
+DXA X This format is used in the non-Windows version of the Feeble Files
+ game and different game cutscenes repacked for use with ScummVM.
+Electronic Arts cdata X
+Electronic Arts Multimedia X Used in various EA games; files have extensions like WVE and UV2.
+Ensoniq Paris Audio File X
+FFM (FFserver live feed) X X
+Flash (SWF) X X
+Flash 9 (AVM2) X X Only embedded audio is decoded.
+FLI/FLC/FLX animation X .fli/.flc files
+Flash Video (FLV) X X Macromedia Flash video files
+framecrc testing format X
+FunCom ISS X Audio format used in various games from FunCom like The Longest Journey.
+G.723.1 X X
+G.729 BIT X X
+G.729 raw X
+GIF Animation X X
+GXF X X General eXchange Format SMPTE 360M, used by Thomson Grass Valley
+ playout servers.
+HNM X Only version 4 supported, used in some games from Cryo Interactive
+iCEDraw File X
+ICO X X Microsoft Windows ICO
+id Quake II CIN video X
+id RoQ X X Used in Quake III, Jedi Knight 2 and other computer games.
+IEC61937 encapsulation X X
+IFF X Interchange File Format
+iLBC X X
+Interplay MVE X Format used in various Interplay computer games.
+IV8 X A format generated by IndigoVision 8000 video server.
+IVF (On2) X X A format used by libvpx
+IRCAM X X
+LATM X X
+LMLM4 X Used by Linux Media Labs MPEG-4 PCI boards
+LOAS X contains LATM multiplexed AAC audio
+LRC X X
+LVF X
+LXF X VR native stream format, used by Leitch/Harris’ video servers.
+Magic Lantern Video (MLV) X
+Matroska X X
+Matroska audio X
+FFmpeg metadata X X Metadata in text format.
+MAXIS XA X Used in Sim City 3000; file extension .xa.
+MD Studio X
+Metal Gear Solid: The Twin Snakes X
+Megalux Frame X Used by Megalux Ultimate Paint
+Mobotix .mxg X
+Monkey’s Audio X
+Motion Pixels MVI X
+MOV/QuickTime/MP4 X X 3GP, 3GP2, PSP, iPod variants supported
+MP2 X X
+MP3 X X
+MPEG-1 System X X muxed audio and video, VCD format supported
+MPEG-PS (program stream) X X also known as VOB
file, SVCD and DVD format supported
+MPEG-TS (transport stream) X X also known as DVB Transport Stream
+MPEG-4 X X MPEG-4 is a variant of QuickTime.
+Mirillis FIC video X No cursor rendering.
+MIME multipart JPEG X
+MSN TCP webcam X Used by MSN Messenger webcam streams.
+MTV X
+Musepack X
+Musepack SV8 X
+Material eXchange Format (MXF) X X SMPTE 377M, used by D-Cinema, broadcast industry.
+Material eXchange Format (MXF), D-10 Mapping X X SMPTE 386M, D-10/IMX Mapping.
+NC camera feed X NC (AVIP NC4600) camera streams
+NIST SPeech HEader REsources X
+NTT TwinVQ (VQF) X Nippon Telegraph and Telephone Corporation TwinVQ.
+Nullsoft Streaming Video X
+NuppelVideo X
+NUT X X NUT Open Container Format
+Ogg X X
+Playstation Portable PMP X
+Portable Voice Format X
+TechnoTrend PVA X Used by TechnoTrend DVB PCI boards.
+QCP X
+raw ADTS (AAC) X X
+raw AC-3 X X
+raw Chinese AVS video X X
+raw CRI ADX X X
+raw Dirac X X
+raw DNxHD X X
+raw DTS X X
+raw DTS-HD X
+raw E-AC-3 X X
+raw FLAC X X
+raw GSM X
+raw H.261 X X
+raw H.263 X X
+raw H.264 X X
+raw HEVC X X
+raw Ingenient MJPEG X
+raw MJPEG X X
+raw MLP X
+raw MPEG X
+raw MPEG-1 X
+raw MPEG-2 X
+raw MPEG-4 X X
+raw NULL X
+raw video X X
+raw id RoQ X
+raw Shorten X
+raw TAK X
+raw TrueHD X X
+raw VC-1 X X
+raw PCM A-law X X
+raw PCM mu-law X X
+raw PCM signed 8 bit X X
+raw PCM signed 16 bit big-endian X X
+raw PCM signed 16 bit little-endian X X
+raw PCM signed 24 bit big-endian X X
+raw PCM signed 24 bit little-endian X X
+raw PCM signed 32 bit big-endian X X
+raw PCM signed 32 bit little-endian X X
+raw PCM unsigned 8 bit X X
+raw PCM unsigned 16 bit big-endian X X
+raw PCM unsigned 16 bit little-endian X X
+raw PCM unsigned 24 bit big-endian X X
+raw PCM unsigned 24 bit little-endian X X
+raw PCM unsigned 32 bit big-endian X X
+raw PCM unsigned 32 bit little-endian X X
+raw PCM floating-point 32 bit big-endian X X
+raw PCM floating-point 32 bit little-endian X X
+raw PCM floating-point 64 bit big-endian X X
+raw PCM floating-point 64 bit little-endian X X
+RDT X
+REDCODE R3D X File format used by RED Digital cameras, contains JPEG 2000 frames and PCM audio.
+RealMedia X X
+Redirector X
+RedSpark X
+Renderware TeXture Dictionary X
+RL2 X Audio and video format used in some games by Entertainment Software Partners.
+RPL/ARMovie X
+Lego Mindstorms RSO X X
+RSD X
+RTMP X X Output is performed by publishing stream to RTMP server
+RTP X X
+RTSP X X
+SAP X X
+SBG X
+SDP X
+Sega FILM/CPK X Used in many Sega Saturn console games.
+Silicon Graphics Movie X
+Sierra SOL X .sol files used in Sierra Online games.
+Sierra VMD X Used in Sierra CD-ROM games.
+Smacker X Multimedia format used by many games.
+SMJPEG X X Used in certain Loki game ports.
+Smush X Multimedia format used in some LucasArts games.
+Sony OpenMG (OMA) X X Audio format used in Sony Sonic Stage and Sony Vegas.
+Sony PlayStation STR X
+Sony Wave64 (W64) X X
+SoX native format X X
+SUN AU format X X
+SUP raw PGS subtitles X
+Text files X
+THP X Used on the Nintendo GameCube.
+Tiertex Limited SEQ X Tiertex .seq files used in the DOS CD-ROM version of the game Flashback.
+True Audio X
+VC-1 test bitstream X X
+Vivo X
+WAV X X
+WavPack X X
+WebM X X
+Windows Television (WTV) X X
+Wing Commander III movie X Multimedia format used in Origin’s Wing Commander III computer game.
+Westwood Studios audio X Multimedia format used in Westwood Studios games.
+Westwood Studios VQA X Multimedia format used in Westwood Studios games.
+XMV X Microsoft video container used in Xbox games.
+xWMA X Microsoft audio container used by XAudio 2.
+eXtended BINary text (XBIN) X
+YUV4MPEG pipe X X
+Psygnosis YOP X
+
+
+
X
means that encoding (resp. decoding) is supported.
+
+
+
2.2 Image Formats# TOC
+
+
FFmpeg can read and write images for each frame of a video sequence. The
+following image formats are supported:
+
+
+Name Encoding Decoding Comments
+.Y.U.V X X one raw file per component
+Alias PIX X X Alias/Wavefront PIX image format
+animated GIF X X
+BMP X X Microsoft BMP image
+BRender PIX X Argonaut BRender 3D engine image format.
+DPX X X Digital Picture Exchange
+EXR X OpenEXR
+JPEG X X Progressive JPEG is not supported.
+JPEG 2000 X X
+JPEG-LS X X
+LJPEG X Lossless JPEG
+PAM X X PAM is a PNM extension with alpha support.
+PBM X X Portable BitMap image
+PCX X X PC Paintbrush
+PGM X X Portable GrayMap image
+PGMYUV X X PGM with U and V components in YUV 4:2:0
+PIC X Pictor/PC Paint
+PNG X X
+PPM X X Portable PixelMap image
+PTX X V.Flash PTX format
+SGI X X SGI RGB image format
+Sun Rasterfile X X Sun RAS image format
+TIFF X X YUV, JPEG and some extensions are not supported yet.
+Truevision Targa X X Targa (.TGA) image format
+WebP E X WebP image format, encoding supported through external library libwebp
+XBM X X X BitMap image format
+XFace X X X-Face image format
+XWD X X X Window Dump image format
+
+
+
X
means that encoding (resp. decoding) is supported.
+
+
E
means that support is provided through an external library.
+
+
+
2.3 Video Codecs# TOC
+
+
+Name Encoding Decoding Comments
+4X Movie X Used in certain computer games.
+8088flex TMV X
+A64 multicolor X Creates video suitable to be played on a commodore 64 (multicolor mode).
+Amazing Studio PAF Video X
+American Laser Games MM X Used in games like Mad Dog McCree.
+AMV Video X X Used in Chinese MP3 players.
+ANSI/ASCII art X
+Apple Intermediate Codec X
+Apple MJPEG-B X
+Apple ProRes X X
+Apple QuickDraw X fourcc: qdrw
+Asus v1 X X fourcc: ASV1
+Asus v2 X X fourcc: ASV2
+ATI VCR1 X fourcc: VCR1
+ATI VCR2 X fourcc: VCR2
+Auravision Aura X
+Auravision Aura 2 X
+Autodesk Animator Flic video X
+Autodesk RLE X fourcc: AASC
+Avid 1:1 10-bit RGB Packer X X fourcc: AVrp
+AVS (Audio Video Standard) video X Video encoding used by the Creature Shock game.
+AYUV X X Microsoft uncompressed packed 4:4:4:4
+Beam Software VB X
+Bethesda VID video X Used in some games from Bethesda Softworks.
+Bink Video X
+Bitmap Brothers JV video X
+y41p Brooktree uncompressed 4:1:1 12-bit X X
+Brute Force & Ignorance X Used in the game Flash Traffic: City of Angels.
+C93 video X Codec used in Cyberia game.
+CamStudio X fourcc: CSCD
+CD+G X Video codec for CD+G karaoke disks
+CDXL X Amiga CD video codec
+Chinese AVS video E X AVS1-P2, JiZhun profile, encoding through external library libxavs
+Delphine Software International CIN video X Codec used in Delphine Software International games.
+Discworld II BMV Video X
+Canopus Lossless Codec X
+Cinepak X
+Cirrus Logic AccuPak X X fourcc: CLJR
+CPiA Video Format X
+Creative YUV (CYUV) X
+DFA X Codec used in Chronomaster game.
+Dirac E X supported through external library libschroedinger
+Deluxe Paint Animation X
+DNxHD X X aka SMPTE VC3
+Duck TrueMotion 1.0 X fourcc: DUCK
+Duck TrueMotion 2.0 X fourcc: TM20
+DV (Digital Video) X X
+Dxtory capture format X
+Feeble Files/ScummVM DXA X Codec originally used in Feeble Files game.
+Electronic Arts CMV video X Used in NHL 95 game.
+Electronic Arts Madcow video X
+Electronic Arts TGV video X
+Electronic Arts TGQ video X
+Electronic Arts TQI video X
+Escape 124 X
+Escape 130 X
+FFmpeg video codec #1 X X lossless codec (fourcc: FFV1)
+Flash Screen Video v1 X X fourcc: FSV1
+Flash Screen Video v2 X X
+Flash Video (FLV) X X Sorenson H.263 used in Flash
+Forward Uncompressed X
+Fraps X
+Go2Webinar X fourcc: G2M4
+H.261 X X
+H.263 / H.263-1996 X X
+H.263+ / H.263-1998 / H.263 version 2 X X
+H.264 / AVC / MPEG-4 AVC / MPEG-4 part 10 E X encoding supported through external library libx264 and OpenH264
+HEVC X X encoding supported through the external library libx265
+HNM version 4 X
+HuffYUV X X
+HuffYUV FFmpeg variant X X
+IBM Ultimotion X fourcc: ULTI
+id Cinematic video X Used in Quake II.
+id RoQ video X X Used in Quake III, Jedi Knight 2, other computer games.
+IFF ILBM X IFF interleaved bitmap
+IFF ByteRun1 X IFF run length encoded bitmap
+Intel H.263 X
+Intel Indeo 2 X
+Intel Indeo 3 X
+Intel Indeo 4 X
+Intel Indeo 5 X
+Interplay C93 X Used in the game Cyberia from Interplay.
+Interplay MVE video X Used in Interplay .MVE files.
+J2K X X
+Karl Morton’s video codec X Codec used in Worms games.
+Kega Game Video (KGV1) X Kega emulator screen capture codec.
+Lagarith X
+LCL (LossLess Codec Library) MSZH X
+LCL (LossLess Codec Library) ZLIB E E
+LOCO X
+LucasArts SANM/Smush X Used in LucasArts games / SMUSH animations.
+lossless MJPEG X X
+Microsoft ATC Screen X Also known as Microsoft Screen 3.
+Microsoft Expression Encoder Screen X Also known as Microsoft Titanium Screen 2.
+Microsoft RLE X
+Microsoft Screen 1 X Also known as Windows Media Video V7 Screen.
+Microsoft Screen 2 X Also known as Windows Media Video V9 Screen.
+Microsoft Video 1 X
+Mimic X Used in MSN Messenger Webcam streams.
+Miro VideoXL X fourcc: VIXL
+MJPEG (Motion JPEG) X X
+Mobotix MxPEG video X
+Motion Pixels video X
+MPEG-1 video X X
+MPEG-2 video X X
+MPEG-4 part 2 X X libxvidcore can be used alternatively for encoding.
+MPEG-4 part 2 Microsoft variant version 1 X
+MPEG-4 part 2 Microsoft variant version 2 X X
+MPEG-4 part 2 Microsoft variant version 3 X X
+Nintendo Gamecube THP video X
+NuppelVideo/RTjpeg X Video encoding used in NuppelVideo files.
+On2 VP3 X still experimental
+On2 VP5 X fourcc: VP50
+On2 VP6 X fourcc: VP60,VP61,VP62
+On2 VP7 X fourcc: VP70,VP71
+VP8 E X fourcc: VP80, encoding supported through external library libvpx
+VP9 E X encoding supported through external library libvpx
+Pinnacle TARGA CineWave YUV16 X fourcc: Y216
+Prores X fourcc: apch,apcn,apcs,apco
+Q-team QPEG X fourccs: QPEG, Q1.0, Q1.1
+QuickTime 8BPS video X
+QuickTime Animation (RLE) video X X fourcc: ’rle ’
+QuickTime Graphics (SMC) X fourcc: ’smc ’
+QuickTime video (RPZA) X fourcc: rpza
+R10K AJA Kona 10-bit RGB Codec X X
+R210 Quicktime Uncompressed RGB 10-bit X X
+Raw Video X X
+RealVideo 1.0 X X
+RealVideo 2.0 X X
+RealVideo 3.0 X still far from ideal
+RealVideo 4.0 X
+Renderware TXD (TeXture Dictionary) X Texture dictionaries used by the Renderware Engine.
+RL2 video X used in some games by Entertainment Software Partners
+Sierra VMD video X Used in Sierra VMD files.
+Silicon Graphics Motion Video Compressor 1 (MVC1) X
+Silicon Graphics Motion Video Compressor 2 (MVC2) X
+Silicon Graphics RLE 8-bit video X
+Smacker video X Video encoding used in Smacker.
+SMPTE VC-1 X
+Snow X X experimental wavelet codec (fourcc: SNOW)
+Sony PlayStation MDEC (Motion DECoder) X
+Sorenson Vector Quantizer 1 X X fourcc: SVQ1
+Sorenson Vector Quantizer 3 X fourcc: SVQ3
+Sunplus JPEG (SP5X) X fourcc: SP5X
+TechSmith Screen Capture Codec X fourcc: TSCC
+TechSmith Screen Capture Codec 2 X fourcc: TSC2
+Theora E X encoding supported through external library libtheora
+Tiertex Limited SEQ video X Codec used in DOS CD-ROM FlashBack game.
+Ut Video X X
+v210 QuickTime uncompressed 4:2:2 10-bit X X
+v308 QuickTime uncompressed 4:4:4 X X
+v408 QuickTime uncompressed 4:4:4:4 X X
+v410 QuickTime uncompressed 4:4:4 10-bit X X
+VBLE Lossless Codec X
+VMware Screen Codec / VMware Video X Codec used in videos captured by VMware.
+Westwood Studios VQA (Vector Quantized Animation) video X
+Windows Media Image X
+Windows Media Video 7 X X
+Windows Media Video 8 X X
+Windows Media Video 9 X not completely working
+Wing Commander III / Xan X Used in Wing Commander III .MVE files.
+Wing Commander IV / Xan X Used in Wing Commander IV.
+Winnov WNV1 X
+WMV7 X X
+YAMAHA SMAF X X
+Psygnosis YOP Video X
+yuv4 X X libquicktime uncompressed packed 4:2:0
+ZeroCodec Lossless Video X
+ZLIB X X part of LCL, encoder experimental
+Zip Motion Blocks Video X X Encoder works only in PAL8.
+
+
+
X
means that encoding (resp. decoding) is supported.
+
+
E
means that support is provided through an external library.
+
+
+
2.4 Audio Codecs# TOC
+
+
+Name Encoding Decoding Comments
+8SVX exponential X
+8SVX fibonacci X
+AAC+ E X encoding supported through external library libaacplus
+AAC E X encoding supported through external library libfaac and libvo-aacenc
+AC-3 IX IX
+ADPCM 4X Movie X
+ADPCM CDROM XA X
+ADPCM Creative Technology X 16 -> 4, 8 -> 4, 8 -> 3, 8 -> 2
+ADPCM Electronic Arts X Used in various EA titles.
+ADPCM Electronic Arts Maxis CDROM XS X Used in Sim City 3000.
+ADPCM Electronic Arts R1 X
+ADPCM Electronic Arts R2 X
+ADPCM Electronic Arts R3 X
+ADPCM Electronic Arts XAS X
+ADPCM G.722 X X
+ADPCM G.726 X X
+ADPCM IMA AMV X Used in AMV files
+ADPCM IMA Electronic Arts EACS X
+ADPCM IMA Electronic Arts SEAD X
+ADPCM IMA Funcom X
+ADPCM IMA QuickTime X X
+ADPCM IMA Loki SDL MJPEG X
+ADPCM IMA WAV X X
+ADPCM IMA Westwood X
+ADPCM ISS IMA X Used in FunCom games.
+ADPCM IMA Dialogic X
+ADPCM IMA Duck DK3 X Used in some Sega Saturn console games.
+ADPCM IMA Duck DK4 X Used in some Sega Saturn console games.
+ADPCM IMA Radical X
+ADPCM Microsoft X X
+ADPCM MS IMA X X
+ADPCM Nintendo Gamecube AFC X
+ADPCM Nintendo Gamecube DTK X
+ADPCM Nintendo Gamecube THP X
+ADPCM QT IMA X X
+ADPCM SEGA CRI ADX X X Used in Sega Dreamcast games.
+ADPCM Shockwave Flash X X
+ADPCM Sound Blaster Pro 2-bit X
+ADPCM Sound Blaster Pro 2.6-bit X
+ADPCM Sound Blaster Pro 4-bit X
+ADPCM VIMA Used in LucasArts SMUSH animations.
+ADPCM Westwood Studios IMA X Used in Westwood Studios games like Command and Conquer.
+ADPCM Yamaha X X
+AMR-NB E X encoding supported through external library libopencore-amrnb
+AMR-WB E X encoding supported through external library libvo-amrwbenc
+Amazing Studio PAF Audio X
+Apple lossless audio X X QuickTime fourcc ’alac’
+ATRAC1 X
+ATRAC3 X
+ATRAC3+ X
+Bink Audio X Used in Bink and Smacker files in many games.
+CELT E decoding supported through external library libcelt
+Delphine Software International CIN audio X Codec used in Delphine Software International games.
+Discworld II BMV Audio X
+COOK X All versions except 5.1 are supported.
+DCA (DTS Coherent Acoustics) X X
+DPCM id RoQ X X Used in Quake III, Jedi Knight 2 and other computer games.
+DPCM Interplay X Used in various Interplay computer games.
+DPCM Sierra Online X Used in Sierra Online game audio files.
+DPCM Sol X
+DPCM Xan X Used in Origin’s Wing Commander IV AVI files.
+DSD (Direct Stream Digital), least significant bit first X
+DSD (Direct Stream Digital), most significant bit first X
+DSD (Direct Stream Digital), least significant bit first, planar X
+DSD (Direct Stream Digital), most significant bit first, planar X
+DSP Group TrueSpeech X
+DV audio X
+Enhanced AC-3 X X
+EVRC (Enhanced Variable Rate Codec) X
+FLAC (Free Lossless Audio Codec) X IX
+G.723.1 X X
+G.729 X
+GSM E X encoding supported through external library libgsm
+GSM Microsoft variant E X encoding supported through external library libgsm
+IAC (Indeo Audio Coder) X
+iLBC (Internet Low Bitrate Codec) E E encoding and decoding supported through external library libilbc
+IMC (Intel Music Coder) X
+MACE (Macintosh Audio Compression/Expansion) 3:1 X
+MACE (Macintosh Audio Compression/Expansion) 6:1 X
+MLP (Meridian Lossless Packing) X Used in DVD-Audio discs.
+Monkey’s Audio X
+MP1 (MPEG audio layer 1) IX
+MP2 (MPEG audio layer 2) IX IX encoding supported also through external library TwoLAME
+MP3 (MPEG audio layer 3) E IX encoding supported through external library LAME, ADU MP3 and MP3onMP4 also supported
+MPEG-4 Audio Lossless Coding (ALS) X
+Musepack SV7 X
+Musepack SV8 X
+Nellymoser Asao X X
+On2 AVC (Audio for Video Codec) X
+Opus E E supported through external library libopus
+PCM A-law X X
+PCM mu-law X X
+PCM signed 8-bit planar X X
+PCM signed 16-bit big-endian planar X X
+PCM signed 16-bit little-endian planar X X
+PCM signed 24-bit little-endian planar X X
+PCM signed 32-bit little-endian planar X X
+PCM 32-bit floating point big-endian X X
+PCM 32-bit floating point little-endian X X
+PCM 64-bit floating point big-endian X X
+PCM 64-bit floating point little-endian X X
+PCM D-Cinema audio signed 24-bit X X
+PCM signed 8-bit X X
+PCM signed 16-bit big-endian X X
+PCM signed 16-bit little-endian X X
+PCM signed 24-bit big-endian X X
+PCM signed 24-bit little-endian X X
+PCM signed 32-bit big-endian X X
+PCM signed 32-bit little-endian X X
+PCM signed 16/20/24-bit big-endian in MPEG-TS X
+PCM unsigned 8-bit X X
+PCM unsigned 16-bit big-endian X X
+PCM unsigned 16-bit little-endian X X
+PCM unsigned 24-bit big-endian X X
+PCM unsigned 24-bit little-endian X X
+PCM unsigned 32-bit big-endian X X
+PCM unsigned 32-bit little-endian X X
+PCM Zork X
+QCELP / PureVoice X
+QDesign Music Codec 2 X There are still some distortions.
+RealAudio 1.0 (14.4K) X X Real 14400 bit/s codec
+RealAudio 2.0 (28.8K) X Real 28800 bit/s codec
+RealAudio 3.0 (dnet) IX X Real low bitrate AC-3 codec
+RealAudio Lossless X
+RealAudio SIPR / ACELP.NET X
+Shorten X
+Sierra VMD audio X Used in Sierra VMD files.
+Smacker audio X
+SMPTE 302M AES3 audio X X
+Sonic X X experimental codec
+Sonic lossless X X experimental codec
+Speex E E supported through external library libspeex
+TAK (Tom’s lossless Audio Kompressor) X
+True Audio (TTA) X X
+TrueHD X Used in HD-DVD and Blu-Ray discs.
+TwinVQ (VQF flavor) X
+VIMA X Used in LucasArts SMUSH animations.
+Vorbis E X A native but very primitive encoder exists.
+Voxware MetaSound X
+WavPack X X
+Westwood Audio (SND1) X
+Windows Media Audio 1 X X
+Windows Media Audio 2 X X
+Windows Media Audio Lossless X
+Windows Media Audio Pro X
+Windows Media Audio Voice X
+
+
+
X
means that encoding (resp. decoding) is supported.
+
+
E
means that support is provided through an external library.
+
+
I
means that an integer-only version is available, too (ensures high
+performance on systems without hardware floating point support).
+
+
+
2.5 Subtitle Formats# TOC
+
+
+Name Muxing Demuxing Encoding Decoding
+3GPP Timed Text X X
+AQTitle X X
+DVB X X X X
+DVB teletext X E
+DVD X X X X
+JACOsub X X X
+MicroDVD X X X
+MPL2 X X
+MPsub (MPlayer) X X
+PGS X
+PJS (Phoenix) X X
+RealText X X
+SAMI X X
+Spruce format (STL) X X
+SSA/ASS X X X X
+SubRip (SRT) X X X X
+SubViewer v1 X X
+SubViewer X X
+TED Talks captions X X
+VobSub (IDX+SUB) X X
+VPlayer X X
+WebVTT X X X X
+XSUB X X
+
+
+
X
means that the feature is supported.
+
+
E
means that support is provided through an external library.
+
+
+
2.6 Network Protocols# TOC
+
+
+Name Support
+file X
+FTP X
+Gopher X
+HLS X
+HTTP X
+HTTPS X
+Icecast X
+MMSH X
+MMST X
+pipe X
+RTMP X
+RTMPE X
+RTMPS X
+RTMPT X
+RTMPTE X
+RTMPTS X
+RTP X
+SAMBA E
+SCTP X
+SFTP E
+TCP X
+TLS X
+UDP X
+
+
+
X
means that the protocol is supported.
+
+
E
means that support is provided through an external library.
+
+
+
+
2.7 Input/Output Devices# TOC
+
+
+Name Input Output
+ALSA X X
+BKTR X
+caca X
+DV1394 X
+Lavfi virtual device X
+Linux framebuffer X X
+JACK X
+LIBCDIO X
+LIBDC1394 X
+OpenAL X
+OpenGL X
+OSS X X
+PulseAudio X X
+SDL X
+Video4Linux2 X X
+VfW capture X
+X11 grabbing X
+Win32 grabbing X
+
+
+
X
means that input/output is supported.
+
+
+
2.8 Timecode# TOC
+
+
+Codec/format Read Write
+AVI X X
+DV X X
+GXF X X
+MOV X X
+MPEG1/2 X X
+MXF X X
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/git-howto.html b/Externals/ffmpeg/dev/doc/git-howto.html
new file mode 100644
index 0000000000..dbbc681f4e
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/git-howto.html
@@ -0,0 +1,493 @@
+
+
+
+
+
+
+ Using git to develop FFmpeg
+
+
+
+
+
+
+
+
+ Using git to develop FFmpeg
+
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Introduction# TOC
+
+
This document aims in giving some quick references on a set of useful git
+commands. You should always use the extensive and detailed documentation
+provided directly by git:
+
+
+
+
shows you the available subcommands,
+
+
+
git <command> --help
+man git-<command>
+
+
+
shows information about the subcommand <command>.
+
+
Additional information could be found on the
+Git Reference website
+
+
For more information about the Git project, visit the
+
+
Git website
+
+
Consult these resources whenever you have problems, they are quite exhaustive.
+
+
What follows now is a basic introduction to Git and some FFmpeg-specific
+guidelines to ease the contribution to the project
+
+
+
2 Basics Usage# TOC
+
+
+
2.1 Get GIT# TOC
+
+
You can get git from http://git-scm.com/
+Most distributions and operating systems provide a package for it.
+
+
+
+
2.2 Cloning the source tree# TOC
+
+
+
git clone git://source.ffmpeg.org/ffmpeg <target>
+
+
+
This will put the FFmpeg sources into the directory <target> .
+
+
+
git clone git@source.ffmpeg.org:ffmpeg <target>
+
+
+
This will put the FFmpeg sources into the directory <target> and let
+you push back your changes to the remote repository.
+
+
Make sure that you do not have Windows line endings in your checkouts,
+otherwise you may experience spurious compilation failures. One way to
+achieve this is to run
+
+
+
git config --global core.autocrlf false
+
+
+
+
+
2.3 Updating the source tree to the latest revision# TOC
+
+
+
+
pulls in the latest changes from the tracked branch. The tracked branch
+can be remote. By default the master branch tracks the branch master in
+the remote origin.
+
+
+
--rebase
(see below) is recommended.
+
+
+
2.4 Rebasing your local branches# TOC
+
+
+
+
fetches the changes from the main repository and replays your local commits
+over it. This is required to keep all your local changes at the top of
+FFmpeg’s master tree. The master tree will reject pushes with merge commits.
+
+
+
+
2.5 Adding/removing files/directories# TOC
+
+
+
git add [-A] <filename/dirname>
+git rm [-r] <filename/dirname>
+
+
+
GIT needs to get notified of all changes you make to your working
+directory that makes files appear or disappear.
+Line moves across files are automatically tracked.
+
+
+
+
2.6 Showing modifications# TOC
+
+
+
git diff <filename(s)>
+
+
+
will show all local modifications in your working directory as unified diff.
+
+
+
+
2.7 Inspecting the changelog# TOC
+
+
+
+
You may also use the graphical tools like gitview or gitk or the web
+interface available at http://source.ffmpeg.org/
+
+
+
2.8 Checking source tree status# TOC
+
+
+
+
detects all the changes you made and lists what actions will be taken in case
+of a commit (additions, modifications, deletions, etc.).
+
+
+
+
2.9 Committing# TOC
+
+
+
+
to double check your changes before committing them to avoid trouble later
+on. All experienced developers do this on each and every commit, no matter
+how small.
+Every one of them has been saved from looking like a fool by this many times.
+It’s very easy for stray debug output or cosmetic modifications to slip in,
+please avoid problems through this extra level of scrutiny.
+
+
For cosmetics-only commits you should get (almost) empty output from
+
+
+
git diff -w -b <filename(s)>
+
+
+
Also check the output of
+
+
+
+
to make sure you don’t have untracked files or deletions.
+
+
+
git add [-i|-p|-A] <filenames/dirnames>
+
+
+
Make sure you have told git your name and email address
+
+
+
git config --global user.name "My Name"
+git config --global user.email my@email.invalid
+
+
+
Use –global to set the global configuration for all your git checkouts.
+
+
Git will select the changes to the files for commit. Optionally you can use
+the interactive or the patch mode to select hunk by hunk what should be
+added to the commit.
+
+
+
+
+
Git will commit the selected changes to your current local branch.
+
+
You will be prompted for a log message in an editor, which is either
+set in your personal configuration file through
+
+
+
git config --global core.editor
+
+
+
or set by one of the following environment variables:
+GIT_EDITOR , VISUAL or EDITOR .
+
+
Log messages should be concise but descriptive. Explain why you made a change,
+what you did will be obvious from the changes themselves most of the time.
+Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
+levels look at and educate themselves while reading through your code. Don’t
+include filenames in log messages, Git provides that information.
+
+
Possibly make the commit message have a terse, descriptive first line, an
+empty line and then a full description. The first line will be used to name
+the patch by git format-patch.
+
+
+
2.10 Preparing a patchset# TOC
+
+
+
git format-patch <commit> [-o directory]
+
+
+
will generate a set of patches for each commit between <commit> and
+current HEAD . E.g.
+
+
+
git format-patch origin/master
+
+
+
will generate patches for all commits on current branch which are not
+present in upstream.
+A useful shortcut is also
+
+
+
+
which will generate patches from last n commits.
+By default the patches are created in the current directory.
+
+
+
2.11 Sending patches for review# TOC
+
+
+
git send-email <commit list|directory>
+
+
+
will send the patches created by git format-patch
or directly
+generates them. All the email fields can be configured in the global/local
+configuration or overridden by command line.
+Note that this tool must often be installed separately (e.g. git-email
+package on Debian-based distros).
+
+
+
+
2.12 Renaming/moving/copying files or contents of files# TOC
+
+
Git automatically tracks such changes, making those normal commits.
+
+
+
mv/cp path/file otherpath/otherfile
+git add [-A] .
+git commit
+
+
+
+
+
3 Git configuration# TOC
+
+
In order to simplify a few workflows, it is advisable to configure both
+your personal Git installation and your local FFmpeg repository.
+
+
+
3.1 Personal Git installation# TOC
+
+
Add the following to your ~/.gitconfig to help git send-email
+and git format-patch
detect renames:
+
+
+
[diff]
+ renames = copy
+
+
+
+
3.2 Repository configuration# TOC
+
+
In order to have git send-email
automatically send patches
+to the ffmpeg-devel mailing list, add the following stanza
+to /path/to/ffmpeg/repository/.git/config :
+
+
+
[sendemail]
+ to = ffmpeg-devel@ffmpeg.org
+
+
+
+
4 FFmpeg specific# TOC
+
+
+
4.1 Reverting broken commits# TOC
+
+
+
+
git reset
will uncommit the changes till <commit> rewriting
+the current branch history.
+
+
+
+
allows one to amend the last commit details quickly.
+
+
+
git rebase -i origin/master
+
+
+
will replay local commits over the main repository allowing to edit, merge
+or remove some of them in the process.
+
+
+
git reset
, git commit --amend
and git rebase
+rewrite history, so you should use them ONLY on your local or topic branches.
+The main repository will reject those changes.
+
+
+
+
git revert
will generate a revert commit. This will not make the
+faulty commit disappear from the history.
+
+
+
4.2 Pushing changes to remote trees# TOC
+
+
+
+
Will push the changes to the default remote (origin ).
+Git will prevent you from pushing changes if the local and remote trees are
+out of sync. Refer to the sections on updating and rebasing above to sync the local tree.
+
+
+
git remote add <name> <url>
+
+
+
Will add additional remote with a name reference, it is useful if you want
+to push your local branch for review on a remote host.
+
+
+
git push <remote> <refspec>
+
+
+
Will push the changes to the <remote> repository.
+Omitting <refspec> makes git push
update all the remote
+branches matching the local ones.
+
+
+
4.3 Finding a specific svn revision# TOC
+
+
Since version 1.7.1 git supports :/foo syntax for specifying commits
+based on a regular expression. see man gitrevisions
+
+
+
git show :/'as revision 23456'
+
+
+
will show the svn changeset r23456 . With older git versions searching in
+the git log
output is the easiest option (especially if a pager with
+search capabilities is used).
+This commit can be checked out with
+
+
+
git checkout -b svn_23456 :/'as revision 23456'
+
+
+
or for git < 1.7.1 with
+
+
+
git checkout -b svn_23456 $SHA1
+
+
+
where $SHA1 is the commit hash from the git log
output.
+
+
+
+
5 pre-push checklist# TOC
+
+
Once you have a set of commits that you feel are ready for pushing,
+work through the following checklist to doublecheck everything is in
+proper order. This list tries to be exhaustive. In case you are just
+pushing a typo in a comment, some of the steps may be unnecessary.
+Apply your common sense, but if in doubt, err on the side of caution.
+
+
First, make sure that the commits and branches you are going to push
+match what you want pushed and that nothing is missing, extraneous or
+wrong. You can see what will be pushed by running the git push command
+with –dry-run first. And then inspecting the commits listed with
+git log -p 1234567..987654
. The git status
command
+may help in finding local changes that have been forgotten to be added.
+
+
Next let the code pass through a full run of our testsuite.
+
+
+ make distclean
+ /path/to/ffmpeg/configure
+ make check
+ if fate fails due to missing samples run make fate-rsync
and retry
+
+
+
Make sure all your changes have been checked before pushing them, the
+testsuite only checks against regressions and that only to some extent. It does
+obviously not check newly added features/code to be working unless you have
+added a test for that (which is recommended).
+
+
Also note that every single commit should pass the test suite, not just
+the result of a series of patches.
+
+
Once everything passed, push the changes to your public ffmpeg clone and post a
+merge request to ffmpeg-devel. You can also push them directly but this is not
+recommended.
+
+
+
6 Server Issues# TOC
+
+
Contact the project admins root@ffmpeg.org if you have technical
+problems with the GIT server.
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libavcodec.html b/Externals/ffmpeg/dev/doc/libavcodec.html
new file mode 100644
index 0000000000..b15f00ec35
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libavcodec.html
@@ -0,0 +1,76 @@
+
+
+
+
+
+
+ Libavcodec Documentation
+
+
+
+
+
+
+
+
+ Libavcodec Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libavcodec library provides a generic encoding/decoding framework
+and contains multiple decoders and encoders for audio, video and
+subtitle streams, and several bitstream filters.
+
+
The shared architecture provides various services ranging from bit
+stream I/O to DSP optimizations, and makes it suitable for
+implementing robust and fast codecs as well as for experimentation.
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-codecs , bitstream-filters ,
+libavutil
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libavdevice.html b/Externals/ffmpeg/dev/doc/libavdevice.html
new file mode 100644
index 0000000000..dd0379b147
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libavdevice.html
@@ -0,0 +1,73 @@
+
+
+
+
+
+
+ Libavdevice Documentation
+
+
+
+
+
+
+
+
+ Libavdevice Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libavdevice library provides a generic framework for grabbing from
+and rendering to many common multimedia input/output devices, and
+supports several input and output devices, including Video4Linux2,
+VfW, DShow, and ALSA.
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-devices ,
+libavutil , libavcodec , libavformat
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libavfilter.html b/Externals/ffmpeg/dev/doc/libavfilter.html
new file mode 100644
index 0000000000..0bfd0e598a
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libavfilter.html
@@ -0,0 +1,72 @@
+
+
+
+
+
+
+ Libavfilter Documentation
+
+
+
+
+
+
+
+
+ Libavfilter Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libavfilter library provides a generic audio/video filtering
+framework containing several filters, sources and sinks.
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-filters ,
+libavutil , libswscale , libswresample ,
+libavcodec , libavformat , libavdevice
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libavformat.html b/Externals/ffmpeg/dev/doc/libavformat.html
new file mode 100644
index 0000000000..57e29c5057
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libavformat.html
@@ -0,0 +1,76 @@
+
+
+
+
+
+
+ Libavformat Documentation
+
+
+
+
+
+
+
+
+ Libavformat Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libavformat library provides a generic framework for multiplexing
+and demultiplexing (muxing and demuxing) audio, video and subtitle
+streams. It encompasses multiple muxers and demuxers for multimedia
+container formats.
+
+
It also supports several input and output protocols to access a media
+resource.
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-formats , ffmpeg-protocols ,
+libavutil , libavcodec
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libavutil.html b/Externals/ffmpeg/dev/doc/libavutil.html
new file mode 100644
index 0000000000..23e471d17a
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libavutil.html
@@ -0,0 +1,95 @@
+
+
+
+
+
+
+ Libavutil Documentation
+
+
+
+
+
+
+
+
+ Libavutil Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libavutil library is a utility library to aid portable
+multimedia programming. It contains safe portable string functions,
+random number generators, data structures, additional mathematics
+functions, cryptography and multimedia related functionality (like
+enumerations for pixel and sample formats). It is not a library for
+code needed by both libavcodec and libavformat.
+
+
The goals for this library is to be:
+
+
+Modular
+It should have few interdependencies and the possibility of disabling individual
+parts during ./configure
.
+
+
+Small
+Both sources and objects should be small.
+
+
+Efficient
+It should have low CPU and memory usage.
+
+
+Useful
+It should avoid useless features that almost no one needs.
+
+
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-utils
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libswresample.html b/Externals/ffmpeg/dev/doc/libswresample.html
new file mode 100644
index 0000000000..6df93990ef
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libswresample.html
@@ -0,0 +1,95 @@
+
+
+
+
+
+
+ Libswresample Documentation
+
+
+
+
+
+
+
+
+ Libswresample Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libswresample library performs highly optimized audio resampling,
+rematrixing and sample format conversion operations.
+
+
Specifically, this library performs the following conversions:
+
+
+ Resampling : is the process of changing the audio rate, for
+example from a high sample rate of 44100Hz to 8000Hz. Audio
+conversion from high to low sample rate is a lossy process. Several
+resampling options and algorithms are available.
+
+ Format conversion : is the process of converting the type of
+samples, for example from 16-bit signed samples to unsigned 8-bit or
+float samples. It also handles packing conversion, when passing from
+packed layout (all samples belonging to distinct channels interleaved
+in the same buffer), to planar layout (all samples belonging to the
+same channel stored in a dedicated buffer or "plane").
+
+ Rematrixing : is the process of changing the channel layout, for
+example from stereo to mono. When the input channels cannot be mapped
+to the output streams, the process is lossy, since it involves
+different gain factors and mixing.
+
+
+
Various other audio conversions (e.g. stretching and padding) are
+enabled through dedicated options.
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-resampler ,
+libavutil
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/libswscale.html b/Externals/ffmpeg/dev/doc/libswscale.html
new file mode 100644
index 0000000000..425df90758
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/libswscale.html
@@ -0,0 +1,89 @@
+
+
+
+
+
+
+ Libswscale Documentation
+
+
+
+
+
+
+
+
+ Libswscale Documentation
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
+
The libswscale library performs highly optimized image scaling and
+colorspace and pixel format conversion operations.
+
+
Specifically, this library performs the following conversions:
+
+
+
+
+
+
2 See Also# TOC
+
+
ffmpeg , ffplay , ffprobe , ffserver ,
+ffmpeg-scaler ,
+libavutil
+
+
+
+
3 Authors# TOC
+
+
The FFmpeg developers.
+
+
For details about the authorship, see the Git history of the project
+(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
+git log
in the FFmpeg source directory, or browsing the
+online repository at http://source.ffmpeg.org .
+
+
Maintainers for the specific components are listed in the file
+MAINTAINERS in the source code tree.
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/nut.html b/Externals/ffmpeg/dev/doc/nut.html
new file mode 100644
index 0000000000..7b16df6a4d
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/nut.html
@@ -0,0 +1,211 @@
+
+
+
+
+
+
+ NUT
+
+
+
+
+
+
+
+
+ NUT
+
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Description# TOC
+
NUT is a low overhead generic container format. It stores audio, video,
+subtitle and user-defined streams in a simple, yet efficient, way.
+
+
It was created by a group of FFmpeg and MPlayer developers in 2003
+and was finalized in 2008.
+
+
The official nut specification is at svn://svn.mplayerhq.hu/nut
+In case of any differences between this text and the official specification,
+the official specification shall prevail.
+
+
+
+
NUT has some variants signaled by using the flags field in its main header.
+
+
+BROADCAST Extend the syncpoint to report the sender wallclock
+PIPE Omit completely the syncpoint
+
+
+
+
2.1 BROADCAST# TOC
+
+
The BROADCAST variant provides a secondary time reference to facilitate
+detecting endpoint latency and network delays.
+It assumes all the endpoint clocks are synchronized.
+To be used in real-time scenarios.
+
+
+
2.2 PIPE# TOC
+
+
The PIPE variant assumes NUT is used as a non-seekable intermediate container;
+by omitting syncpoints it removes unneeded overhead and reduces the overall
+memory usage.
+
+
+
3 Container-specific codec tags# TOC
+
+
+
3.1 Generic raw YUVA formats# TOC
+
+
Since many exotic planar YUVA pixel formats are not considered by
+the AVI/QuickTime FourCC lists, the following scheme is adopted for
+representing them.
+
+
The first two bytes can contain the values:
+Y1 = only Y
+Y2 = Y+A
+Y3 = YUV
+Y4 = YUVA
+
+
The third byte represents the width and height chroma subsampling
+values for the UV planes, that is the amount to shift the luma
+width/height right to find the chroma width/height.
+
+
The fourth byte is the number of bits used (8, 16, ...).
+
+
If the order of bytes is inverted, that means that each component has
+to be read big-endian.
+
+
+
3.2 Raw Audio# TOC
+
+
+ALAW A-LAW
+ULAW MU-LAW
+P<type><interleaving><bits> little-endian PCM
+<bits><interleaving><type>P big-endian PCM
+
+
+
<type> is S for signed integer, U for unsigned integer, F for IEEE float
+<interleaving> is D for default, P is for planar.
+<bits> is 8/16/24/32
+
+
+
PFD[32] would for example be signed 32 bit little-endian IEEE float
+
+
+
+
3.3 Subtitles# TOC
+
+
+UTF8 Raw UTF-8
+SSA[0] SubStation Alpha
+DVDS DVD subtitles
+DVBS DVB subtitles
+
+
+
+
3.4 Raw Data# TOC
+
+
+
+
+
3.5 Codecs# TOC
+
+
+3IV1 non-compliant MPEG-4 generated by old 3ivx
+ASV1 Asus Video
+ASV2 Asus Video 2
+CVID Cinepak
+CYUV Creative YUV
+DIVX non-compliant MPEG-4 generated by old DivX
+DUCK Truemotion 1
+FFV1 FFmpeg video 1
+FFVH FFmpeg Huffyuv
+H261 ITU H.261
+H262 ITU H.262
+H263 ITU H.263
+H264 ITU H.264
+HFYU Huffyuv
+I263 Intel H.263
+IV31 Indeo 3.1
+IV32 Indeo 3.2
+IV50 Indeo 5.0
+LJPG ITU JPEG (lossless)
+MJLS ITU JPEG-LS
+MJPG ITU JPEG
+MPG4 MS MPEG-4v1 (not ISO MPEG-4)
+MP42 MS MPEG-4v2
+MP43 MS MPEG-4v3
+MP4V ISO MPEG-4 Part 2 Video (from old encoders)
+mpg1 ISO MPEG-1 Video
+mpg2 ISO MPEG-2 Video
+MRLE MS RLE
+MSVC MS Video 1
+RT21 Indeo 2.1
+RV10 RealVideo 1.0
+RV20 RealVideo 2.0
+RV30 RealVideo 3.0
+RV40 RealVideo 4.0
+SNOW FFmpeg Snow
+SVQ1 Sorenson Video 1
+SVQ3 Sorenson Video 3
+theo Xiph Theora
+TM20 Truemotion 2.0
+UMP4 non-compliant MPEG-4 generated by UB Video MPEG-4
+VCR1 ATI VCR1
+VP30 VP 3.0
+VP31 VP 3.1
+VP50 VP 5.0
+VP60 VP 6.0
+VP61 VP 6.1
+VP62 VP 6.2
+VP70 VP 7.0
+WMV1 MS WMV7
+WMV2 MS WMV8
+WMV3 MS WMV9
+WV1F non-compliant MPEG-4 generated by ?
+WVC1 VC-1
+XVID non-compliant MPEG-4 generated by old Xvid
+XVIX non-compliant MPEG-4 generated by old Xvid with interlacing bug
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/doc/platform.html b/Externals/ffmpeg/dev/doc/platform.html
new file mode 100644
index 0000000000..fb57926de2
--- /dev/null
+++ b/Externals/ffmpeg/dev/doc/platform.html
@@ -0,0 +1,447 @@
+
+
+
+
+
+
+ Platform Specific Information
+
+
+
+
+
+
+
+
+ Platform Specific Information
+
+
+
+
+
+
+
+
Table of Contents
+
+
+
+
+
+
1 Unix-like# TOC
+
+
Some parts of FFmpeg cannot be built with version 2.15 of the GNU
+assembler which is still provided by a few AMD64 distributions. To
+make sure your compiler really uses the required version of gas
+after a binutils upgrade, run:
+
+
+
$(gcc -print-prog-name=as) --version
+
+
+
If not, then you should install a different compiler that has no
+hard-coded path to gas. In the worst case pass --disable-asm
+to configure.
+
+
+
1.1 Advanced linking configuration# TOC
+
+
If you compiled FFmpeg libraries statically and you want to use them to
+build your own shared library, you may need to force PIC support (with
+--enable-pic
during FFmpeg configure) and add the following option
+to your project LDFLAGS:
+
+
+
+
If your target platform requires position independent binaries, you should
+pass the correct linking flag (e.g. -pie
) to --extra-ldexeflags
.
+
+
+
+
+
BSD make will not build FFmpeg, you need to install and use GNU Make
+(gmake
).
+
+
+
1.3 (Open)Solaris# TOC
+
+
GNU Make is required to build FFmpeg, so you have to invoke (gmake
),
+standard Solaris Make will not work. When building with a non-c99 front-end
+(gcc, generic suncc) add either --extra-libs=/usr/lib/values-xpg6.o
+or --extra-libs=/usr/lib/64/values-xpg6.o
to the configure options
+since the libc is not c99-compliant by default. The probes performed by
+configure may raise an exception leading to the death of configure itself
+due to a bug in the system shell. Simply invoke a different shell such as
+bash directly to work around this:
+
+
+
+
+
1.4 Darwin (Mac OS X, iPhone)# TOC
+
+
The toolchain provided with Xcode is sufficient to build the basic
+unaccelerated code.
+
+
Mac OS X on PowerPC or ARM (iPhone) requires a preprocessor from
+https://github.com/FFmpeg/gas-preprocessor or
+https://github.com/yuvi/gas-preprocessor (currently outdated) to build the optimized
+assembly functions. Put the Perl script somewhere
+in your PATH, FFmpeg’s configure will pick it up automatically.
+
+
Mac OS X on amd64 and x86 requires yasm
to build most of the
+optimized assembly functions. Fink ,
+Gentoo Prefix ,
+Homebrew
+or MacPorts can easily provide it.
+
+
+
+
+
+
Using a cross-compiler is preferred for various reasons.
+http://www.delorie.com/howto/djgpp/linux-x-djgpp.html
+
+
+
+
+
+
For information about compiling FFmpeg on OS/2 see
+http://www.edm2.com/index.php/FFmpeg .
+
+
+
+
4 Windows# TOC
+
+
To get help and instructions for building FFmpeg under Windows, check out
+the FFmpeg Windows Help Forum at http://ffmpeg.zeranoe.com/forum/ .
+
+
+
4.1 Native Windows compilation using MinGW or MinGW-w64# TOC
+
+
FFmpeg can be built to run natively on Windows using the MinGW or MinGW-w64
+toolchains. Install the latest versions of MSYS and MinGW or MinGW-w64 from
+http://www.mingw.org/ or http://mingw-w64.sourceforge.net/ .
+You can find detailed installation instructions in the download section and
+the FAQ.
+
+
Notes:
+
+
+ Building natively using MSYS can be sped up by disabling implicit rules
+in the Makefile by calling make -r
instead of plain make
. This
+speed up is close to non-existent for normal one-off builds and is only
+noticeable when running make for a second time (for example during
+make install
).
+
+ In order to compile FFplay, you must have the MinGW development library
+of SDL and pkg-config
installed.
+
+ By using ./configure --enable-shared
when configuring FFmpeg,
+you can build the FFmpeg libraries (e.g. libavutil, libavcodec,
+libavformat) as DLLs.
+
+
+
+
+
4.2 Microsoft Visual C++ or Intel C++ Compiler for Windows# TOC
+
+
FFmpeg can be built with MSVC 2012 or earlier using a C99-to-C89 conversion utility
+and wrapper, or with MSVC 2013 and ICL natively.
+
+
You will need the following prerequisites:
+
+
+
+
To set up a proper environment in MSYS, you need to run msys.bat
from
+the Visual Studio or Intel Compiler command prompt.
+
+
Place yasm.exe
somewhere in your PATH
. If using MSVC 2012 or
+earlier, place c99wrap.exe
and c99conv.exe
somewhere in your
+PATH
as well.
+
+
Next, make sure any other headers and libs you want to use, such as zlib, are
+located in a spot that the compiler can see. Do so by modifying the LIB
+and INCLUDE
environment variables to include the Windows-style
+paths to these directories. Alternatively, you can try and use the
+--extra-cflags
/--extra-ldflags
configure options. If using MSVC
+2012 or earlier, place inttypes.h
somewhere the compiler can see too.
+
+
Finally, run:
+
+
+
For MSVC:
+./configure --toolchain=msvc
+
+For ICL:
+./configure --toolchain=icl
+
+make
+make install
+
+
+
If you wish to compile shared libraries, add --enable-shared
to your
+configure options. Note that due to the way MSVC and ICL handle DLL imports and
+exports, you cannot compile static and shared libraries at the same time, and
+enabling shared libraries will automatically disable the static ones.
+
+
Notes:
+
+
+
+
+
4.2.1 Linking to FFmpeg with Microsoft Visual C++# TOC
+
+
If you plan to link with MSVC-built static libraries, you will need
+to make sure you have Runtime Library
set to
+Multi-threaded (/MT)
in your project’s settings.
+
+
You will need to define inline
to something MSVC understands:
+
+
#define inline __inline
+
+
+
Also note, that as stated in Microsoft Visual C++ , you will need
+an MSVC-compatible inttypes.h .
+
+
If you plan on using import libraries created by dlltool, you must
+set References
to No (/OPT:NOREF)
under the linker optimization
+settings, otherwise the resulting binaries will fail during runtime.
+This is not required when using import libraries generated by lib.exe
.
+This issue is reported upstream at
+http://sourceware.org/bugzilla/show_bug.cgi?id=12633 .
+
+
To create import libraries that work with the /OPT:REF
option
+(which is enabled by default in Release mode), follow these steps:
+
+
+ Open the Visual Studio Command Prompt .
+
+Alternatively, in a normal command line prompt, call vcvars32.bat
+which sets up the environment variables for the Visual C++ tools
+(the standard location for this file is something like
+C:\Program Files (x86)\Microsoft Visual Studio 10.0\VC\bin\vcvars32.bat ).
+
+ Enter the bin directory where the created LIB and DLL files
+are stored.
+
+ Generate new import libraries with lib.exe
:
+
+
+
lib /machine:i386 /def:..\lib\foo-version.def /out:foo.lib
+
+
+Replace foo-version
and foo
with the respective library names.
+
+
+
+
+
4.3 Cross compilation for Windows with Linux# TOC
+
+
You must use the MinGW cross compilation tools available at
+http://www.mingw.org/ .
+
+
Then configure FFmpeg with the following options:
+
+
./configure --target-os=mingw32 --cross-prefix=i386-mingw32msvc-
+
+
(you can change the cross-prefix according to the prefix chosen for the
+MinGW tools).
+
+
Then you can easily test FFmpeg with Wine .
+
+
+
4.4 Compilation under Cygwin# TOC
+
+
Please use Cygwin 1.7.x as the obsolete 1.5.x Cygwin versions lack
+llrint() in its C library.
+
+
Install your Cygwin with all the "Base" packages, plus the
+following "Devel" ones:
+
+
binutils, gcc4-core, make, git, mingw-runtime, texinfo
+
+
+
In order to run FATE you will also need the following "Utils" packages:
+
+
+
If you want to build FFmpeg with additional libraries, download Cygwin
+"Devel" packages for Ogg and Vorbis from any Cygwin packages repository:
+
+
libogg-devel, libvorbis-devel
+
+
+
These library packages are only available from
+Cygwin Ports :
+
+
+
yasm, libSDL-devel, libfaac-devel, libaacplus-devel, libgsm-devel, libmp3lame-devel,
+libschroedinger1.0-devel, speex-devel, libtheora-devel, libxvidcore-devel
+
+
+
The recommendation for x264 is to build it from source, as it evolves too
+quickly for Cygwin Ports to be up to date.
+
+
+
4.5 Crosscompilation for Windows under Cygwin# TOC
+
+
With Cygwin you can create Windows binaries that do not need the cygwin1.dll.
+
+
Just install your Cygwin as explained before, plus these additional
+"Devel" packages:
+
+
gcc-mingw-core, mingw-runtime, mingw-zlib
+
+
+
and add some special flags to your configure invocation.
+
+
For a static build run
+
+
./configure --target-os=mingw32 --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+
+
+
and for a build with shared libraries
+
+
./configure --target-os=mingw32 --enable-shared --disable-static --extra-cflags=-mno-cygwin --extra-libs=-mno-cygwin
+
+
+
+
5 Plan 9# TOC
+
+
The native Plan 9 compiler
+does not implement all the C99 features needed by FFmpeg so the gcc
+port must be used. Furthermore, a few items missing from the C
+library and shell environment need to be fixed.
+
+
+ GNU awk, grep, make, and sed
+
+Working packages of these tools can be found at
+ports2plan9 .
+They can be installed with 9front’s pkg
+utility by setting pkgpath
to
+http://ports2plan9.googlecode.com/files/
.
+
+ Missing/broken head
and printf
commands
+
+Replacements adequate for building FFmpeg can be found in the
+compat/plan9
directory. Place these somewhere they will be
+found by the shell. These are not full implementations of the
+commands and are not suitable for general use.
+
+ Missing C99 stdint.h
and inttypes.h
+
+Replacement headers are available from
+http://code.google.com/p/plan9front/issues/detail?id=152 .
+
+ Missing or non-standard library functions
+
+Some functions in the C library are missing or incomplete. The
+gcc-apelibs-1207
package from
+ports2plan9
+includes an updated C library, but installing the full package gives
+unusable executables. Instead, keep the files from gccbin.tgz
+under /386/lib/gnu
. From the libc.a
archive in the
+gcc-apelibs-1207
package, extract the following object files and
+turn them into a library:
+
+
+ strerror.o
+ strtoll.o
+ snprintf.o
+ vsnprintf.o
+ vfprintf.o
+ _IO_getc.o
+ _IO_putc.o
+
+
+Use the --extra-libs
option of configure
to inform the
+build system of this library.
+
+ FPU exceptions enabled by default
+
+Unlike most other systems, Plan 9 enables FPU exceptions by default.
+These must be disabled before calling any FFmpeg functions. While the
+included tools will do this automatically, other users of the
+libraries must do it themselves.
+
+
+
+
+
+ This document was generated on January 14, 2015 using makeinfo .
+
+
+
+
diff --git a/Externals/ffmpeg/dev/include/libavcodec/avcodec.h b/Externals/ffmpeg/dev/include/libavcodec/avcodec.h
new file mode 100644
index 0000000000..99467bb069
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/avcodec.h
@@ -0,0 +1,5262 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVCODEC_H
+#define AVCODEC_AVCODEC_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec external API header
+ */
+
+#include <errno.h>
+#include "libavutil/samplefmt.h"
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/buffer.h"
+#include "libavutil/cpu.h"
+#include "libavutil/channel_layout.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "version.h"
+
+/**
+ * @defgroup libavc Encoding/Decoding Library
+ * @{
+ *
+ * @defgroup lavc_decoding Decoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_encoding Encoding
+ * @{
+ * @}
+ *
+ * @defgroup lavc_codec Codecs
+ * @{
+ * @defgroup lavc_codec_native Native Codecs
+ * @{
+ * @}
+ * @defgroup lavc_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge
+ * @{
+ * @}
+ * @}
+ * @defgroup lavc_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+/**
+ * @defgroup lavc_core Core functions/structures.
+ * @ingroup libavc
+ *
+ * Basic definitions, functions for querying libavcodec capabilities,
+ * allocating core structures, etc.
+ * @{
+ */
+
+
+/**
+ * Identify the syntax and semantics of the bitstream.
+ * The principle is roughly:
+ * Two decoders with the same ID can decode the same streams.
+ * Two encoders with the same ID can encode compatible streams.
+ * There may be slight deviations from the principle due to implementation
+ * details.
+ *
+ * If you add a codec ID to this list, add it so that
+ * 1. no value of a existing codec ID changes (that would break ABI),
+ * 2. Give it a value which when taken as ASCII is recognized uniquely by a human as this specific codec.
+ * This ensures that 2 forks can independently add AVCodecIDs without producing conflicts.
+ *
+ * After adding new codec IDs, do not forget to add an entry to the codec
+ * descriptor list and bump libavcodec minor version.
+ */
+enum AVCodecID {
+ AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ AV_CODEC_ID_MPEG1VIDEO,
+ AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+#if FF_API_XVMC
+ AV_CODEC_ID_MPEG2VIDEO_XVMC,
+#endif /* FF_API_XVMC */
+ AV_CODEC_ID_H261,
+ AV_CODEC_ID_H263,
+ AV_CODEC_ID_RV10,
+ AV_CODEC_ID_RV20,
+ AV_CODEC_ID_MJPEG,
+ AV_CODEC_ID_MJPEGB,
+ AV_CODEC_ID_LJPEG,
+ AV_CODEC_ID_SP5X,
+ AV_CODEC_ID_JPEGLS,
+ AV_CODEC_ID_MPEG4,
+ AV_CODEC_ID_RAWVIDEO,
+ AV_CODEC_ID_MSMPEG4V1,
+ AV_CODEC_ID_MSMPEG4V2,
+ AV_CODEC_ID_MSMPEG4V3,
+ AV_CODEC_ID_WMV1,
+ AV_CODEC_ID_WMV2,
+ AV_CODEC_ID_H263P,
+ AV_CODEC_ID_H263I,
+ AV_CODEC_ID_FLV1,
+ AV_CODEC_ID_SVQ1,
+ AV_CODEC_ID_SVQ3,
+ AV_CODEC_ID_DVVIDEO,
+ AV_CODEC_ID_HUFFYUV,
+ AV_CODEC_ID_CYUV,
+ AV_CODEC_ID_H264,
+ AV_CODEC_ID_INDEO3,
+ AV_CODEC_ID_VP3,
+ AV_CODEC_ID_THEORA,
+ AV_CODEC_ID_ASV1,
+ AV_CODEC_ID_ASV2,
+ AV_CODEC_ID_FFV1,
+ AV_CODEC_ID_4XM,
+ AV_CODEC_ID_VCR1,
+ AV_CODEC_ID_CLJR,
+ AV_CODEC_ID_MDEC,
+ AV_CODEC_ID_ROQ,
+ AV_CODEC_ID_INTERPLAY_VIDEO,
+ AV_CODEC_ID_XAN_WC3,
+ AV_CODEC_ID_XAN_WC4,
+ AV_CODEC_ID_RPZA,
+ AV_CODEC_ID_CINEPAK,
+ AV_CODEC_ID_WS_VQA,
+ AV_CODEC_ID_MSRLE,
+ AV_CODEC_ID_MSVIDEO1,
+ AV_CODEC_ID_IDCIN,
+ AV_CODEC_ID_8BPS,
+ AV_CODEC_ID_SMC,
+ AV_CODEC_ID_FLIC,
+ AV_CODEC_ID_TRUEMOTION1,
+ AV_CODEC_ID_VMDVIDEO,
+ AV_CODEC_ID_MSZH,
+ AV_CODEC_ID_ZLIB,
+ AV_CODEC_ID_QTRLE,
+ AV_CODEC_ID_TSCC,
+ AV_CODEC_ID_ULTI,
+ AV_CODEC_ID_QDRAW,
+ AV_CODEC_ID_VIXL,
+ AV_CODEC_ID_QPEG,
+ AV_CODEC_ID_PNG,
+ AV_CODEC_ID_PPM,
+ AV_CODEC_ID_PBM,
+ AV_CODEC_ID_PGM,
+ AV_CODEC_ID_PGMYUV,
+ AV_CODEC_ID_PAM,
+ AV_CODEC_ID_FFVHUFF,
+ AV_CODEC_ID_RV30,
+ AV_CODEC_ID_RV40,
+ AV_CODEC_ID_VC1,
+ AV_CODEC_ID_WMV3,
+ AV_CODEC_ID_LOCO,
+ AV_CODEC_ID_WNV1,
+ AV_CODEC_ID_AASC,
+ AV_CODEC_ID_INDEO2,
+ AV_CODEC_ID_FRAPS,
+ AV_CODEC_ID_TRUEMOTION2,
+ AV_CODEC_ID_BMP,
+ AV_CODEC_ID_CSCD,
+ AV_CODEC_ID_MMVIDEO,
+ AV_CODEC_ID_ZMBV,
+ AV_CODEC_ID_AVS,
+ AV_CODEC_ID_SMACKVIDEO,
+ AV_CODEC_ID_NUV,
+ AV_CODEC_ID_KMVC,
+ AV_CODEC_ID_FLASHSV,
+ AV_CODEC_ID_CAVS,
+ AV_CODEC_ID_JPEG2000,
+ AV_CODEC_ID_VMNC,
+ AV_CODEC_ID_VP5,
+ AV_CODEC_ID_VP6,
+ AV_CODEC_ID_VP6F,
+ AV_CODEC_ID_TARGA,
+ AV_CODEC_ID_DSICINVIDEO,
+ AV_CODEC_ID_TIERTEXSEQVIDEO,
+ AV_CODEC_ID_TIFF,
+ AV_CODEC_ID_GIF,
+ AV_CODEC_ID_DXA,
+ AV_CODEC_ID_DNXHD,
+ AV_CODEC_ID_THP,
+ AV_CODEC_ID_SGI,
+ AV_CODEC_ID_C93,
+ AV_CODEC_ID_BETHSOFTVID,
+ AV_CODEC_ID_PTX,
+ AV_CODEC_ID_TXD,
+ AV_CODEC_ID_VP6A,
+ AV_CODEC_ID_AMV,
+ AV_CODEC_ID_VB,
+ AV_CODEC_ID_PCX,
+ AV_CODEC_ID_SUNRAST,
+ AV_CODEC_ID_INDEO4,
+ AV_CODEC_ID_INDEO5,
+ AV_CODEC_ID_MIMIC,
+ AV_CODEC_ID_RL2,
+ AV_CODEC_ID_ESCAPE124,
+ AV_CODEC_ID_DIRAC,
+ AV_CODEC_ID_BFI,
+ AV_CODEC_ID_CMV,
+ AV_CODEC_ID_MOTIONPIXELS,
+ AV_CODEC_ID_TGV,
+ AV_CODEC_ID_TGQ,
+ AV_CODEC_ID_TQI,
+ AV_CODEC_ID_AURA,
+ AV_CODEC_ID_AURA2,
+ AV_CODEC_ID_V210X,
+ AV_CODEC_ID_TMV,
+ AV_CODEC_ID_V210,
+ AV_CODEC_ID_DPX,
+ AV_CODEC_ID_MAD,
+ AV_CODEC_ID_FRWU,
+ AV_CODEC_ID_FLASHSV2,
+ AV_CODEC_ID_CDGRAPHICS,
+ AV_CODEC_ID_R210,
+ AV_CODEC_ID_ANM,
+ AV_CODEC_ID_BINKVIDEO,
+ AV_CODEC_ID_IFF_ILBM,
+ AV_CODEC_ID_IFF_BYTERUN1,
+ AV_CODEC_ID_KGV1,
+ AV_CODEC_ID_YOP,
+ AV_CODEC_ID_VP8,
+ AV_CODEC_ID_PICTOR,
+ AV_CODEC_ID_ANSI,
+ AV_CODEC_ID_A64_MULTI,
+ AV_CODEC_ID_A64_MULTI5,
+ AV_CODEC_ID_R10K,
+ AV_CODEC_ID_MXPEG,
+ AV_CODEC_ID_LAGARITH,
+ AV_CODEC_ID_PRORES,
+ AV_CODEC_ID_JV,
+ AV_CODEC_ID_DFA,
+ AV_CODEC_ID_WMV3IMAGE,
+ AV_CODEC_ID_VC1IMAGE,
+ AV_CODEC_ID_UTVIDEO,
+ AV_CODEC_ID_BMV_VIDEO,
+ AV_CODEC_ID_VBLE,
+ AV_CODEC_ID_DXTORY,
+ AV_CODEC_ID_V410,
+ AV_CODEC_ID_XWD,
+ AV_CODEC_ID_CDXL,
+ AV_CODEC_ID_XBM,
+ AV_CODEC_ID_ZEROCODEC,
+ AV_CODEC_ID_MSS1,
+ AV_CODEC_ID_MSA1,
+ AV_CODEC_ID_TSCC2,
+ AV_CODEC_ID_MTS2,
+ AV_CODEC_ID_CLLC,
+ AV_CODEC_ID_MSS2,
+ AV_CODEC_ID_VP9,
+ AV_CODEC_ID_AIC,
+ AV_CODEC_ID_ESCAPE130_DEPRECATED,
+ AV_CODEC_ID_G2M_DEPRECATED,
+ AV_CODEC_ID_WEBP_DEPRECATED,
+ AV_CODEC_ID_HNM4_VIDEO,
+ AV_CODEC_ID_HEVC_DEPRECATED,
+ AV_CODEC_ID_FIC,
+ AV_CODEC_ID_ALIAS_PIX,
+ AV_CODEC_ID_BRENDER_PIX_DEPRECATED,
+ AV_CODEC_ID_PAF_VIDEO_DEPRECATED,
+ AV_CODEC_ID_EXR_DEPRECATED,
+ AV_CODEC_ID_VP7_DEPRECATED,
+ AV_CODEC_ID_SANM_DEPRECATED,
+ AV_CODEC_ID_SGIRLE_DEPRECATED,
+ AV_CODEC_ID_MVC1_DEPRECATED,
+ AV_CODEC_ID_MVC2_DEPRECATED,
+
+ AV_CODEC_ID_BRENDER_PIX= MKBETAG('B','P','I','X'),
+ AV_CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
+ AV_CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
+ AV_CODEC_ID_EXR = MKBETAG('0','E','X','R'),
+ AV_CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
+
+ AV_CODEC_ID_012V = MKBETAG('0','1','2','V'),
+ AV_CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
+ AV_CODEC_ID_AVUI = MKBETAG('A','V','U','I'),
+ AV_CODEC_ID_AYUV = MKBETAG('A','Y','U','V'),
+ AV_CODEC_ID_TARGA_Y216 = MKBETAG('T','2','1','6'),
+ AV_CODEC_ID_V308 = MKBETAG('V','3','0','8'),
+ AV_CODEC_ID_V408 = MKBETAG('V','4','0','8'),
+ AV_CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
+ AV_CODEC_ID_SANM = MKBETAG('S','A','N','M'),
+ AV_CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'),
+ AV_CODEC_ID_AVRN = MKBETAG('A','V','R','n'),
+ AV_CODEC_ID_CPIA = MKBETAG('C','P','I','A'),
+ AV_CODEC_ID_XFACE = MKBETAG('X','F','A','C'),
+ AV_CODEC_ID_SGIRLE = MKBETAG('S','G','I','R'),
+ AV_CODEC_ID_MVC1 = MKBETAG('M','V','C','1'),
+ AV_CODEC_ID_MVC2 = MKBETAG('M','V','C','2'),
+ AV_CODEC_ID_SNOW = MKBETAG('S','N','O','W'),
+ AV_CODEC_ID_WEBP = MKBETAG('W','E','B','P'),
+ AV_CODEC_ID_SMVJPEG = MKBETAG('S','M','V','J'),
+ AV_CODEC_ID_HEVC = MKBETAG('H','2','6','5'),
+#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC
+ AV_CODEC_ID_VP7 = MKBETAG('V','P','7','0'),
+ AV_CODEC_ID_APNG = MKBETAG('A','P','N','G'),
+
+ /* various PCM "codecs" */
+ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ AV_CODEC_ID_PCM_S16LE = 0x10000,
+ AV_CODEC_ID_PCM_S16BE,
+ AV_CODEC_ID_PCM_U16LE,
+ AV_CODEC_ID_PCM_U16BE,
+ AV_CODEC_ID_PCM_S8,
+ AV_CODEC_ID_PCM_U8,
+ AV_CODEC_ID_PCM_MULAW,
+ AV_CODEC_ID_PCM_ALAW,
+ AV_CODEC_ID_PCM_S32LE,
+ AV_CODEC_ID_PCM_S32BE,
+ AV_CODEC_ID_PCM_U32LE,
+ AV_CODEC_ID_PCM_U32BE,
+ AV_CODEC_ID_PCM_S24LE,
+ AV_CODEC_ID_PCM_S24BE,
+ AV_CODEC_ID_PCM_U24LE,
+ AV_CODEC_ID_PCM_U24BE,
+ AV_CODEC_ID_PCM_S24DAUD,
+ AV_CODEC_ID_PCM_ZORK,
+ AV_CODEC_ID_PCM_S16LE_PLANAR,
+ AV_CODEC_ID_PCM_DVD,
+ AV_CODEC_ID_PCM_F32BE,
+ AV_CODEC_ID_PCM_F32LE,
+ AV_CODEC_ID_PCM_F64BE,
+ AV_CODEC_ID_PCM_F64LE,
+ AV_CODEC_ID_PCM_BLURAY,
+ AV_CODEC_ID_PCM_LXF,
+ AV_CODEC_ID_S302M,
+ AV_CODEC_ID_PCM_S8_PLANAR,
+ AV_CODEC_ID_PCM_S24LE_PLANAR_DEPRECATED,
+ AV_CODEC_ID_PCM_S32LE_PLANAR_DEPRECATED,
+ AV_CODEC_ID_PCM_S24LE_PLANAR = MKBETAG(24,'P','S','P'),
+ AV_CODEC_ID_PCM_S32LE_PLANAR = MKBETAG(32,'P','S','P'),
+ AV_CODEC_ID_PCM_S16BE_PLANAR = MKBETAG('P','S','P',16),
+
+ /* various ADPCM codecs */
+ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ AV_CODEC_ID_ADPCM_IMA_WAV,
+ AV_CODEC_ID_ADPCM_IMA_DK3,
+ AV_CODEC_ID_ADPCM_IMA_DK4,
+ AV_CODEC_ID_ADPCM_IMA_WS,
+ AV_CODEC_ID_ADPCM_IMA_SMJPEG,
+ AV_CODEC_ID_ADPCM_MS,
+ AV_CODEC_ID_ADPCM_4XM,
+ AV_CODEC_ID_ADPCM_XA,
+ AV_CODEC_ID_ADPCM_ADX,
+ AV_CODEC_ID_ADPCM_EA,
+ AV_CODEC_ID_ADPCM_G726,
+ AV_CODEC_ID_ADPCM_CT,
+ AV_CODEC_ID_ADPCM_SWF,
+ AV_CODEC_ID_ADPCM_YAMAHA,
+ AV_CODEC_ID_ADPCM_SBPRO_4,
+ AV_CODEC_ID_ADPCM_SBPRO_3,
+ AV_CODEC_ID_ADPCM_SBPRO_2,
+ AV_CODEC_ID_ADPCM_THP,
+ AV_CODEC_ID_ADPCM_IMA_AMV,
+ AV_CODEC_ID_ADPCM_EA_R1,
+ AV_CODEC_ID_ADPCM_EA_R3,
+ AV_CODEC_ID_ADPCM_EA_R2,
+ AV_CODEC_ID_ADPCM_IMA_EA_SEAD,
+ AV_CODEC_ID_ADPCM_IMA_EA_EACS,
+ AV_CODEC_ID_ADPCM_EA_XAS,
+ AV_CODEC_ID_ADPCM_EA_MAXIS_XA,
+ AV_CODEC_ID_ADPCM_IMA_ISS,
+ AV_CODEC_ID_ADPCM_G722,
+ AV_CODEC_ID_ADPCM_IMA_APC,
+ AV_CODEC_ID_ADPCM_VIMA_DEPRECATED,
+ AV_CODEC_ID_ADPCM_VIMA = MKBETAG('V','I','M','A'),
+ AV_CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
+ AV_CODEC_ID_ADPCM_AFC = MKBETAG('A','F','C',' '),
+ AV_CODEC_ID_ADPCM_IMA_OKI = MKBETAG('O','K','I',' '),
+ AV_CODEC_ID_ADPCM_DTK = MKBETAG('D','T','K',' '),
+ AV_CODEC_ID_ADPCM_IMA_RAD = MKBETAG('R','A','D',' '),
+ AV_CODEC_ID_ADPCM_G726LE = MKBETAG('6','2','7','G'),
+
+ /* AMR */
+ AV_CODEC_ID_AMR_NB = 0x12000,
+ AV_CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ AV_CODEC_ID_RA_144 = 0x13000,
+ AV_CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ AV_CODEC_ID_ROQ_DPCM = 0x14000,
+ AV_CODEC_ID_INTERPLAY_DPCM,
+ AV_CODEC_ID_XAN_DPCM,
+ AV_CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ AV_CODEC_ID_MP2 = 0x15000,
+ AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ AV_CODEC_ID_AAC,
+ AV_CODEC_ID_AC3,
+ AV_CODEC_ID_DTS,
+ AV_CODEC_ID_VORBIS,
+ AV_CODEC_ID_DVAUDIO,
+ AV_CODEC_ID_WMAV1,
+ AV_CODEC_ID_WMAV2,
+ AV_CODEC_ID_MACE3,
+ AV_CODEC_ID_MACE6,
+ AV_CODEC_ID_VMDAUDIO,
+ AV_CODEC_ID_FLAC,
+ AV_CODEC_ID_MP3ADU,
+ AV_CODEC_ID_MP3ON4,
+ AV_CODEC_ID_SHORTEN,
+ AV_CODEC_ID_ALAC,
+ AV_CODEC_ID_WESTWOOD_SND1,
+ AV_CODEC_ID_GSM, ///< as in Berlin toast format
+ AV_CODEC_ID_QDM2,
+ AV_CODEC_ID_COOK,
+ AV_CODEC_ID_TRUESPEECH,
+ AV_CODEC_ID_TTA,
+ AV_CODEC_ID_SMACKAUDIO,
+ AV_CODEC_ID_QCELP,
+ AV_CODEC_ID_WAVPACK,
+ AV_CODEC_ID_DSICINAUDIO,
+ AV_CODEC_ID_IMC,
+ AV_CODEC_ID_MUSEPACK7,
+ AV_CODEC_ID_MLP,
+ AV_CODEC_ID_GSM_MS, /* as found in WAV */
+ AV_CODEC_ID_ATRAC3,
+#if FF_API_VOXWARE
+ AV_CODEC_ID_VOXWARE,
+#endif
+ AV_CODEC_ID_APE,
+ AV_CODEC_ID_NELLYMOSER,
+ AV_CODEC_ID_MUSEPACK8,
+ AV_CODEC_ID_SPEEX,
+ AV_CODEC_ID_WMAVOICE,
+ AV_CODEC_ID_WMAPRO,
+ AV_CODEC_ID_WMALOSSLESS,
+ AV_CODEC_ID_ATRAC3P,
+ AV_CODEC_ID_EAC3,
+ AV_CODEC_ID_SIPR,
+ AV_CODEC_ID_MP1,
+ AV_CODEC_ID_TWINVQ,
+ AV_CODEC_ID_TRUEHD,
+ AV_CODEC_ID_MP4ALS,
+ AV_CODEC_ID_ATRAC1,
+ AV_CODEC_ID_BINKAUDIO_RDFT,
+ AV_CODEC_ID_BINKAUDIO_DCT,
+ AV_CODEC_ID_AAC_LATM,
+ AV_CODEC_ID_QDMC,
+ AV_CODEC_ID_CELT,
+ AV_CODEC_ID_G723_1,
+ AV_CODEC_ID_G729,
+ AV_CODEC_ID_8SVX_EXP,
+ AV_CODEC_ID_8SVX_FIB,
+ AV_CODEC_ID_BMV_AUDIO,
+ AV_CODEC_ID_RALF,
+ AV_CODEC_ID_IAC,
+ AV_CODEC_ID_ILBC,
+ AV_CODEC_ID_OPUS_DEPRECATED,
+ AV_CODEC_ID_COMFORT_NOISE,
+ AV_CODEC_ID_TAK_DEPRECATED,
+ AV_CODEC_ID_METASOUND,
+ AV_CODEC_ID_PAF_AUDIO_DEPRECATED,
+ AV_CODEC_ID_ON2AVC,
+ AV_CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
+ AV_CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
+ AV_CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
+ AV_CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'),
+ AV_CODEC_ID_OPUS = MKBETAG('O','P','U','S'),
+ AV_CODEC_ID_TAK = MKBETAG('t','B','a','K'),
+ AV_CODEC_ID_EVRC = MKBETAG('s','e','v','c'),
+ AV_CODEC_ID_SMV = MKBETAG('s','s','m','v'),
+ AV_CODEC_ID_DSD_LSBF = MKBETAG('D','S','D','L'),
+ AV_CODEC_ID_DSD_MSBF = MKBETAG('D','S','D','M'),
+ AV_CODEC_ID_DSD_LSBF_PLANAR = MKBETAG('D','S','D','1'),
+ AV_CODEC_ID_DSD_MSBF_PLANAR = MKBETAG('D','S','D','8'),
+
+ /* subtitle codecs */
+ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ AV_CODEC_ID_DVD_SUBTITLE = 0x17000,
+ AV_CODEC_ID_DVB_SUBTITLE,
+ AV_CODEC_ID_TEXT, ///< raw UTF-8 text
+ AV_CODEC_ID_XSUB,
+ AV_CODEC_ID_SSA,
+ AV_CODEC_ID_MOV_TEXT,
+ AV_CODEC_ID_HDMV_PGS_SUBTITLE,
+ AV_CODEC_ID_DVB_TELETEXT,
+ AV_CODEC_ID_SRT,
+ AV_CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'),
+ AV_CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'),
+ AV_CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'),
+ AV_CODEC_ID_SAMI = MKBETAG('S','A','M','I'),
+ AV_CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'),
+ AV_CODEC_ID_STL = MKBETAG('S','p','T','L'),
+ AV_CODEC_ID_SUBVIEWER1 = MKBETAG('S','b','V','1'),
+ AV_CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'),
+ AV_CODEC_ID_SUBRIP = MKBETAG('S','R','i','p'),
+ AV_CODEC_ID_WEBVTT = MKBETAG('W','V','T','T'),
+ AV_CODEC_ID_MPL2 = MKBETAG('M','P','L','2'),
+ AV_CODEC_ID_VPLAYER = MKBETAG('V','P','l','r'),
+ AV_CODEC_ID_PJS = MKBETAG('P','h','J','S'),
+ AV_CODEC_ID_ASS = MKBETAG('A','S','S',' '), ///< ASS as defined in Matroska
+
+ /* other specific kind of codecs (generally used for attachments) */
+ AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ AV_CODEC_ID_TTF = 0x18000,
+ AV_CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'),
+ AV_CODEC_ID_XBIN = MKBETAG('X','B','I','N'),
+ AV_CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'),
+ AV_CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'),
+ AV_CODEC_ID_SMPTE_KLV = MKBETAG('K','L','V','A'),
+ AV_CODEC_ID_DVD_NAV = MKBETAG('D','N','A','V'),
+ AV_CODEC_ID_TIMED_ID3 = MKBETAG('T','I','D','3'),
+ AV_CODEC_ID_BIN_DATA = MKBETAG('D','A','T','A'),
+
+
+ AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it
+
+ AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+
+#if FF_API_CODEC_ID
+#include "old_codec_ids.h"
+#endif
+};
+
+/**
+ * This struct describes the properties of a single codec described by an
+ * AVCodecID.
+ * @see avcodec_descriptor_get()
+ */
+typedef struct AVCodecDescriptor {
+ /**
+ * Codec ID this descriptor describes.
+ */
+ enum AVCodecID id;
+ /**
+ * Media type (video, audio, subtitle, ...) handled by this codec.
+ */
+ enum AVMediaType type;
+ /**
+ * Name of the codec described by this descriptor. It is non-empty and
+ * unique for each codec descriptor. It should contain alphanumeric
+ * characters and '_' only.
+ */
+ const char *name;
+ /**
+ * A more descriptive name for this codec. May be NULL.
+ */
+ const char *long_name;
+ /**
+ * Codec properties, a combination of AV_CODEC_PROP_* flags.
+ */
+ int props;
+
+ /**
+ * MIME type(s) associated with the codec.
+ * May be NULL; if not, a NULL-terminated array of MIME types.
+ * The first item is always non-NULL and is the preferred MIME type.
+ */
+ const char *const *mime_types;
+} AVCodecDescriptor;
+
+/**
+ * Codec uses only intra compression.
+ * Video codecs only.
+ */
+#define AV_CODEC_PROP_INTRA_ONLY (1 << 0)
+/**
+ * Codec supports lossy compression. Audio and video codecs only.
+ * @note a codec may support both lossy and lossless
+ * compression modes.
+ */
+#define AV_CODEC_PROP_LOSSY (1 << 1)
+/**
+ * Codec supports lossless compression. Audio and video codecs only.
+ */
+#define AV_CODEC_PROP_LOSSLESS (1 << 2)
+/**
+ * Codec supports frame reordering. That is, the coded order (the order in which
+ * the encoded packets are output by the encoders / stored / input to the
+ * decoders) may be different from the presentation order of the corresponding
+ * frames.
+ *
+ * For codecs that do not have this property set, PTS and DTS should always be
+ * equal.
+ */
+#define AV_CODEC_PROP_REORDER (1 << 3)
+/**
+ * Subtitle codec is bitmap-based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field.
+ */
+#define AV_CODEC_PROP_BITMAP_SUB (1 << 16)
+/**
+ * Subtitle codec is text-based.
+ * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field.
+ */
+#define AV_CODEC_PROP_TEXT_SUB (1 << 17)
+
+/**
+ * @ingroup lavc_decoding
+ * Required number of additionally allocated bytes at the end of the input bitstream for decoding.
+ * This is mainly needed because some optimized bitstream readers read
+ * 32 or 64 bit at once and could read over the end.
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged
+ * MPEG bitstreams could cause overread and segfault.
+ */
+#define FF_INPUT_BUFFER_PADDING_SIZE 32
+
+/**
+ * @ingroup lavc_encoding
+ * minimum encoding buffer size
+ * Used to avoid some checks during header writing.
+ */
+#define FF_MIN_BUFFER_SIZE 16384
+
+
+/**
+ * @ingroup lavc_encoding
+ * motion estimation type.
+ */
+enum Motion_Est_ID {
+ ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed
+ ME_FULL, ///< exhaustive full search
+ ME_LOG, ///< logarithmic search
+ ME_PHODS, ///< parallel hierarchical one-dimensional search
+ ME_EPZS, ///< enhanced predictive zonal search
+ ME_X1, ///< reserved for experiments
+ ME_HEX, ///< hexagon based search
+ ME_UMH, ///< uneven multi-hexagon search
+ ME_TESA, ///< transformed exhaustive search algorithm
+ ME_ITER=50, ///< iterative search
+};
+
+/**
+ * @ingroup lavc_decoding
+ * Policy describing which frames/packets may be dropped.
+ */
+enum AVDiscard{
+ /* We leave some space between them for extensions (drop some
+ * keyframes for intra-only or drop just some bidir frames). */
+ AVDISCARD_NONE =-16, ///< discard nothing
+ AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi
+ AVDISCARD_NONREF = 8, ///< discard all non-reference frames
+ AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames
+ AVDISCARD_NONINTRA= 24, ///< discard all non-intra frames
+ AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes
+ AVDISCARD_ALL = 48, ///< discard all
+};
+
+/**
+ * Type of service that an audio stream conveys (main program, commentary,
+ * karaoke, ...).
+ */
+enum AVAudioServiceType {
+ AV_AUDIO_SERVICE_TYPE_MAIN = 0,
+ AV_AUDIO_SERVICE_TYPE_EFFECTS = 1,
+ AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2,
+ AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3,
+ AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4,
+ AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5,
+ AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6,
+ AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7,
+ AV_AUDIO_SERVICE_TYPE_KARAOKE = 8,
+ AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI
+};
+
+/**
+ * @ingroup lavc_encoding
+ * Rate-control override for a user-specified range of frames.
+ */
+typedef struct RcOverride{
+ int start_frame; // first frame of the range the override applies to
+ int end_frame; // last frame of the range the override applies to
+ int qscale; // If this is 0 then quality_factor will be used instead.
+ float quality_factor;
+} RcOverride;
+
+#if FF_API_MAX_BFRAMES
+/**
+ * @deprecated there is no libavcodec-wide limit on the number of B-frames
+ */
+#define FF_MAX_B_FRAMES 16
+#endif
+
+/* encoding support
+ These flags can be passed in AVCodecContext.flags before initialization.
+ Note: Not everything is supported yet.
+*/
+
+/**
+ * Allow decoders to produce frames with data planes that are not aligned
+ * to CPU requirements (e.g. due to cropping).
+ */
+#define CODEC_FLAG_UNALIGNED 0x0001
+#define CODEC_FLAG_QSCALE 0x0002 ///< Use fixed qscale.
+#define CODEC_FLAG_4MV 0x0004 ///< 4 MV per MB allowed / advanced prediction for H.263.
+#define CODEC_FLAG_OUTPUT_CORRUPT 0x0008 ///< Output even those frames that might be corrupted
+#define CODEC_FLAG_QPEL 0x0010 ///< Use qpel MC.
+#if FF_API_GMC
+/**
+ * @deprecated use the "gmc" private option of the libxvid encoder
+ */
+#define CODEC_FLAG_GMC 0x0020 ///< Use GMC.
+#endif
+#if FF_API_MV0
+/**
+ * @deprecated use the flag "mv0" in the "mpv_flags" private option of the
+ * mpegvideo encoders
+ */
+#define CODEC_FLAG_MV0 0x0040
+#endif
+#if FF_API_INPUT_PRESERVED
+/**
+ * @deprecated passing reference-counted frames to the encoders replaces this
+ * flag
+ */
+#define CODEC_FLAG_INPUT_PRESERVED 0x0100
+#endif
+#define CODEC_FLAG_PASS1 0x0200 ///< Use internal 2pass ratecontrol in first pass mode.
+#define CODEC_FLAG_PASS2 0x0400 ///< Use internal 2pass ratecontrol in second pass mode.
+#define CODEC_FLAG_GRAY 0x2000 ///< Only decode/encode grayscale.
+#if FF_API_EMU_EDGE
+/**
+ * @deprecated edges are not used/required anymore. I.e. this flag is now always
+ * set.
+ */
+#define CODEC_FLAG_EMU_EDGE 0x4000
+#endif
+#define CODEC_FLAG_PSNR 0x8000 ///< error[?] variables will be set during encoding.
+#define CODEC_FLAG_TRUNCATED 0x00010000 /**< Input bitstream might be truncated at a random
+ location instead of only at frame boundaries. */
+#if FF_API_NORMALIZE_AQP
+/**
+ * @deprecated use the flag "naq" in the "mpv_flags" private option of the
+ * mpegvideo encoders
+ */
+#define CODEC_FLAG_NORMALIZE_AQP 0x00020000
+#endif
+#define CODEC_FLAG_INTERLACED_DCT 0x00040000 ///< Use interlaced DCT.
+#define CODEC_FLAG_LOW_DELAY 0x00080000 ///< Force low delay.
+#define CODEC_FLAG_GLOBAL_HEADER 0x00400000 ///< Place global headers in extradata instead of every keyframe.
+#define CODEC_FLAG_BITEXACT 0x00800000 ///< Use only bitexact stuff (except (I)DCT).
+/* Fx : Flag for h263+ extra options */
+#define CODEC_FLAG_AC_PRED 0x01000000 ///< H.263 advanced intra coding / MPEG-4 AC prediction
+#define CODEC_FLAG_LOOP_FILTER 0x00000800 ///< loop filter
+#define CODEC_FLAG_INTERLACED_ME 0x20000000 ///< interlaced motion estimation
+#define CODEC_FLAG_CLOSED_GOP 0x80000000
+#define CODEC_FLAG2_FAST 0x00000001 ///< Allow non spec compliant speedup tricks.
+#define CODEC_FLAG2_NO_OUTPUT 0x00000004 ///< Skip bitstream encoding.
+#define CODEC_FLAG2_LOCAL_HEADER 0x00000008 ///< Place global headers at every keyframe instead of in extradata.
+#define CODEC_FLAG2_DROP_FRAME_TIMECODE 0x00002000 ///< timecode is in drop frame format. DEPRECATED!!!!
+#define CODEC_FLAG2_IGNORE_CROP 0x00010000 ///< Discard cropping information from SPS.
+
+#define CODEC_FLAG2_CHUNKS 0x00008000 ///< Input bitstream might be truncated at a packet boundaries instead of only at frame boundaries.
+#define CODEC_FLAG2_SHOW_ALL 0x00400000 ///< Show all frames before the first keyframe
+#define CODEC_FLAG2_EXPORT_MVS 0x10000000 ///< Export motion vectors through frame side data
+#define CODEC_FLAG2_SKIP_MANUAL 0x20000000 ///< Do not skip samples and export skip information as frame side data
+
+/* Unsupported options :
+ * Syntax Arithmetic coding (SAC)
+ * Reference Picture Selection
+ * Independent Segment Decoding */
+/* /Fx */
+/* codec capabilities */
+
+#define CODEC_CAP_DRAW_HORIZ_BAND 0x0001 ///< Decoder can use draw_horiz_band callback.
+/**
+ * Codec uses get_buffer() for allocating buffers and supports custom allocators.
+ * If not set, it might not use get_buffer() at all or use operations that
+ * assume the buffer was allocated by avcodec_default_get_buffer.
+ */
+#define CODEC_CAP_DR1 0x0002
+#define CODEC_CAP_TRUNCATED 0x0008
+#if FF_API_XVMC
+/* Codec can export data for HW decoding. This flag indicates that
+ * the codec would call get_format() with list that might contain HW accelerated
+ * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them
+ * including raw image format.
+ * The application can use the passed context to determine bitstream version,
+ * chroma format, resolution etc.
+ */
+#define CODEC_CAP_HWACCEL 0x0010
+#endif /* FF_API_XVMC */
+/**
+ * Encoder or decoder requires flushing with NULL input at the end in order to
+ * give the complete and correct output.
+ *
+ * NOTE: If this flag is not set, the codec is guaranteed to never be fed
+ * with NULL data. The user can still send NULL data to the public encode
+ * or decode function, but libavcodec will not pass it along to the codec
+ * unless this flag is set.
+ *
+ * Decoders:
+ * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to get the delayed data until the decoder no longer
+ * returns frames.
+ *
+ * Encoders:
+ * The encoder needs to be fed with NULL data at the end of encoding until the
+ * encoder no longer returns data.
+ *
+ * NOTE: For encoders implementing the AVCodec.encode2() function, setting this
+ * flag also means that the encoder must set the pts and duration for
+ * each output packet. If this flag is not set, the pts and duration will
+ * be determined by libavcodec from the input frame.
+ */
+#define CODEC_CAP_DELAY 0x0020
+/**
+ * Codec can be fed a final frame with a smaller size.
+ * This can be used to prevent truncation of the last audio samples.
+ */
+#define CODEC_CAP_SMALL_LAST_FRAME 0x0040
+#if FF_API_CAP_VDPAU
+/**
+ * Codec can export data for HW decoding (VDPAU).
+ */
+#define CODEC_CAP_HWACCEL_VDPAU 0x0080
+#endif
+/**
+ * Codec can output multiple frames per AVPacket
+ * Normally demuxers return one frame at a time, demuxers which do not do
+ * so are connected to a parser to split what they return into proper frames.
+ * This flag is reserved to the very rare category of codecs which have a
+ * bitstream that cannot be split into frames without time-consuming
+ * operations like full decoding. Demuxers carrying such bitstreams thus
+ * may return multiple frames in a packet. This has many disadvantages like
+ * prohibiting stream copy in many cases thus it should only be considered
+ * as a last resort.
+ */
+#define CODEC_CAP_SUBFRAMES 0x0100
+/**
+ * Codec is experimental and is thus avoided in favor of non experimental
+ * encoders
+ */
+#define CODEC_CAP_EXPERIMENTAL 0x0200
+/**
+ * Codec should fill in channel configuration and samplerate instead of container
+ */
+#define CODEC_CAP_CHANNEL_CONF 0x0400
+#if FF_API_NEG_LINESIZES
+/**
+ * @deprecated no codecs use this capability
+ */
+#define CODEC_CAP_NEG_LINESIZES 0x0800
+#endif
+/**
+ * Codec supports frame-level multithreading.
+ */
+#define CODEC_CAP_FRAME_THREADS 0x1000
+/**
+ * Codec supports slice-based (or partition-based) multithreading.
+ */
+#define CODEC_CAP_SLICE_THREADS 0x2000
+/**
+ * Codec supports changed parameters at any point.
+ */
+#define CODEC_CAP_PARAM_CHANGE 0x4000
+/**
+ * Codec supports avctx->thread_count == 0 (auto).
+ */
+#define CODEC_CAP_AUTO_THREADS 0x8000
+/**
+ * Audio encoder supports receiving a different number of samples in each call.
+ */
+#define CODEC_CAP_VARIABLE_FRAME_SIZE 0x10000
+/**
+ * Codec is intra only.
+ */
+#define CODEC_CAP_INTRA_ONLY 0x40000000
+/**
+ * Codec is lossless.
+ */
+#define CODEC_CAP_LOSSLESS 0x80000000
+
+#if FF_API_MB_TYPE
+// The following defines may change, don't expect compatibility if you use them.
+#define MB_TYPE_INTRA4x4 0x0001
+#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific
+#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific
+#define MB_TYPE_16x16 0x0008
+#define MB_TYPE_16x8 0x0010
+#define MB_TYPE_8x16 0x0020
+#define MB_TYPE_8x8 0x0040
+#define MB_TYPE_INTERLACED 0x0080
+#define MB_TYPE_DIRECT2 0x0100 //FIXME
+#define MB_TYPE_ACPRED 0x0200
+#define MB_TYPE_GMC 0x0400
+#define MB_TYPE_SKIP 0x0800
+#define MB_TYPE_P0L0 0x1000
+#define MB_TYPE_P1L0 0x2000
+#define MB_TYPE_P0L1 0x4000
+#define MB_TYPE_P1L1 0x8000
+// Convenience masks combining the prediction/list bits above.
+#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0)
+#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1)
+#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1)
+#define MB_TYPE_QUANT 0x00010000
+#define MB_TYPE_CBP 0x00020000
+// Note: bits 24-31 are reserved for codec specific use (h264 ref0, mpeg1 0mv, ...)
+#endif
+
+/**
+ * Pan Scan area.
+ * This specifies the area which should be displayed.
+ * Note there may be multiple such areas for one frame.
+ */
+typedef struct AVPanScan{
+ /**
+ * id of the pan/scan area
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int id;
+
+ /**
+ * width and height in 1/16 pel
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int width;
+ int height;
+
+ /**
+ * position of the top left corner in 1/16 pel for up to 3 fields/frames
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int16_t position[3][2];
+}AVPanScan;
+
+#if FF_API_QSCALE_TYPE
+/* Quantizer scale type reported for decoded pictures (deprecated API). */
+#define FF_QSCALE_TYPE_MPEG1 0
+#define FF_QSCALE_TYPE_MPEG2 1
+#define FF_QSCALE_TYPE_H264 2
+#define FF_QSCALE_TYPE_VP56 3
+#endif
+
+#if FF_API_GET_BUFFER
+/* Buffer ownership/usage hints for the deprecated get_buffer() API. */
+#define FF_BUFFER_TYPE_INTERNAL 1
+#define FF_BUFFER_TYPE_USER 2 ///< direct rendering buffers (image is (de)allocated by user)
+#define FF_BUFFER_TYPE_SHARED 4 ///< Buffer from somewhere else; don't deallocate image (data/base), all other tables are not shared.
+#define FF_BUFFER_TYPE_COPY 8 ///< Just a (modified) copy of some other buffer, don't deallocate anything.
+
+#define FF_BUFFER_HINTS_VALID 0x01 // Buffer hints value is meaningful (if 0 ignore).
+#define FF_BUFFER_HINTS_READABLE 0x02 // Codec will read from buffer.
+#define FF_BUFFER_HINTS_PRESERVE 0x04 // User must not alter buffer content.
+#define FF_BUFFER_HINTS_REUSABLE 0x08 // Codec will reuse the buffer (update).
+#endif
+
+/**
+ * The decoder will keep a reference to the frame and may reuse it later.
+ */
+#define AV_GET_BUFFER_FLAG_REF (1 << 0)
+
+/**
+ * @defgroup lavc_packet AVPacket
+ *
+ * Types and functions for working with AVPacket.
+ * @{
+ */
+enum AVPacketSideDataType {
+ AV_PKT_DATA_PALETTE,
+ AV_PKT_DATA_NEW_EXTRADATA,
+
+ /**
+ * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows:
+ * @code
+ * u32le param_flags
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT)
+ * s32le channel_count
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT)
+ * u64le channel_layout
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE)
+ * s32le sample_rate
+ * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS)
+ * s32le width
+ * s32le height
+ * @endcode
+ */
+ AV_PKT_DATA_PARAM_CHANGE,
+
+ /**
+ * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of
+ * structures with info about macroblocks relevant to splitting the
+ * packet into smaller packets on macroblock edges (e.g. as for RFC 2190).
+ * That is, it does not necessarily contain info about all macroblocks,
+ * as long as the distance between macroblocks in the info is smaller
+ * than the target payload size.
+ * Each MB info structure is 12 bytes, and is laid out as follows:
+ * @code
+ * u32le bit offset from the start of the packet
+ * u8 current quantizer at the start of the macroblock
+ * u8 GOB number
+ * u16le macroblock address within the GOB
+ * u8 horizontal MV predictor
+ * u8 vertical MV predictor
+ * u8 horizontal MV predictor for block number 3
+ * u8 vertical MV predictor for block number 3
+ * @endcode
+ */
+ AV_PKT_DATA_H263_MB_INFO,
+
+ /**
+ * This side data should be associated with an audio stream and contains
+ * ReplayGain information in form of the AVReplayGain struct.
+ */
+ AV_PKT_DATA_REPLAYGAIN,
+
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the decoded video frames for
+ * correct presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_PKT_DATA_DISPLAYMATRIX,
+
+ /**
+ * This side data should be associated with a video stream and contains
+ * Stereoscopic 3D information in form of the AVStereo3D struct.
+ */
+ AV_PKT_DATA_STEREO3D,
+
+ /**
+ * Recommends skipping the specified number of samples
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_PKT_DATA_SKIP_SAMPLES=70,
+
+ /**
+ * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that
+ * the packet may contain "dual mono" audio specific to Japanese DTV
+ * and if it is true, recommends only the selected channel to be used.
+ * @code
+ * u8 selected channels (0=main/left, 1=sub/right, 2=both)
+ * @endcode
+ */
+ AV_PKT_DATA_JP_DUALMONO,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop.
+ */
+ AV_PKT_DATA_STRINGS_METADATA,
+
+ /**
+ * Subtitle event position
+ * @code
+ * u32le x1
+ * u32le y1
+ * u32le x2
+ * u32le y2
+ * @endcode
+ */
+ AV_PKT_DATA_SUBTITLE_POSITION,
+
+ /**
+ * Data found in BlockAdditional element of matroska container. There is
+ * no end marker for the data, so it is required to rely on the side data
+ * size to recognize the end. 8 byte id (as found in BlockAddId) followed
+ * by data.
+ */
+ AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL,
+
+ /**
+ * The optional first identifier line of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_IDENTIFIER,
+
+ /**
+ * The optional settings (rendering instructions) that immediately
+ * follow the timestamp specifier of a WebVTT cue.
+ */
+ AV_PKT_DATA_WEBVTT_SETTINGS,
+
+ /**
+ * A list of zero terminated key/value strings. There is no end marker for
+ * the list, so it is required to rely on the side data size to stop. This
+ * side data includes updated metadata which appeared in the stream.
+ */
+ AV_PKT_DATA_METADATA_UPDATE,
+};
+
+typedef struct AVPacketSideData {
+ uint8_t *data; ///< side data payload
+ int size; ///< size of data in bytes
+ enum AVPacketSideDataType type; ///< kind of side data carried in data
+} AVPacketSideData;
+
+/**
+ * This structure stores compressed data. It is typically exported by demuxers
+ * and then passed as input to decoders, or received as output from encoders and
+ * then passed to muxers.
+ *
+ * For video, it should typically contain one compressed frame. For audio it may
+ * contain several compressed frames.
+ *
+ * AVPacket is one of the few structs in FFmpeg, whose size is a part of public
+ * ABI. Thus it may be allocated on stack and no new fields can be added to it
+ * without libavcodec and libavformat major bump.
+ *
+ * The semantics of data ownership depends on the buf or destruct (deprecated)
+ * fields. If either is set, the packet data is dynamically allocated and is
+ * valid indefinitely until av_free_packet() is called (which in turn calls
+ * av_buffer_unref()/the destruct callback to free the data). If neither is set,
+ * the packet data is typically backed by some static buffer somewhere and is
+ * only valid for a limited time (e.g. until the next read call when demuxing).
+ *
+ * The side data is always allocated with av_malloc() and is freed in
+ * av_free_packet().
+ */
+typedef struct AVPacket {
+ /**
+ * A reference to the reference-counted buffer where the packet data is
+ * stored.
+ * May be NULL, then the packet data is not reference-counted.
+ */
+ AVBufferRef *buf;
+ /**
+ * Presentation timestamp in AVStream->time_base units; the time at which
+ * the decompressed packet will be presented to the user.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ * pts MUST be larger or equal to dts as presentation cannot happen before
+ * decompression, unless one wants to view hex dumps. Some formats misuse
+ * the terms dts and pts/cts to mean something different. Such timestamps
+ * must be converted to true pts/dts before they are stored in AVPacket.
+ */
+ int64_t pts;
+ /**
+ * Decompression timestamp in AVStream->time_base units; the time at which
+ * the packet is decompressed.
+ * Can be AV_NOPTS_VALUE if it is not stored in the file.
+ */
+ int64_t dts;
+ uint8_t *data; ///< packet payload
+ int size; ///< size of data in bytes
+ int stream_index; ///< index of the stream this packet belongs to
+ /**
+ * A combination of AV_PKT_FLAG values
+ */
+ int flags;
+ /**
+ * Additional packet data that can be provided by the container.
+ * Packet can contain several types of side information.
+ */
+ AVPacketSideData *side_data;
+ int side_data_elems;
+
+ /**
+ * Duration of this packet in AVStream->time_base units, 0 if unknown.
+ * Equals next_pts - this_pts in presentation order.
+ */
+ int duration;
+#if FF_API_DESTRUCT_PACKET
+ /** @deprecated custom deallocation; superseded by the reference-counted buf field. */
+ attribute_deprecated
+ void (*destruct)(struct AVPacket *);
+ attribute_deprecated
+ void *priv;
+#endif
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ /**
+ * Time difference in AVStream->time_base units from the pts of this
+ * packet to the point at which the output from the decoder has converged
+ * independent from the availability of previous frames. That is, the
+ * frames are virtually identical no matter if decoding started from
+ * the very first frame or from this keyframe.
+ * Is AV_NOPTS_VALUE if unknown.
+ * This field is not the display duration of the current packet.
+ * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+ * set.
+ *
+ * The purpose of this field is to allow seeking in streams that have no
+ * keyframes in the conventional sense. It corresponds to the
+ * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+ * essential for some types of subtitle streams to ensure that all
+ * subtitles are correctly displayed after seeking.
+ */
+ int64_t convergence_duration;
+} AVPacket;
+#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe
+#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted
+
+/**
+ * Flags stored in the param_flags field of AV_PKT_DATA_PARAM_CHANGE side
+ * data, indicating which stream parameters changed.
+ */
+enum AVSideDataParamChangeFlags {
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001,
+ AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002,
+ AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004,
+ AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008,
+};
+/**
+ * @}
+ */
+
+struct AVCodecInternal;
+
+/** Field order of interlaced video. */
+enum AVFieldOrder {
+ AV_FIELD_UNKNOWN,
+ AV_FIELD_PROGRESSIVE,
+ AV_FIELD_TT, ///< Top coded first, top displayed first
+ AV_FIELD_BB, ///< Bottom coded first, bottom displayed first
+ AV_FIELD_TB, ///< Top coded first, bottom displayed first
+ AV_FIELD_BT, ///< Bottom coded first, top displayed first
+};
+
+/**
+ * main external API structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user
+ * applications.
+ * sizeof(AVCodecContext) must not be used outside libav*.
+ */
+typedef struct AVCodecContext {
+ /**
+ * information on struct for av_log
+ * - set by avcodec_alloc_context3
+ */
+ const AVClass *av_class;
+ int log_level_offset;
+
+ enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */
+ const struct AVCodec *codec;
+#if FF_API_CODEC_NAME
+ /**
+ * @deprecated this field is not used for anything in libavcodec
+ */
+ attribute_deprecated
+ char codec_name[32];
+#endif
+ enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */
+
+ /**
+ * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * A demuxer should set this to what is stored in the field used to identify the codec.
+ * If there are multiple such fields in a container then the demuxer should choose the one
+ * which maximizes the information about the used codec.
+ * If the codec tag field in a container is larger than 32 bits then the demuxer should
+ * remap the longer ID to 32 bits with a table or other structure. Alternatively a new
+ * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated
+ * first.
+ * - encoding: Set by user, if not then the default based on codec_id will be used.
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int codec_tag;
+
+ /**
+ * fourcc from the AVI stream header (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A').
+ * This is used to work around some encoder bugs.
+ * - encoding: unused
+ * - decoding: Set by user, will be converted to uppercase by libavcodec during init.
+ */
+ unsigned int stream_codec_tag;
+
+ void *priv_data;
+
+ /**
+ * Private context used for internal data.
+ *
+ * Unlike priv_data, this is not codec-specific. It is used in general
+ * libavcodec functions.
+ */
+ struct AVCodecInternal *internal;
+
+ /**
+ * Private data of the user, can be used to carry app specific stuff.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ void *opaque;
+
+ /**
+ * the average bitrate
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: Set by libavcodec. 0 or some bitrate if this info is available in the stream.
+ */
+ int bit_rate;
+
+ /**
+ * number of bits the bitstream is allowed to diverge from the reference.
+ * the reference can be CBR (for CBR pass1) or VBR (for pass2)
+ * - encoding: Set by user; unused for constant quantizer encoding.
+ * - decoding: unused
+ */
+ int bit_rate_tolerance;
+
+ /**
+ * Global quality for codecs which cannot change it per frame.
+ * This should be proportional to MPEG-1/2/4 qscale.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int global_quality;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int compression_level;
+#define FF_COMPRESSION_DEFAULT -1
+
+ /**
+ * CODEC_FLAG_*.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags;
+
+ /**
+ * CODEC_FLAG2_*
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int flags2;
+
+ /**
+ * some codecs need / can use extradata like Huffman tables.
+ * mjpeg: Huffman tables
+ * rv10: additional flags
+ * mpeg4: global headers (they can be in the bitstream or here)
+ * The allocated memory should be FF_INPUT_BUFFER_PADDING_SIZE bytes larger
+ * than extradata_size to avoid problems if it is read with the bitstream reader.
+ * The bytewise contents of extradata must not depend on the architecture or CPU endianness.
+ * - encoding: Set/allocated/freed by libavcodec.
+ * - decoding: Set/allocated/freed by user.
+ */
+ uint8_t *extradata;
+ int extradata_size;
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented. For fixed-fps content,
+ * timebase should be 1/framerate and timestamp increments should be
+ * identically 1.
+ * This often, but not always is the inverse of the frame rate or field rate
+ * for video.
+ * - encoding: MUST be set by user.
+ * - decoding: the use of this field for decoding is deprecated.
+ * Use framerate instead.
+ */
+ AVRational time_base;
+
+ /**
+ * For some codecs, the time base is closer to the field rate than the frame rate.
+ * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration
+ * if no telecine is used ...
+ *
+ * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2.
+ */
+ int ticks_per_frame;
+
+ /**
+ * Codec delay.
+ *
+ * Encoding: Number of frames delay there will be from the encoder input to
+ * the decoder output. (we assume the decoder matches the spec)
+ * Decoding: Number of frames delay in addition to what a standard decoder
+ * as specified in the spec would produce.
+ *
+ * Video:
+ * Number of frames the decoded output will be delayed relative to the
+ * encoded input.
+ *
+ * Audio:
+ * For encoding, this field is unused (see initial_padding).
+ *
+ * For decoding, this is the number of samples the decoder needs to
+ * output before the decoder's output is valid. When seeking, you should
+ * start decoding this many samples prior to your desired seek point.
+ *
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int delay;
+
+
+ /* video only */
+ /**
+ * picture width / height.
+ * - encoding: MUST be set by user.
+ * - decoding: May be set by the user before opening the decoder if known e.g.
+ * from the container. Some decoders will require the dimensions
+ * to be set by the caller. During decoding, the decoder may
+ * overwrite those values as required.
+ */
+ int width, height;
+
+ /**
+ * Bitstream width / height, may be different from width/height e.g. when
+ * the decoded frame is cropped before being output or lowres is enabled.
+ * - encoding: unused
+ * - decoding: May be set by the user before opening the decoder if known
+ * e.g. from the container. During decoding, the decoder may
+ * overwrite those values as required.
+ */
+ int coded_width, coded_height;
+
+#if FF_API_ASPECT_EXTENDED
+#define FF_ASPECT_EXTENDED 15
+#endif
+
+ /**
+ * the number of pictures in a group of pictures, or 0 for intra_only
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int gop_size;
+
+ /**
+ * Pixel format, see AV_PIX_FMT_xxx.
+ * May be set by the demuxer if known from headers.
+ * May be overridden by the decoder if it knows better.
+ * - encoding: Set by user.
+ * - decoding: Set by user if known, overridden by libavcodec if known
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Motion estimation algorithm used for video coding.
+ * 1 (zero), 2 (full), 3 (log), 4 (phods), 5 (epzs), 6 (x1), 7 (hex),
+ * 8 (umh), 9 (iter), 10 (tesa) [7, 8, 10 are x264 specific, 9 is snow specific]
+ * - encoding: MUST be set by user.
+ * - decoding: unused
+ */
+ int me_method;
+
+ /**
+ * If non NULL, 'draw_horiz_band' is called by the libavcodec
+ * decoder to draw a horizontal band. It improves cache usage. Not
+ * all codecs can do that. You must check the codec capabilities
+ * beforehand.
+ * When multithreading is used, it may be called from multiple threads
+ * at the same time; threads might draw different parts of the same AVFrame,
+ * or multiple AVFrames, and there is no guarantee that slices will be drawn
+ * in order.
+ * The function is also used by hardware acceleration APIs.
+ * It is called at least once during frame decoding to pass
+ * the data needed for hardware render.
+ * In that mode instead of pixel data, AVFrame points to
+ * a structure specific to the acceleration API. The application
+ * reads the structure and can change some fields to indicate progress
+ * or mark state.
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @param height the height of the slice
+ * @param y the y position of the slice
+ * @param type 1->top field, 2->bottom field, 3->frame
+ * @param offset offset into the AVFrame.data from which the slice should be read
+ */
+ void (*draw_horiz_band)(struct AVCodecContext *s,
+ const AVFrame *src, int offset[AV_NUM_DATA_POINTERS],
+ int y, int type, int height);
+
+ /**
+ * callback to negotiate the pixelFormat
+ * @param fmt is the list of formats which are supported by the codec,
+ * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality.
+ * The first is always the native one.
+ * @note The callback may be called again immediately if initialization for
+ * the selected (hardware-accelerated) pixel format failed.
+ * @warning Behavior is undefined if the callback returns a value not
+ * in the fmt list of formats.
+ * @return the chosen format
+ * - encoding: unused
+ * - decoding: Set by user, if not set the native format will be chosen.
+ */
+ enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+ /**
+ * maximum number of B-frames between non-B-frames
+ * Note: The output will be delayed by max_b_frames+1 relative to the input.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_b_frames;
+
+ /**
+ * qscale factor between IP and B-frames
+ * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_factor;
+
+ /** obsolete FIXME remove */
+ int rc_strategy;
+#define FF_RC_STRATEGY_XVID 1
+
+ int b_frame_strategy;
+
+ /**
+ * qscale offset between IP and B-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float b_quant_offset;
+
+ /**
+ * Size of the frame reordering buffer in the decoder.
+ * For MPEG-2 it is 1 IPB or 0 low delay IP.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int has_b_frames;
+
+ /**
+ * 0-> h263 quant 1-> mpeg quant
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mpeg_quant;
+
+ /**
+ * qscale factor between P and I-frames
+ * If > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset).
+ * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset).
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_factor;
+
+ /**
+ * qscale offset between P and I-frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float i_quant_offset;
+
+ /**
+ * luminance masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float lumi_masking;
+
+ /**
+ * temporal complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float temporal_cplx_masking;
+
+ /**
+ * spatial complexity masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float spatial_cplx_masking;
+
+ /**
+ * p block masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float p_masking;
+
+ /**
+ * darkness masking (0-> disabled)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ float dark_masking;
+
+ /**
+ * slice count
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user (or 0).
+ */
+ int slice_count;
+ /**
+ * prediction method (needed for huffyuv)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int prediction_method;
+#define FF_PRED_LEFT 0
+#define FF_PRED_PLANE 1
+#define FF_PRED_MEDIAN 2
+
+ /**
+ * slice offsets in the frame in bytes
+ * - encoding: Set/allocated by libavcodec.
+ * - decoding: Set/allocated by user (or NULL).
+ */
+ int *slice_offset;
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * That is the width of a pixel divided by the height of the pixel.
+ * Numerator and denominator must be relatively prime and smaller than 256 for some video standards.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_cmp;
+ /**
+ * subpixel motion estimation comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_sub_cmp;
+ /**
+ * macroblock comparison function (not supported yet)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_cmp;
+ /**
+ * interlaced DCT comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int ildct_cmp;
+#define FF_CMP_SAD 0
+#define FF_CMP_SSE 1
+#define FF_CMP_SATD 2
+#define FF_CMP_DCT 3
+#define FF_CMP_PSNR 4
+#define FF_CMP_BIT 5
+#define FF_CMP_RD 6
+#define FF_CMP_ZERO 7
+#define FF_CMP_VSAD 8
+#define FF_CMP_VSSE 9
+#define FF_CMP_NSSE 10
+#define FF_CMP_W53 11
+#define FF_CMP_W97 12
+#define FF_CMP_DCTMAX 13
+#define FF_CMP_DCT264 14
+#define FF_CMP_CHROMA 256
+
+ /**
+ * ME diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dia_size;
+
+ /**
+ * amount of previous MV predictors (2a+1 x 2a+1 square)
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int last_predictor_count;
+
+ /**
+ * prepass for motion estimation
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_me;
+
+ /**
+ * motion estimation prepass comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_pre_cmp;
+
+ /**
+ * ME prepass diamond size & shape
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int pre_dia_size;
+
+ /**
+ * subpel ME quality
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_subpel_quality;
+
+#if FF_API_AFD
+ /**
+ * DTG active format information (additional aspect ratio
+ * information only used in DVB MPEG-2 transport streams)
+ * 0 if not set.
+ *
+ * - encoding: unused
+ * - decoding: Set by decoder.
+ * @deprecated Deprecated in favor of AVSideData
+ */
+ attribute_deprecated int dtg_active_format;
+#define FF_DTG_AFD_SAME 8
+#define FF_DTG_AFD_4_3 9
+#define FF_DTG_AFD_16_9 10
+#define FF_DTG_AFD_14_9 11
+#define FF_DTG_AFD_4_3_SP_14_9 13
+#define FF_DTG_AFD_16_9_SP_14_9 14
+#define FF_DTG_AFD_SP_4_3 15
+#endif /* FF_API_AFD */
+
+ /**
+ * maximum motion estimation search range in subpel units
+ * If 0 then no limit.
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_range;
+
+ /**
+ * intra quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_quant_bias;
+#define FF_DEFAULT_QUANT_BIAS 999999
+
+ /**
+ * inter quantizer bias
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int inter_quant_bias;
+
+ /**
+ * slice flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int slice_flags;
+#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display
+#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG2 field pics)
+#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1)
+
+#if FF_API_XVMC
+ /**
+ * XVideo Motion Acceleration
+ * - encoding: forbidden
+ * - decoding: set by decoder
+ * @deprecated XvMC doesn't need it anymore.
+ */
+ attribute_deprecated int xvmc_acceleration;
+#endif /* FF_API_XVMC */
+
+ /**
+ * macroblock decision mode
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_decision;
+#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp
+#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits
+#define FF_MB_DECISION_RD 2 ///< rate distortion
+
+ /**
+ * custom intra quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *intra_matrix;
+
+ /**
+ * custom inter quantization matrix
+ * - encoding: Set by user, can be NULL.
+ * - decoding: Set by libavcodec.
+ */
+ uint16_t *inter_matrix;
+
+ /**
+ * scene change detection threshold
+ * 0 is default, larger means fewer detected scene changes.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int scenechange_threshold;
+
+ /**
+ * noise reduction strength
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int noise_reduction;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated
+ int me_threshold;
+
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated
+ int mb_threshold;
+#endif
+
+ /**
+ * precision of the intra DC coefficient - 8
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int intra_dc_precision;
+
+ /**
+ * Number of macroblock rows at the top which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_top;
+
+ /**
+ * Number of macroblock rows at the bottom which are skipped.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int skip_bottom;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ float border_masking;
+#endif
+
+ /**
+ * minimum MB lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmin;
+
+ /**
+ * maximum MB lagrange multiplier
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mb_lmax;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int me_penalty_compensation;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int bidir_refine;
+
+ /**
+ *
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int brd_scale;
+
+ /**
+ * minimum GOP size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int keyint_min;
+
+ /**
+ * number of reference frames
+ * - encoding: Set by user.
+ * - decoding: Set by lavc.
+ */
+ int refs;
+
+ /**
+ * chroma qp offset from luma
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int chromaoffset;
+
+#if FF_API_UNUSED_MEMBERS
+ /**
+ * Multiplied by qscale for each frame and added to scene_change_score.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ attribute_deprecated int scenechange_factor;
+#endif
+
+ /**
+ *
+ * Note: Value depends upon the compare function used for fullpel ME.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int mv0_threshold;
+
+ /**
+ * Adjust sensitivity of b_frame_strategy 1.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int b_sensitivity;
+
+ /**
+ * Chromaticity coordinates of the source primaries.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorPrimaries color_primaries;
+
+ /**
+ * Color Transfer Characteristic.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ /**
+ * This defines the location of chroma samples.
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVChromaLocation chroma_sample_location;
+
+ /**
+ * Number of slices.
+ * Indicates number of picture subdivisions. Used for parallelized
+ * decoding.
+ * - encoding: Set by user
+ * - decoding: unused
+ */
+ int slices;
+
+ /** Field order
+ * - encoding: set by libavcodec
+ * - decoding: Set by user.
+ */
+ enum AVFieldOrder field_order;
+
+ /* audio only */
+ int sample_rate; ///< samples per second
+ int channels; ///< number of audio channels
+
+ /**
+ * audio sample format
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVSampleFormat sample_fmt; ///< sample format
+
+ /* The following data should not be initialized. */
+ /**
+ * Number of samples per channel in an audio frame.
+ *
+ * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame
+ * except the last must contain exactly frame_size samples per channel.
+ * May be 0 when the codec has CODEC_CAP_VARIABLE_FRAME_SIZE set, then the
+ * frame size is not restricted.
+ * - decoding: may be set by some decoders to indicate constant frame size
+ */
+ int frame_size;
+
+ /**
+ * Frame counter, set by libavcodec.
+ *
+ * - decoding: total number of frames returned from the decoder so far.
+ * - encoding: total number of frames passed to the encoder so far.
+ *
+ * @note the counter is not incremented if encoding/decoding resulted in
+ * an error.
+ */
+ int frame_number;
+
+ /**
+ * number of bytes per packet if constant and known or 0
+ * Used by some WAV based audio codecs.
+ */
+ int block_align;
+
+ /**
+ * Audio cutoff bandwidth (0 means "automatic")
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int cutoff;
+
+#if FF_API_REQUEST_CHANNELS
+ /**
+ * Decoder should decode to this many channels if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ * @deprecated Deprecated in favor of request_channel_layout.
+ */
+ attribute_deprecated int request_channels;
+#endif
+
+ /**
+ * Audio channel layout.
+ * - encoding: set by user.
+ * - decoding: set by user, may be overwritten by libavcodec.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * Request decoder to use this channel layout if it can (0 for default)
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ uint64_t request_channel_layout;
+
+ /**
+ * Type of service that the audio stream conveys.
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ enum AVAudioServiceType audio_service_type;
+
+ /**
+ * desired sample format
+ * - encoding: Not used.
+ * - decoding: Set by user.
+ * Decoder will decode to this format if it can.
+ */
+ enum AVSampleFormat request_sample_fmt;
+
+#if FF_API_GET_BUFFER
+ /**
+ * Called at the beginning of each frame to get a buffer for it.
+ *
+ * The function will set AVFrame.data[], AVFrame.linesize[].
+ * AVFrame.extended_data[] must also be set, but it should be the same as
+ * AVFrame.data[] except for planar audio with more channels than can fit
+ * in AVFrame.data[]. In that case, AVFrame.data[] shall still contain as
+ * many data pointers as it can hold.
+ *
+ * if CODEC_CAP_DR1 is not set then get_buffer() must call
+ * avcodec_default_get_buffer() instead of providing buffers allocated by
+ * some other means.
+ *
+ * AVFrame.data[] should be 32- or 16-byte-aligned unless the CPU doesn't
+ * need it. avcodec_default_get_buffer() aligns the output buffer properly,
+ * but if get_buffer() is overridden then alignment considerations should
+ * be taken into account.
+ *
+ * @see avcodec_default_get_buffer()
+ *
+ * Video:
+ *
+ * If pic.reference is set then the frame will be read later by libavcodec.
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * it may be called from a different thread, but not from more than one at
+ * once. Does not need to be reentrant.
+ *
+ * @see release_buffer(), reget_buffer()
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * Decoders cannot use the buffer after returning from
+ * avcodec_decode_audio4(), so they will not call release_buffer(), as it
+ * is assumed to be released immediately upon return. In some rare cases,
+ * a decoder may need to call get_buffer() more than once in a single
+ * call to avcodec_decode_audio4(). In that case, when get_buffer() is
+ * called again after it has already been called once, the previously
+ * acquired buffer is assumed to be released at that time and may not be
+ * reused by the decoder.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated use get_buffer2()
+ */
+ attribute_deprecated
+ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Called to release buffers which were allocated with get_buffer.
+ * A released buffer can be reused in get_buffer().
+ * pic.data[*] must be set to NULL.
+ * May be called from a different thread if frame multithreading is used,
+ * but not by more than one thread at once, so does not need to be reentrant.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ *
+ * @deprecated custom freeing callbacks should be set from get_buffer2()
+ */
+ attribute_deprecated
+ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic);
+
+ /**
+ * Called at the beginning of a frame to get cr buffer for it.
+ * Buffer type (size, hints) must be the same. libavcodec won't check it.
+ * libavcodec will pass previous buffer in pic, function should return
+ * same buffer or new buffer with old frame "painted" into it.
+ * If pic.data[0] == NULL must behave like get_buffer().
+ * if CODEC_CAP_DR1 is not set then reget_buffer() must call
+ * avcodec_default_reget_buffer() instead of providing buffers allocated by
+ * some other means.
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ attribute_deprecated
+ int (*reget_buffer)(struct AVCodecContext *c, AVFrame *pic);
+#endif
+
+ /**
+ * This callback is called at the beginning of each frame to get data
+ * buffer(s) for it. There may be one contiguous buffer for all the data or
+ * there may be a buffer per each data plane or anything in between. What
+ * this means is, you may set however many entries in buf[] you feel necessary.
+ * Each buffer must be reference-counted using the AVBuffer API (see description
+ * of buf[] below).
+ *
+ * The following fields will be set in the frame before this callback is
+ * called:
+ * - format
+ * - width, height (video only)
+ * - sample_rate, channel_layout, nb_samples (audio only)
+ * Their values may differ from the corresponding values in
+ * AVCodecContext. This callback must use the frame values, not the codec
+ * context values, to calculate the required buffer size.
+ *
+ * This callback must fill the following fields in the frame:
+ * - data[]
+ * - linesize[]
+ * - extended_data:
+ * * if the data is planar audio with more than 8 channels, then this
+ * callback must allocate and fill extended_data to contain all pointers
+ * to all data planes. data[] must hold as many pointers as it can.
+ * extended_data must be allocated with av_malloc() and will be freed in
+ * av_frame_unref().
+ * * otherwise extended_data must point to data
+ * - buf[] must contain one or more pointers to AVBufferRef structures. Each of
+ * the frame's data and extended_data pointers must be contained in these. That
+ * is, one AVBufferRef for each allocated chunk of memory, not necessarily one
+ * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(),
+ * and av_buffer_ref().
+ * - extended_buf and nb_extended_buf must be allocated with av_malloc() by
+ * this callback and filled with the extra buffers if there are more
+ * buffers than buf[] can hold. extended_buf will be freed in
+ * av_frame_unref().
+ *
+ * If CODEC_CAP_DR1 is not set then get_buffer2() must call
+ * avcodec_default_get_buffer2() instead of providing buffers allocated by
+ * some other means.
+ *
+ * Each data plane must be aligned to the maximum required by the target
+ * CPU.
+ *
+ * @see avcodec_default_get_buffer2()
+ *
+ * Video:
+ *
+ * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused
+ * (read and/or written to if it is writable) later by libavcodec.
+ *
+ * avcodec_align_dimensions2() should be used to find the required width and
+ * height, as they normally need to be rounded up to the next multiple of 16.
+ *
+ * Some decoders do not support linesizes changing between frames.
+ *
+ * If frame multithreading is used and thread_safe_callbacks is set,
+ * this callback may be called from a different thread, but not from more
+ * than one at once. Does not need to be reentrant.
+ *
+ * @see avcodec_align_dimensions2()
+ *
+ * Audio:
+ *
+ * Decoders request a buffer of a particular size by setting
+ * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may,
+ * however, utilize only part of the buffer by setting AVFrame.nb_samples
+ * to a smaller value in the output frame.
+ *
+ * As a convenience, av_samples_get_buffer_size() and
+ * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2()
+ * functions to find the required data size and to fill data pointers and
+ * linesize. In AVFrame.linesize, only linesize[0] may be set for audio
+ * since all planes must be the same size.
+ *
+ * @see av_samples_get_buffer_size(), av_samples_fill_arrays()
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags);
+
+ /**
+ * If non-zero, the decoded audio and video frames returned from
+ * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted
+ * and are valid indefinitely. The caller must free them with
+ * av_frame_unref() when they are not needed anymore.
+ * Otherwise, the decoded frames must not be freed by the caller and are
+ * only valid until the next decode call.
+ *
+ * - encoding: unused
+ * - decoding: set by the caller before avcodec_open2().
+ */
+ int refcounted_frames;
+
+ /* - encoding parameters */
+ float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0)
+ float qblur; ///< amount of qscale smoothing over time (0.0-1.0)
+
+ /**
+ * minimum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmin;
+
+ /**
+ * maximum quantizer
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int qmax;
+
+ /**
+ * maximum quantizer difference between frames
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_qdiff;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ float rc_qsquish;
+
+ attribute_deprecated
+ float rc_qmod_amp;
+ attribute_deprecated
+ int rc_qmod_freq;
+#endif
+
+ /**
+ * decoder bitstream buffer size
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_buffer_size;
+
+ /**
+ * ratecontrol override, see RcOverride
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ int rc_override_count;
+ RcOverride *rc_override;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ const char *rc_eq;
+#endif
+
+ /**
+ * maximum bitrate
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int rc_max_rate;
+
+ /**
+ * minimum bitrate
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_min_rate;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ float rc_buffer_aggressivity;
+
+ attribute_deprecated
+ float rc_initial_cplx;
+#endif
+
+ /**
+ * Ratecontrol attempts to use, at maximum, this fraction of what can be used without an underflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_max_available_vbv_use;
+
+ /**
+ * Ratecontrol attempts to use, at least, this fraction times the amount needed to prevent a vbv overflow.
+ * - encoding: Set by user.
+ * - decoding: unused.
+ */
+ float rc_min_vbv_overflow_use;
+
+ /**
+ * Number of bits which should be loaded into the rc buffer before decoding starts.
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int rc_initial_buffer_occupancy;
+
+#define FF_CODER_TYPE_VLC 0
+#define FF_CODER_TYPE_AC 1
+#define FF_CODER_TYPE_RAW 2
+#define FF_CODER_TYPE_RLE 3
+#if FF_API_UNUSED_MEMBERS
+#define FF_CODER_TYPE_DEFLATE 4
+#endif /* FF_API_UNUSED_MEMBERS */
+ /**
+ * coder type
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int coder_type;
+
+ /**
+ * context model
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int context_model;
+
+#if FF_API_MPV_OPT
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ int lmin;
+
+ /**
+ * @deprecated use encoder private options instead
+ */
+ attribute_deprecated
+ int lmax;
+#endif
+
+ /**
+ * frame skip threshold
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_threshold;
+
+ /**
+ * frame skip factor
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_factor;
+
+ /**
+ * frame skip exponent
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_exp;
+
+ /**
+ * frame skip comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int frame_skip_cmp;
+
+ /**
+ * trellis RD quantization
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int trellis;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int min_prediction_order;
+
+ /**
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int max_prediction_order;
+
+ /**
+ * GOP timecode frame start number
+ * - encoding: Set by user, in non drop frame format
+ * - decoding: Set by libavcodec (timecode in the 25 bits format, -1 if unset)
+ */
+ int64_t timecode_frame_start;
+
+ /* The RTP callback: This function is called */
+ /* every time the encoder has a packet to send. */
+ /* It depends on the encoder if the data starts */
+ /* with a Start Code (it should). H.263 does. */
+ /* mb_nb contains the number of macroblocks */
+ /* encoded in the RTP payload. */
+ void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb);
+
+ int rtp_payload_size; /* The size of the RTP payload: the coder will */
+ /* do its best to deliver a chunk with size */
+ /* below rtp_payload_size, the chunk will start */
+ /* with a start code on some codecs like H.263. */
+ /* This doesn't take account of any particular */
+ /* headers inside the transmitted RTP payload. */
+
+ /* statistics, used for 2-pass encoding */
+ int mv_bits;
+ int header_bits;
+ int i_tex_bits;
+ int p_tex_bits;
+ int i_count;
+ int p_count;
+ int skip_count;
+ int misc_bits;
+
+ /**
+ * number of bits used for the previously encoded frame
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ int frame_bits;
+
+ /**
+ * pass1 encoding statistics output buffer
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ char *stats_out;
+
+ /**
+ * pass2 encoding statistics input buffer
+ * Concatenated stuff from stats_out of pass1 should be placed here.
+ * - encoding: Allocated/set/freed by user.
+ * - decoding: unused
+ */
+ char *stats_in;
+
+ /**
+ * Work around bugs in encoders which sometimes cannot be detected automatically.
+ * - encoding: Set by user
+ * - decoding: Set by user
+ */
+ int workaround_bugs;
+#define FF_BUG_AUTODETECT 1 ///< autodetection
+#if FF_API_OLD_MSMPEG4
+#define FF_BUG_OLD_MSMPEG4 2
+#endif
+#define FF_BUG_XVID_ILACE 4
+#define FF_BUG_UMP4 8
+#define FF_BUG_NO_PADDING 16
+#define FF_BUG_AMV 32
+#if FF_API_AC_VLC
+#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default.
+#endif
+#define FF_BUG_QPEL_CHROMA 64
+#define FF_BUG_STD_QPEL 128
+#define FF_BUG_QPEL_CHROMA2 256
+#define FF_BUG_DIRECT_BLOCKSIZE 512
+#define FF_BUG_EDGE 1024
+#define FF_BUG_HPEL_CHROMA 2048
+#define FF_BUG_DC_CLIP 4096
+#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders.
+#define FF_BUG_TRUNCATED 16384
+
+ /**
+ * strictly follow the standard (MPEG4, ...).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ * Setting this to STRICT or higher means the encoder and decoder will
+ * generally do stupid things, whereas setting it to unofficial or lower
+ * will mean the encoder might produce output that is not supported by all
+ * spec-compliant decoders. Decoders don't differentiate between normal,
+ * unofficial and experimental (that is, they always try to decode things
+ * when they can) unless they are explicitly asked to behave stupidly
+ * (=strictly conform to the specs)
+ */
+ int strict_std_compliance;
+#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software.
+#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences.
+#define FF_COMPLIANCE_NORMAL 0
+#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions
+#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things.
+
+ /**
+ * error concealment flags
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int error_concealment;
+#define FF_EC_GUESS_MVS 1
+#define FF_EC_DEBLOCK 2
+#define FF_EC_FAVOR_INTER 256
+
+ /**
+ * debug
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug;
+#define FF_DEBUG_PICT_INFO 1
+#define FF_DEBUG_RC 2
+#define FF_DEBUG_BITSTREAM 4
+#define FF_DEBUG_MB_TYPE 8
+#define FF_DEBUG_QP 16
+#if FF_API_DEBUG_MV
+/**
+ * @deprecated this option does nothing
+ */
+#define FF_DEBUG_MV 32
+#endif
+#define FF_DEBUG_DCT_COEFF 0x00000040
+#define FF_DEBUG_SKIP 0x00000080
+#define FF_DEBUG_STARTCODE 0x00000100
+#if FF_API_UNUSED_MEMBERS
+#define FF_DEBUG_PTS 0x00000200
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_DEBUG_ER 0x00000400
+#define FF_DEBUG_MMCO 0x00000800
+#define FF_DEBUG_BUGS 0x00001000
+#if FF_API_DEBUG_MV
+#define FF_DEBUG_VIS_QP 0x00002000 ///< only access through AVOptions from outside libavcodec
+#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec
+#endif
+#define FF_DEBUG_BUFFERS 0x00008000
+#define FF_DEBUG_THREADS 0x00010000
+#define FF_DEBUG_NOMC 0x01000000
+
+#if FF_API_DEBUG_MV
+ /**
+ * debug
+ * Code outside libavcodec should access this field using AVOptions
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+ /**
+ * Error recognition; may misdetect some more or less valid parts as errors.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int err_recognition;
+
+/**
+ * Verify checksums embedded in the bitstream (could be of either encoded or
+ * decoded data, depending on the codec) and print an error message on mismatch.
+ * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the
+ * decoder returning an error.
+ */
+#define AV_EF_CRCCHECK (1<<0)
+#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations
+#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length
+#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection
+
+#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue
+#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors
+#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors
+#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error
+
+
+ /**
+ * opaque 64bit number (generally a PTS) that will be reordered and
+ * output in AVFrame.reordered_opaque
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ int64_t reordered_opaque;
+
+ /**
+ * Hardware accelerator in use
+ * - encoding: unused.
+ * - decoding: Set by libavcodec
+ */
+ struct AVHWAccel *hwaccel;
+
+ /**
+ * Hardware accelerator context.
+ * For some hardware accelerators, a global context needs to be
+ * provided by the user. In that case, this holds display-dependent
+ * data FFmpeg cannot instantiate itself. Please refer to the
+ * FFmpeg HW accelerator documentation to know how to fill this.
+ * E.g. for VA API, this is a struct vaapi_context.
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *hwaccel_context;
+
+ /**
+ * error
+ * - encoding: Set by libavcodec if flags&CODEC_FLAG_PSNR.
+ * - decoding: unused
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+ /**
+ * DCT algorithm, see FF_DCT_* below
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int dct_algo;
+#define FF_DCT_AUTO 0
+#define FF_DCT_FASTINT 1
+#if FF_API_UNUSED_MEMBERS
+#define FF_DCT_INT 2
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_DCT_MMX 3
+#define FF_DCT_ALTIVEC 5
+#define FF_DCT_FAAN 6
+
+ /**
+ * IDCT algorithm, see FF_IDCT_* below.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int idct_algo;
+#define FF_IDCT_AUTO 0
+#define FF_IDCT_INT 1
+#define FF_IDCT_SIMPLE 2
+#define FF_IDCT_SIMPLEMMX 3
+#define FF_IDCT_ARM 7
+#define FF_IDCT_ALTIVEC 8
+#if FF_API_ARCH_SH4
+#define FF_IDCT_SH4 9
+#endif
+#define FF_IDCT_SIMPLEARM 10
+#if FF_API_UNUSED_MEMBERS
+#define FF_IDCT_IPP 13
+#endif /* FF_API_UNUSED_MEMBERS */
+#define FF_IDCT_XVID 14
+#if FF_API_IDCT_XVIDMMX
+#define FF_IDCT_XVIDMMX 14
+#endif /* FF_API_IDCT_XVIDMMX */
+#define FF_IDCT_SIMPLEARMV5TE 16
+#define FF_IDCT_SIMPLEARMV6 17
+#if FF_API_ARCH_SPARC
+#define FF_IDCT_SIMPLEVIS 18
+#endif
+#define FF_IDCT_FAAN 20
+#define FF_IDCT_SIMPLENEON 22
+#if FF_API_ARCH_ALPHA
+#define FF_IDCT_SIMPLEALPHA 23
+#endif
+#define FF_IDCT_SIMPLEAUTO 128
+
+ /**
+ * bits per sample/pixel from the demuxer (needed for huffyuv).
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by user.
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * Bits per sample/pixel of internal libavcodec pixel/sample format.
+ * - encoding: set by user.
+ * - decoding: set by libavcodec.
+ */
+ int bits_per_raw_sample;
+
+#if FF_API_LOWRES
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_lowres(avctx)
+ */
+ int lowres;
+#endif
+
+ /**
+ * the picture in the bitstream
+ * - encoding: Set by libavcodec.
+ * - decoding: unused
+ */
+ AVFrame *coded_frame;
+
+ /**
+ * thread count
+ * is used to decide how many independent tasks should be passed to execute()
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_count;
+
+ /**
+ * Which multithreading methods to use.
+ * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread,
+ * so clients which cannot provide future frames should not use it.
+ *
+ * - encoding: Set by user, otherwise the default is used.
+ * - decoding: Set by user, otherwise the default is used.
+ */
+ int thread_type;
+#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once
+#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once
+
+ /**
+ * Which multithreading methods are in use by the codec.
+ * - encoding: Set by libavcodec.
+ * - decoding: Set by libavcodec.
+ */
+ int active_thread_type;
+
+ /**
+ * Set by the client if its custom get_buffer() callback can be called
+ * synchronously from another thread, which allows faster multithreaded decoding.
+ * draw_horiz_band() will be called from other threads regardless of this setting.
+ * Ignored if the default get_buffer() is used.
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int thread_safe_callbacks;
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * @param count the number of things to execute
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size);
+
+ /**
+ * The codec may call this to execute several independent things.
+ * It will return only after finishing all tasks.
+ * The user may replace this with some multithreaded implementation,
+ * the default implementation will execute the parts serially.
+ * Also see avcodec_thread_init and e.g. the --enable-pthread configure option.
+ * @param c context passed also to func
+ * @param count the number of things to execute
+ * @param arg2 argument passed unchanged to func
+ * @param ret return values of executed functions, must have space for "count" values. May be NULL.
+ * @param func function that will be called count times, with jobnr from 0 to count-1.
+ * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no
+ * two instances of func executing at the same time will have the same threadnr.
+ * @return always 0 currently, but code should handle a future improvement where when any call to func
+ * returns < 0 no further calls to func may be done and < 0 is returned.
+ * - encoding: Set by libavcodec, user can override.
+ * - decoding: Set by libavcodec, user can override.
+ */
+ int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count);
+
+#if FF_API_THREAD_OPAQUE
+ /**
+ * @deprecated this field should not be used from outside of lavc
+ */
+ attribute_deprecated
+ void *thread_opaque;
+#endif
+
+ /**
+ * noise vs. sse weight for the nsse comparison function
+ * - encoding: Set by user.
+ * - decoding: unused
+ */
+ int nsse_weight;
+
+ /**
+ * profile
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int profile;
+#define FF_PROFILE_UNKNOWN -99
+#define FF_PROFILE_RESERVED -100
+
+#define FF_PROFILE_AAC_MAIN 0
+#define FF_PROFILE_AAC_LOW 1
+#define FF_PROFILE_AAC_SSR 2
+#define FF_PROFILE_AAC_LTP 3
+#define FF_PROFILE_AAC_HE 4
+#define FF_PROFILE_AAC_HE_V2 28
+#define FF_PROFILE_AAC_LD 22
+#define FF_PROFILE_AAC_ELD 38
+#define FF_PROFILE_MPEG2_AAC_LOW 128
+#define FF_PROFILE_MPEG2_AAC_HE 131
+
+#define FF_PROFILE_DTS 20
+#define FF_PROFILE_DTS_ES 30
+#define FF_PROFILE_DTS_96_24 40
+#define FF_PROFILE_DTS_HD_HRA 50
+#define FF_PROFILE_DTS_HD_MA 60
+
+#define FF_PROFILE_MPEG2_422 0
+#define FF_PROFILE_MPEG2_HIGH 1
+#define FF_PROFILE_MPEG2_SS 2
+#define FF_PROFILE_MPEG2_SNR_SCALABLE 3
+#define FF_PROFILE_MPEG2_MAIN 4
+#define FF_PROFILE_MPEG2_SIMPLE 5
+
+#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag
+#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag
+
+#define FF_PROFILE_H264_BASELINE 66
+#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED)
+#define FF_PROFILE_H264_MAIN 77
+#define FF_PROFILE_H264_EXTENDED 88
+#define FF_PROFILE_H264_HIGH 100
+#define FF_PROFILE_H264_HIGH_10 110
+#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_422 122
+#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_HIGH_444 144
+#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244
+#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA)
+#define FF_PROFILE_H264_CAVLC_444 44
+
+#define FF_PROFILE_VC1_SIMPLE 0
+#define FF_PROFILE_VC1_MAIN 1
+#define FF_PROFILE_VC1_COMPLEX 2
+#define FF_PROFILE_VC1_ADVANCED 3
+
+#define FF_PROFILE_MPEG4_SIMPLE 0
+#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1
+#define FF_PROFILE_MPEG4_CORE 2
+#define FF_PROFILE_MPEG4_MAIN 3
+#define FF_PROFILE_MPEG4_N_BIT 4
+#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5
+#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6
+#define FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7
+#define FF_PROFILE_MPEG4_HYBRID 8
+#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9
+#define FF_PROFILE_MPEG4_CORE_SCALABLE 10
+#define FF_PROFILE_MPEG4_ADVANCED_CODING 11
+#define FF_PROFILE_MPEG4_ADVANCED_CORE 12
+#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13
+#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14
+#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15
+
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 0
+#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 1
+#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 2
+#define FF_PROFILE_JPEG2000_DCINEMA_2K 3
+#define FF_PROFILE_JPEG2000_DCINEMA_4K 4
+
+
+#define FF_PROFILE_HEVC_MAIN 1
+#define FF_PROFILE_HEVC_MAIN_10 2
+#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3
+#define FF_PROFILE_HEVC_REXT 4
+
+ /**
+ * level
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ int level;
+#define FF_LEVEL_UNKNOWN -99
+
+ /**
+ * Skip loop filtering for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_loop_filter;
+
+ /**
+ * Skip IDCT/dequantization for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_idct;
+
+ /**
+ * Skip decoding for selected frames.
+ * - encoding: unused
+ * - decoding: Set by user.
+ */
+ enum AVDiscard skip_frame;
+
+ /**
+ * Header containing style information for text subtitles.
+ * For SUBTITLE_ASS subtitle type, it should contain the whole ASS
+ * [Script Info] and [V4+ Styles] section, plus the [Events] line and
+ * the Format line following. It shouldn't include any Dialogue line.
+ * - encoding: Set/allocated/freed by user (before avcodec_open2())
+ * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2())
+ */
+ uint8_t *subtitle_header;
+ int subtitle_header_size;
+
+#if FF_API_ERROR_RATE
+ /**
+ * @deprecated use the 'error_rate' private AVOption of the mpegvideo
+ * encoders
+ */
+ attribute_deprecated
+ int error_rate;
+#endif
+
+#if FF_API_CODEC_PKT
+ /**
+ * @deprecated this field is not supposed to be accessed from outside lavc
+ */
+ attribute_deprecated
+ AVPacket *pkt;
+#endif
+
+ /**
+ * VBV delay coded in the last frame (in periods of a 27 MHz clock).
+ * Used for compliant TS muxing.
+ * - encoding: Set by libavcodec.
+ * - decoding: unused.
+ */
+ uint64_t vbv_delay;
+
+ /**
+ * Encoding only. Allow encoders to output packets that do not contain any
+ * encoded data, only side data.
+ *
+ * Some encoders need to output such packets, e.g. to update some stream
+ * parameters at the end of encoding.
+ *
+ * All callers are strongly recommended to set this option to 1 and update
+ * their code to deal with such packets, since this behaviour may become
+ * always enabled in the future (then this option will be deprecated and
+ * later removed). To avoid ABI issues when this happens, the callers should
+ * use AVOptions to set this field.
+ */
+ int side_data_only_packets;
+
+ /**
+ * Audio only. The number of "priming" samples (padding) inserted by the
+ * encoder at the beginning of the audio. I.e. this number of leading
+ * decoded samples must be discarded by the caller to get the original audio
+ * without leading padding.
+ *
+ * - decoding: unused
+ * - encoding: Set by libavcodec. The timestamps on the output packets are
+ * adjusted by the encoder so that they always refer to the
+ * first sample of the data actually contained in the packet,
+ * including any added padding. E.g. if the timebase is
+ * 1/samplerate and the timestamp of the first input sample is
+ * 0, the timestamp of the first output packet will be
+ * -initial_padding.
+ */
+ int initial_padding;
+
+ /**
+ * - decoding: For codecs that store a framerate value in the compressed
+ * bitstream, the decoder may export it here. { 0, 1} when
+ * unknown.
+ * - encoding: unused
+ */
+ AVRational framerate;
+
+ /**
+ * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx.
+ * - encoding: unused.
+ * - decoding: Set by libavcodec before calling get_format()
+ */
+ enum AVPixelFormat sw_pix_fmt;
+
+ /**
+ * Timebase in which pkt_dts/pts and AVPacket.dts/pts are.
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_pkt_timebase(avctx)
+ * - encoding unused.
+ * - decoding set by user.
+ */
+ AVRational pkt_timebase;
+
+ /**
+ * AVCodecDescriptor
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_codec_descriptor(avctx)
+ * - encoding: unused.
+ * - decoding: set by libavcodec.
+ */
+ const AVCodecDescriptor *codec_descriptor;
+
+#if !FF_API_LOWRES
+ /**
+ * low resolution decoding, 1-> 1/2 size, 2->1/4 size
+ * - encoding: unused
+ * - decoding: Set by user.
+ * Code outside libavcodec should access this field using:
+ * av_codec_{get,set}_lowres(avctx)
+ */
+ int lowres;
+#endif
+
+ /**
+ * Current statistics for PTS correction.
+ * - decoding: maintained and used by libavcodec, not intended to be used by user apps
+ * - encoding: unused
+ */
+ int64_t pts_correction_num_faulty_pts; ///< Number of incorrect PTS values so far
+ int64_t pts_correction_num_faulty_dts; ///< Number of incorrect DTS values so far
+ int64_t pts_correction_last_pts; ///< PTS of the last frame
+ int64_t pts_correction_last_dts; ///< DTS of the last frame
+
+ /**
+ * Character encoding of the input subtitles file.
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ char *sub_charenc;
+
+ /**
+ * Subtitles character encoding mode. Formats or codecs might be adjusting
+ * this setting (if they are doing the conversion themselves for instance).
+ * - decoding: set by libavcodec
+ * - encoding: unused
+ */
+ int sub_charenc_mode;
+#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance)
+#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself
+#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv
+
+ /**
+ * Skip processing alpha if supported by codec.
+ * Note that if the format uses pre-multiplied alpha (common with VP6,
+ * and recommended due to better video quality/compression)
+ * the image will look as if alpha-blended onto a black background.
+ * However for formats that do not use pre-multiplied alpha
+ * there might be serious artefacts (though e.g. libswscale currently
+ * assumes pre-multiplied alpha anyway).
+ * Code outside libavcodec should access this field using AVOptions
+ *
+ * - decoding: set by user
+ * - encoding: unused
+ */
+ int skip_alpha;
+
+ /**
+ * Number of samples to skip after a discontinuity
+ * - decoding: unused
+ * - encoding: set by libavcodec
+ */
+ int seek_preroll;
+
+#if !FF_API_DEBUG_MV
+ /**
+ * debug motion vectors
+ * Code outside libavcodec should access this field using AVOptions
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ int debug_mv;
+#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames
+#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames
+#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames
+#endif
+
+ /**
+ * custom intra quantization matrix
+ * Code outside libavcodec should access this field using av_codec_g/set_chroma_intra_matrix()
+ * - encoding: Set by user, can be NULL.
+ * - decoding: unused.
+ */
+ uint16_t *chroma_intra_matrix;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * Code outside libavcodec should access this field using AVOptions
+ * (NO direct access).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ uint8_t *dump_separator;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user through AVOptions (NO direct access)
+ */
+ char *codec_whitelist;
+} AVCodecContext;
+
+AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); ///< accessor for AVCodecContext.pkt_timebase
+void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val);
+
+const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); ///< accessor for AVCodecContext.codec_descriptor
+void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc);
+
+int av_codec_get_lowres(const AVCodecContext *avctx); ///< accessor for AVCodecContext.lowres
+void av_codec_set_lowres(AVCodecContext *avctx, int val);
+
+int av_codec_get_seek_preroll(const AVCodecContext *avctx); ///< accessor for AVCodecContext.seek_preroll
+void av_codec_set_seek_preroll(AVCodecContext *avctx, int val);
+
+uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx); ///< accessor for AVCodecContext.chroma_intra_matrix
+void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val);
+
+/**
+ * AVProfile.
+ */
+typedef struct AVProfile {
+ int profile;
+ const char *name; ///< short name for the profile
+} AVProfile;
+
+typedef struct AVCodecDefault AVCodecDefault;
+
+struct AVSubtitle;
+
+/**
+ * AVCodec.
+ */
+typedef struct AVCodec {
+ /**
+ * Name of the codec implementation.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ * This is the primary way to find a codec from the user perspective.
+ */
+ const char *name;
+ /**
+ * Descriptive name for the codec, meant to be more human readable than name.
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *long_name;
+ enum AVMediaType type;
+ enum AVCodecID id;
+ /**
+ * Codec capabilities.
+ * see CODEC_CAP_*
+ */
+ int capabilities;
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0
+#if FF_API_LOWRES
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres()
+#endif
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ int priv_data_size;
+ struct AVCodec *next;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * If defined, called on thread contexts when they are created.
+ * If the codec allocates writable tables in init(), re-allocate them here.
+ * priv_data will be set to a copy of the original.
+ */
+ int (*init_thread_copy)(AVCodecContext *);
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ *
+ * dst and src will (rarely) point to the same context, in which case memcpy should be skipped.
+ */
+ int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src);
+ /** @} */
+
+ /**
+ * Private codec-specific defaults.
+ */
+ const AVCodecDefault *defaults;
+
+ /**
+ * Initialize codec static data, called from avcodec_register().
+ */
+ void (*init_static_data)(struct AVCodec *codec);
+
+ int (*init)(AVCodecContext *);
+ int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size,
+ const struct AVSubtitle *sub);
+ /**
+ * Encode data to an AVPacket.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket (may contain a user-provided buffer)
+ * @param[in] frame AVFrame containing the raw data to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame,
+ int *got_packet_ptr);
+ int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt);
+ int (*close)(AVCodecContext *);
+ /**
+ * Flush buffers.
+ * Will be called when seeking
+ */
+ void (*flush)(AVCodecContext *);
+} AVCodec;
+
+int av_codec_get_max_lowres(const AVCodec *codec); ///< accessor for AVCodec.max_lowres
+
+struct MpegEncContext; /* forward declaration; used by AVHWAccel.decode_mb() */
+
+/**
+ * @defgroup lavc_hwaccel AVHWAccel
+ * @{
+ */
+typedef struct AVHWAccel {
+ /**
+ * Name of the hardware accelerated codec.
+ * The name is globally unique among encoders and among decoders (but an
+ * encoder and a decoder can share the same name).
+ */
+ const char *name;
+
+ /**
+ * Type of codec implemented by the hardware accelerator.
+ *
+ * See AVMEDIA_TYPE_xxx
+ */
+ enum AVMediaType type;
+
+ /**
+ * Codec implemented by the hardware accelerator.
+ *
+ * See AV_CODEC_ID_xxx
+ */
+ enum AVCodecID id;
+
+ /**
+ * Supported pixel format.
+ *
+ * Only hardware accelerated formats are supported here.
+ */
+ enum AVPixelFormat pix_fmt;
+
+ /**
+ * Hardware accelerated codec capabilities.
+ * see FF_HWACCEL_CODEC_CAP_*
+ */
+ int capabilities;
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ struct AVHWAccel *next;
+
+ /**
+ * Allocate a custom buffer
+ */
+ int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame);
+
+ /**
+ * Called at the beginning of each frame or field picture.
+ *
+ * Meaningful frame information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ *
+ * Note that buf can be NULL along with buf_size set to 0.
+ * Otherwise, this means the whole frame is available at this point.
+ *
+ * @param avctx the codec context
+ * @param buf the frame data buffer base
+ * @param buf_size the size of the frame in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Callback for each slice.
+ *
+ * Meaningful slice information (codec specific) is guaranteed to
+ * be parsed at this point. This function is mandatory.
+ * The only exception is XvMC, that works on MB level.
+ *
+ * @param avctx the codec context
+ * @param buf the slice data buffer base
+ * @param buf_size the size of the slice in bytes
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size);
+
+ /**
+ * Called at the end of each frame or field picture.
+ *
+ * The whole picture is parsed at this point and can now be sent
+ * to the hardware accelerator. This function is mandatory.
+ *
+ * @param avctx the codec context
+ * @return zero if successful, a negative value otherwise
+ */
+ int (*end_frame)(AVCodecContext *avctx);
+
+ /**
+ * Size of per-frame hardware accelerator private data.
+ *
+ * Private data is allocated with av_mallocz() before
+ * AVCodecContext.get_buffer() and deallocated after
+ * AVCodecContext.release_buffer().
+ */
+ int frame_priv_data_size;
+
+ /**
+ * Called for every Macroblock in a slice.
+ *
+ * XvMC uses it to replace the ff_mpv_decode_mb().
+ * Instead of decoding to raw picture, MB parameters are
+ * stored in an array provided by the video driver.
+ *
+ * @param s the mpeg context
+ */
+ void (*decode_mb)(struct MpegEncContext *s);
+
+ /**
+ * Initialize the hwaccel private data.
+ *
+ * This will be called from ff_get_format(), after hwaccel and
+ * hwaccel_context are set and the hwaccel private data in AVCodecInternal
+ * is allocated.
+ */
+ int (*init)(AVCodecContext *avctx);
+
+ /**
+ * Uninitialize the hwaccel private data.
+ *
+ * This will be called from get_format() or avcodec_close(), after hwaccel
+ * and hwaccel_context are already uninitialized.
+ */
+ int (*uninit)(AVCodecContext *avctx);
+
+ /**
+ * Size of the private data to allocate in
+ * AVCodecInternal.hwaccel_priv_data.
+ */
+ int priv_data_size;
+} AVHWAccel;
+
+/**
+ * Hardware acceleration should be used for decoding even if the codec level
+ * used is unknown or higher than the maximum supported level reported by the
+ * hardware driver.
+ */
+#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0)
+
+/**
+ * Hardware acceleration can output YUV pixel formats with a different chroma
+ * sampling than 4:2:0 and/or other than 8 bits per component.
+ */
+#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1)
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_picture AVPicture
+ *
+ * Functions for working with AVPicture
+ * @{
+ */
+
+/**
+ * Picture data structure.
+ *
+ * Up to four components can be stored into it, the last component is
+ * alpha.
+ */
+typedef struct AVPicture {
+ uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes
+ int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line
+} AVPicture;
+
+/**
+ * @}
+ */
+
+enum AVSubtitleType {
+ SUBTITLE_NONE, ///< no subtitle data
+
+ SUBTITLE_BITMAP, ///< A bitmap, pict will be set
+
+ /**
+ * Plain text, the text field must be set by the decoder and is
+ * authoritative. ass and pict fields may contain approximations.
+ */
+ SUBTITLE_TEXT,
+
+ /**
+ * Formatted text, the ass field must be set by the decoder and is
+ * authoritative. pict and text fields may contain approximations.
+ */
+ SUBTITLE_ASS,
+};
+
+#define AV_SUBTITLE_FLAG_FORCED 0x00000001
+
+typedef struct AVSubtitleRect {
+ int x; ///< top left corner of pict, undefined when pict is not set
+ int y; ///< top left corner of pict, undefined when pict is not set
+ int w; ///< width of pict, undefined when pict is not set
+ int h; ///< height of pict, undefined when pict is not set
+ int nb_colors; ///< number of colors in pict, undefined when pict is not set
+
+ /**
+ * data+linesize for the bitmap of this subtitle.
+ * can be set for text/ass as well once they are rendered
+ */
+ AVPicture pict;
+ enum AVSubtitleType type; ///< selects which of pict/text/ass is authoritative
+
+ char *text; ///< 0 terminated plain UTF-8 text
+
+ /**
+ * 0 terminated ASS/SSA compatible event line.
+ * The presentation of this is unaffected by the other values in this
+ * struct.
+ */
+ char *ass;
+
+ int flags; ///< combination of AV_SUBTITLE_FLAG_* values
+} AVSubtitleRect;
+
+typedef struct AVSubtitle {
+ uint16_t format; /* 0 = graphics */
+ uint32_t start_display_time; /* relative to packet pts, in ms */
+ uint32_t end_display_time; /* relative to packet pts, in ms */
+ unsigned num_rects; ///< number of entries in rects
+ AVSubtitleRect **rects; ///< array of num_rects subtitle rectangles
+ int64_t pts; ///< Same as packet pts, in AV_TIME_BASE
+} AVSubtitle;
+
+/**
+ * If c is NULL, returns the first registered codec,
+ * if c is non-NULL, returns the next registered codec after c,
+ * or NULL if c is the last one.
+ */
+AVCodec *av_codec_next(const AVCodec *c);
+
+/**
+ * Return the LIBAVCODEC_VERSION_INT constant.
+ */
+unsigned avcodec_version(void);
+
+/**
+ * Return the libavcodec build-time configuration.
+ */
+const char *avcodec_configuration(void);
+
+/**
+ * Return the libavcodec license.
+ */
+const char *avcodec_license(void);
+
+/**
+ * Register the codec codec and initialize libavcodec.
+ *
+ * @warning either this function or avcodec_register_all() must be called
+ * before any other libavcodec functions.
+ *
+ * @see avcodec_register_all()
+ */
+void avcodec_register(AVCodec *codec);
+
+/**
+ * Register all the codecs, parsers and bitstream filters which were enabled at
+ * configuration time. If you do not call this function you can select exactly
+ * which formats you want to support, by using the individual registration
+ * functions.
+ *
+ * @see avcodec_register
+ * @see av_register_codec_parser
+ * @see av_register_bitstream_filter
+ */
+void avcodec_register_all(void);
+
+/**
+ * Allocate an AVCodecContext and set its fields to default values. The
+ * resulting struct should be freed with avcodec_free_context().
+ *
+ * @param codec if non-NULL, allocate private data and initialize defaults
+ * for the given codec. It is illegal to then call avcodec_open2()
+ * with a different codec.
+ * If NULL, then the codec-specific defaults won't be initialized,
+ * which may result in suboptimal default settings (this is
+ * important mainly for encoders, e.g. libx264).
+ *
+ * @return An AVCodecContext filled with default values or NULL on failure.
+ * @see avcodec_get_context_defaults
+ */
+AVCodecContext *avcodec_alloc_context3(const AVCodec *codec);
+
+/**
+ * Free the codec context and everything associated with it and write NULL to
+ * the provided pointer.
+ */
+void avcodec_free_context(AVCodecContext **avctx);
+
+/**
+ * Set the fields of the given AVCodecContext to default values corresponding
+ * to the given codec (defaults may be codec-dependent).
+ *
+ * Do not call this function if a non-NULL codec has been passed
+ * to avcodec_alloc_context3() that allocated this AVCodecContext.
+ * If codec is non-NULL, it is illegal to call avcodec_open2() with a
+ * different codec on this AVCodecContext.
+ */
+int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec);
+
+/**
+ * Get the AVClass for AVCodecContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_class(void);
+
+/**
+ * Get the AVClass for AVFrame. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_frame_class(void);
+
+/**
+ * Get the AVClass for AVSubtitleRect. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avcodec_get_subtitle_rect_class(void);
+
+/**
+ * Copy the settings of the source AVCodecContext into the destination
+ * AVCodecContext. The resulting destination codec context will be
+ * unopened, i.e. you are required to call avcodec_open2() before you
+ * can use this AVCodecContext to decode/encode video/audio data.
+ *
+ * @param dest target codec context, should be initialized with
+ * avcodec_alloc_context3(NULL), but otherwise uninitialized
+ * @param src source codec context
+ * @return AVERROR() on error (e.g. memory allocation error), 0 on success
+ */
+int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src);
+
+#if FF_API_AVFRAME_LAVC
+/**
+ * @deprecated use av_frame_alloc()
+ */
+attribute_deprecated
+AVFrame *avcodec_alloc_frame(void);
+
+/**
+ * Set the fields of the given AVFrame to default values.
+ *
+ * @param frame The AVFrame of which the fields should be set to default values.
+ *
+ * @deprecated use av_frame_unref()
+ */
+attribute_deprecated
+void avcodec_get_frame_defaults(AVFrame *frame);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ *
+ * @warning this function does NOT free the data buffers themselves
+ * (it does not know how, since they might have been allocated with
+ * a custom get_buffer()).
+ *
+ * @deprecated use av_frame_free()
+ */
+attribute_deprecated
+void avcodec_free_frame(AVFrame **frame);
+#endif
+
+/**
+ * Initialize the AVCodecContext to use the given AVCodec. Prior to using this
+ * function the context has to be allocated with avcodec_alloc_context3().
+ *
+ * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(),
+ * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for
+ * retrieving a codec.
+ *
+ * @warning This function is not thread safe!
+ *
+ * @code
+ * avcodec_register_all();
+ * av_dict_set(&opts, "b", "2.5M", 0);
+ * codec = avcodec_find_decoder(AV_CODEC_ID_H264);
+ * if (!codec)
+ * exit(1);
+ *
+ * context = avcodec_alloc_context3(codec);
+ *
+ * if (avcodec_open2(context, codec, opts) < 0)
+ * exit(1);
+ * @endcode
+ *
+ * @param avctx The context to initialize.
+ * @param codec The codec to open this context for. If a non-NULL codec has been
+ * previously passed to avcodec_alloc_context3() or
+ * avcodec_get_context_defaults3() for this context, then this
+ * parameter MUST be either NULL or equal to the previously passed
+ * codec.
+ * @param options A dictionary filled with AVCodecContext and codec-private options.
+ * On return this object will be filled with options that were not found.
+ *
+ * @return zero on success, a negative value on error
+ * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(),
+ * av_dict_set(), av_opt_find().
+ */
+int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options);
+
+/**
+ * Close a given AVCodecContext and free all the data associated with it
+ * (but not the AVCodecContext itself).
+ *
+ * Calling this function on an AVCodecContext that hasn't been opened will free
+ * the codec-specific data allocated in avcodec_alloc_context3() /
+ * avcodec_get_context_defaults3() with a non-NULL codec. Subsequent calls will
+ * do nothing.
+ */
+int avcodec_close(AVCodecContext *avctx);
+
+/**
+ * Free all allocated data in the given subtitle struct.
+ *
+ * @param sub AVSubtitle to free.
+ */
+void avsubtitle_free(AVSubtitle *sub);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_packet
+ * @{
+ */
+
+#if FF_API_DESTRUCT_PACKET
+/**
+ * Default packet destructor.
+ * @deprecated use the AVBuffer API instead
+ */
+attribute_deprecated
+void av_destruct_packet(AVPacket *pkt);
+#endif
+
+/**
+ * Initialize optional fields of a packet with default values.
+ *
+ * Note, this does not touch the data and size members, which have to be
+ * initialized separately.
+ *
+ * @param pkt packet
+ */
+void av_init_packet(AVPacket *pkt);
+
+/**
+ * Allocate the payload of a packet and initialize its fields with
+ * default values.
+ *
+ * @param pkt packet
+ * @param size wanted payload size
+ * @return 0 if OK, AVERROR_xxx otherwise
+ */
+int av_new_packet(AVPacket *pkt, int size);
+
+/**
+ * Reduce packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param size new size
+ */
+void av_shrink_packet(AVPacket *pkt, int size);
+
+/**
+ * Increase packet size, correctly zeroing padding
+ *
+ * @param pkt packet
+ * @param grow_by number of bytes by which to increase the size of the packet
+ */
+int av_grow_packet(AVPacket *pkt, int grow_by);
+
+/**
+ * Initialize a reference-counted packet from av_malloc()ed data.
+ *
+ * @param pkt packet to be initialized. This function will set the data, size,
+ * buf and destruct fields, all others are left untouched.
+ * @param data Data allocated by av_malloc() to be used as packet data. If this
+ * function returns successfully, the data is owned by the underlying AVBuffer.
+ * The caller may not access the data through other means.
+ * @param size size of data in bytes, without the padding. I.e. the full buffer
+ * size is assumed to be size + FF_INPUT_BUFFER_PADDING_SIZE.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size);
+
+/**
+ * @warning This is a hack - the packet memory allocation stuff is broken. The
+ * packet is allocated if it was not really allocated.
+ */
+int av_dup_packet(AVPacket *pkt);
+
+/**
+ * Copy packet, including contents
+ *
+ * @return 0 on success, negative AVERROR on fail
+ */
+int av_copy_packet(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Copy packet side data
+ *
+ * @return 0 on success, negative AVERROR on fail
+ */
+int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Free a packet.
+ *
+ * @param pkt packet to free
+ */
+void av_free_packet(AVPacket *pkt);
+
+/**
+ * Allocate new information of a packet.
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size side information size
+ * @return pointer to fresh allocated data or NULL otherwise
+ */
+uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Shrink the already allocated side data buffer
+ *
+ * @param pkt packet
+ * @param type side information type
+ * @param size new side information size
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int size);
+
+/**
+ * Get side information from packet.
+ *
+ * @param pkt packet
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type,
+ int *size);
+
+int av_packet_merge_side_data(AVPacket *pkt);
+
+int av_packet_split_side_data(AVPacket *pkt);
+
+/**
+ * Pack a dictionary for use in side_data.
+ *
+ * @param dict The dictionary to pack.
+ * @param size pointer to store the size of the returned data
+ * @return pointer to data if successful, NULL otherwise
+ */
+uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size);
+/**
+ * Unpack a dictionary from side_data.
+ *
+ * @param data data from side_data
+ * @param size size of the data
+ * @param dict the metadata storage dictionary
+ * @return 0 on success, < 0 on failure
+ */
+int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict);
+
+
+/**
+ * Convenience function to free all the side data stored.
+ * All the other fields stay untouched.
+ *
+ * @param pkt packet
+ */
+void av_packet_free_side_data(AVPacket *pkt);
+
+/**
+ * Setup a new reference to the data described by a given packet
+ *
+ * If src is reference-counted, setup dst as a new reference to the
+ * buffer in src. Otherwise allocate a new buffer in dst and copy the
+ * data from src into it.
+ *
+ * All the other fields are copied from src.
+ *
+ * @see av_packet_unref
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_packet_ref(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Wipe the packet.
+ *
+ * Unreference the buffer referenced by the packet and reset the
+ * remaining packet fields to their default values.
+ *
+ * @param pkt The packet to be unreferenced.
+ */
+void av_packet_unref(AVPacket *pkt);
+
+/**
+ * Move every field in src to dst and reset src.
+ *
+ * @see av_packet_unref
+ *
+ * @param src Source packet, will be reset
+ * @param dst Destination packet
+ */
+void av_packet_move_ref(AVPacket *dst, AVPacket *src);
+
+/**
+ * Copy only "properties" fields from src to dst.
+ *
+ * Properties for the purpose of this function are all the fields
+ * beside those related to the packet data (buf, data, size)
+ *
+ * @param dst Destination packet
+ * @param src Source packet
+ *
+ * @return 0 on success AVERROR on failure.
+ *
+ */
+int av_packet_copy_props(AVPacket *dst, const AVPacket *src);
+
+/**
+ * Convert valid timing fields (timestamps / durations) in a packet from one
+ * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be
+ * ignored.
+ *
+ * @param pkt packet on which the conversion will be performed
+ * @param tb_src source timebase, in which the timing fields in pkt are
+ * expressed
+ * @param tb_dst destination timebase, to which the timing fields will be
+ * converted
+ */
+void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_decoding
+ * @{
+ */
+
+/**
+ * Find a registered decoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder(enum AVCodecID id);
+
+/**
+ * Find a registered decoder with the specified name.
+ *
+ * @param name name of the requested decoder
+ * @return A decoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_decoder_by_name(const char *name);
+
+#if FF_API_GET_BUFFER
+attribute_deprecated int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic);
+attribute_deprecated int avcodec_default_reget_buffer(AVCodecContext *s, AVFrame *pic);
+#endif
+
+/**
+ * The default callback for AVCodecContext.get_buffer2(). It is made public so
+ * it can be called by custom get_buffer2() implementations for decoders without
+ * CODEC_CAP_DR1 set.
+ */
+int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags);
+
+#if FF_API_EMU_EDGE
+/**
+ * Return the amount of padding in pixels which the get_buffer callback must
+ * provide around the edge of the image for codecs which do not have the
+ * CODEC_FLAG_EMU_EDGE flag.
+ *
+ * @return Required padding in pixels.
+ *
+ * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer
+ * needed
+ */
+attribute_deprecated
+unsigned avcodec_get_edge_width(void);
+#endif
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you do not use any horizontal
+ * padding.
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height);
+
+/**
+ * Modify width and height values so that they will result in a memory
+ * buffer that is acceptable for the codec if you also ensure that all
+ * line sizes are a multiple of the respective linesize_align[i].
+ *
+ * May only be used if a codec with CODEC_CAP_DR1 has been opened.
+ */
+void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height,
+ int linesize_align[AV_NUM_DATA_POINTERS]);
+
+/**
+ * Converts AVChromaLocation to swscale x/y chroma position.
+ *
+ * The positions represent the chroma (0,0) position in a coordinates system
+ * with luma (0,0) representing the origin and luma (1,1) representing (256,256)
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ */
+int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos);
+
+/**
+ * Converts swscale x/y chroma position to AVChromaLocation.
+ *
+ * The positions represent the chroma (0,0) position in a coordinates system
+ * with luma (0,0) representing the origin and luma (1,1) representing (256,256)
+ *
+ * @param xpos horizontal chroma sample position
+ * @param ypos vertical chroma sample position
+ */
+enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos);
+
+#if FF_API_OLD_DECODE_AUDIO
+/**
+ * Wrapper function which calls avcodec_decode_audio4.
+ *
+ * @deprecated Use avcodec_decode_audio4 instead.
+ *
+ * Decode the audio frame of size avpkt->size from avpkt->data into samples.
+ * Some decoders may support multiple frames in a single AVPacket, such
+ * decoders would then just decode the first frame. In this case,
+ * avcodec_decode_audio3 has to be called again with an AVPacket that contains
+ * the remaining data in order to decode the second frame etc.
+ * If no frame
+ * could be output, frame_size_ptr is zero. Otherwise, it is the
+ * decompressed frame size in bytes.
+ *
+ * @warning You must set frame_size_ptr to the allocated size of the
+ * output buffer before calling avcodec_decode_audio3().
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer avpkt->data should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @warning You must not provide a custom get_buffer() when using
+ * avcodec_decode_audio3(). Doing so will override it with
+ * avcodec_default_get_buffer. Use avcodec_decode_audio4() instead,
+ * which does allow the application to provide a custom get_buffer().
+ *
+ * @note You might have to align the input buffer avpkt->data and output buffer
+ * samples. The alignment requirements depend on the CPU: On some CPUs it isn't
+ * necessary at all, on others it won't work at all if not aligned and on others
+ * it will work but it will have an impact on performance.
+ *
+ * In practice, avpkt->data should have 4 byte alignment at minimum and
+ * samples should be 16 byte aligned unless the CPU doesn't need it
+ * (AltiVec and SSE do).
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] samples the output buffer, sample type in avctx->sample_fmt
+ * If the sample format is planar, each channel plane will
+ * be the same size, with no padding between channels.
+ * @param[in,out] frame_size_ptr the output buffer size in bytes
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such packet with av_init_packet() and by then setting
+ * data and size, some decoders might in addition need other fields.
+ * All decoders are designed to use the least fields possible though.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame data was decompressed (used) from the input AVPacket.
+ */
+attribute_deprecated int avcodec_decode_audio3(AVCodecContext *avctx, int16_t *samples,
+ int *frame_size_ptr,
+ AVPacket *avpkt);
+#endif
+
+/**
+ * Decode the audio frame of size avpkt->size from avpkt->data into frame.
+ *
+ * Some decoders may support multiple frames in a single AVPacket. Such
+ * decoders would then just decode the first frame and the return value would be
+ * less than the packet size. In this case, avcodec_decode_audio4 has to be
+ * called again with an AVPacket containing the remaining data in order to
+ * decode the second frame, etc... Even if no frames are returned, the packet
+ * needs to be fed to the decoder with remaining data until it is completely
+ * consumed or an error occurs.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning samples. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no samples will be returned.
+ *
+ * @warning The input buffer, avpkt->data must be FF_INPUT_BUFFER_PADDING_SIZE
+ * larger than the actual read bytes because some optimized bitstream
+ * readers read 32 or 64 bits at once and could read over the end.
+ *
+ * @param avctx the codec context
+ * @param[out] frame The AVFrame in which to store decoded audio samples.
+ * The decoder will allocate a buffer for the decoded frame by
+ * calling the AVCodecContext.get_buffer2() callback.
+ * When AVCodecContext.refcounted_frames is set to 1, the frame is
+ * reference counted and the returned reference belongs to the
+ * caller. The caller must release the frame using av_frame_unref()
+ * when the frame is no longer needed. The caller may safely write
+ * to the frame if av_frame_is_writable() returns 1.
+ * When AVCodecContext.refcounted_frames is set to 0, the returned
+ * reference belongs to the decoder and is valid only until the
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
+ * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is
+ * non-zero. Note that this field being set to zero
+ * does not mean that an error has occurred. For
+ * decoders with CODEC_CAP_DELAY set, no given decode
+ * call is guaranteed to produce a frame.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * At least avpkt->data and avpkt->size should be set. Some
+ * decoders might also require additional fields to be set.
+ * @return A negative error code is returned if an error occurred during
+ * decoding, otherwise the number of bytes consumed from the input
+ * AVPacket is returned.
+ */
+int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame,
+ int *got_frame_ptr, const AVPacket *avpkt);
+
+/**
+ * Decode the video frame of size avpkt->size from avpkt->data into picture.
+ * Some decoders may support multiple frames in a single AVPacket, such
+ * decoders would then just decode the first frame.
+ *
+ * @warning The input buffer must be FF_INPUT_BUFFER_PADDING_SIZE larger than
+ * the actual read bytes because some optimized bitstream readers read 32 or 64
+ * bits at once and could read over the end.
+ *
+ * @warning The end of the input buffer buf should be set to 0 to ensure that
+ * no overreading happens for damaged MPEG streams.
+ *
+ * @note Codecs which have the CODEC_CAP_DELAY capability set have a delay
+ * between input and output, these need to be fed with avpkt->data=NULL,
+ * avpkt->size=0 at the end to return the remaining frames.
+ *
+ * @param avctx the codec context
+ * @param[out] picture The AVFrame in which the decoded video frame will be stored.
+ * Use av_frame_alloc() to get an AVFrame. The codec will
+ * allocate memory for the actual bitmap by calling the
+ * AVCodecContext.get_buffer2() callback.
+ * When AVCodecContext.refcounted_frames is set to 1, the frame is
+ * reference counted and the returned reference belongs to the
+ * caller. The caller must release the frame using av_frame_unref()
+ * when the frame is no longer needed. The caller may safely write
+ * to the frame if av_frame_is_writable() returns 1.
+ * When AVCodecContext.refcounted_frames is set to 0, the returned
+ * reference belongs to the decoder and is valid only until the
+ * next call to this function or until closing or flushing the
+ * decoder. The caller may not write to it.
+ *
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ * You can create such packet with av_init_packet() and by then setting
+ * data and size, some decoders might in addition need other fields like
+ * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least
+ * fields possible.
+ * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero.
+ * @return On error a negative value is returned, otherwise the number of bytes
+ * used or zero if no frame could be decompressed.
+ */
+int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture,
+ int *got_picture_ptr,
+ const AVPacket *avpkt);
+
+/**
+ * Decode a subtitle message.
+ * Return a negative value on error, otherwise return the number of bytes used.
+ * If no subtitle could be decompressed, got_sub_ptr is zero.
+ * Otherwise, the subtitle is stored in *sub.
+ * Note that CODEC_CAP_DR1 is not available for subtitle codecs. This is for
+ * simplicity, because the performance difference is expected to be negligible
+ * and reusing a get_buffer written for video codecs would probably perform badly
+ * due to a potentially very different allocation pattern.
+ *
+ * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input
+ * and output. This means that for some packets they will not immediately
+ * produce decoded output and need to be flushed at the end of decoding to get
+ * all the decoded data. Flushing is done by calling this function with packets
+ * with avpkt->data set to NULL and avpkt->size set to 0 until it stops
+ * returning subtitles. It is safe to flush even those decoders that are not
+ * marked with CODEC_CAP_DELAY, then no subtitles will be returned.
+ *
+ * @param avctx the codec context
+ * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored,
+ * must be freed with avsubtitle_free if *got_sub_ptr is set.
+ * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero.
+ * @param[in] avpkt The input AVPacket containing the input buffer.
+ */
+int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
+ int *got_sub_ptr,
+ AVPacket *avpkt);
+
+/**
+ * @defgroup lavc_parsing Frame parsing
+ * @{
+ */
+
+enum AVPictureStructure {
+    AV_PICTURE_STRUCTURE_UNKNOWN,      ///< unknown
+    AV_PICTURE_STRUCTURE_TOP_FIELD,    ///< coded as top field
+    AV_PICTURE_STRUCTURE_BOTTOM_FIELD, ///< coded as bottom field
+    AV_PICTURE_STRUCTURE_FRAME,        ///< coded as frame
+};
+
+/**
+ * Parser state; allocated with av_parser_init() and freed with
+ * av_parser_close().
+ */
+typedef struct AVCodecParserContext {
+    void *priv_data;
+    struct AVCodecParser *parser;
+    int64_t frame_offset; /* offset of the current frame */
+    int64_t cur_offset; /* current offset
+                           (incremented by each av_parser_parse()) */
+    int64_t next_frame_offset; /* offset of the next frame */
+    /* video info */
+    int pict_type; /* XXX: Put it back in AVCodecContext. */
+    /**
+     * This field is used for proper frame duration computation in lavf.
+     * It signals, how much longer the frame duration of the current frame
+     * is compared to normal frame duration.
+     *
+     * frame_duration = (1 + repeat_pict) * time_base
+     *
+     * It is used by codecs like H.264 to display telecined material.
+     */
+    int repeat_pict; /* XXX: Put it back in AVCodecContext. */
+    int64_t pts;     /* pts of the current frame */
+    int64_t dts;     /* dts of the current frame */
+
+    /* private data */
+    int64_t last_pts;
+    int64_t last_dts;
+    int fetch_timestamp;
+
+#define AV_PARSER_PTS_NB 4
+    int cur_frame_start_index;
+    int64_t cur_frame_offset[AV_PARSER_PTS_NB];
+    int64_t cur_frame_pts[AV_PARSER_PTS_NB];
+    int64_t cur_frame_dts[AV_PARSER_PTS_NB];
+
+    int flags;
+#define PARSER_FLAG_COMPLETE_FRAMES           0x0001
+#define PARSER_FLAG_ONCE                      0x0002
+/// Set if the parser has a valid file offset
+#define PARSER_FLAG_FETCHED_OFFSET            0x0004
+#define PARSER_FLAG_USE_CODEC_TS              0x1000
+
+    int64_t offset;      ///< byte offset from starting packet start
+    int64_t cur_frame_end[AV_PARSER_PTS_NB];
+
+    /**
+     * Set by parser to 1 for key frames and 0 for non-key frames.
+     * It is initialized to -1, so if the parser doesn't set this flag,
+     * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames
+     * will be used.
+     */
+    int key_frame;
+
+    /**
+     * Time difference in stream time base units from the pts of this
+     * packet to the point at which the output from the decoder has converged
+     * independent from the availability of previous frames. That is, the
+     * frames are virtually identical no matter if decoding started from
+     * the very first frame or from this keyframe.
+     * Is AV_NOPTS_VALUE if unknown.
+     * This field is not the display duration of the current frame.
+     * This field has no meaning if the packet does not have AV_PKT_FLAG_KEY
+     * set.
+     *
+     * The purpose of this field is to allow seeking in streams that have no
+     * keyframes in the conventional sense. It corresponds to the
+     * recovery point SEI in H.264 and match_time_delta in NUT. It is also
+     * essential for some types of subtitle streams to ensure that all
+     * subtitles are correctly displayed after seeking.
+     */
+    int64_t convergence_duration;
+
+    // Timestamp generation support:
+    /**
+     * Synchronization point for start of timestamp generation.
+     *
+     * Set to >0 for sync point, 0 for no sync point and <0 for undefined
+     * (default).
+     *
+     * For example, this corresponds to presence of H.264 buffering period
+     * SEI message.
+     */
+    int dts_sync_point;
+
+    /**
+     * Offset of the current timestamp against last timestamp sync point in
+     * units of AVCodecContext.time_base.
+     *
+     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+     * contain a valid timestamp offset.
+     *
+     * Note that the timestamp of sync point has usually a nonzero
+     * dts_ref_dts_delta, which refers to the previous sync point. Offset of
+     * the next frame after timestamp sync point will be usually 1.
+     *
+     * For example, this corresponds to H.264 cpb_removal_delay.
+     */
+    int dts_ref_dts_delta;
+
+    /**
+     * Presentation delay of current frame in units of AVCodecContext.time_base.
+     *
+     * Set to INT_MIN when dts_sync_point unused. Otherwise, it must
+     * contain valid non-negative timestamp delta (presentation time of a frame
+     * must not lie in the past).
+     *
+     * This delay represents the difference between decoding and presentation
+     * time of the frame.
+     *
+     * For example, this corresponds to H.264 dpb_output_delay.
+     */
+    int pts_dts_delta;
+
+    /**
+     * Position of the packet in file.
+     *
+     * Analogous to cur_frame_pts/dts
+     */
+    int64_t cur_frame_pos[AV_PARSER_PTS_NB];
+
+    /**
+     * Byte position of currently parsed frame in stream.
+     */
+    int64_t pos;
+
+    /**
+     * Previous frame byte position.
+     */
+    int64_t last_pos;
+
+    /**
+     * Duration of the current frame.
+     * For audio, this is in units of 1 / AVCodecContext.sample_rate.
+     * For all other types, this is in units of AVCodecContext.time_base.
+     */
+    int duration;
+
+    enum AVFieldOrder field_order;
+
+    /**
+     * Indicate whether a picture is coded as a frame, top field or bottom field.
+     *
+     * For example, H.264 field_pic_flag equal to 0 corresponds to
+     * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag
+     * equal to 1 and bottom_field_flag equal to 0 corresponds to
+     * AV_PICTURE_STRUCTURE_TOP_FIELD.
+     */
+    enum AVPictureStructure picture_structure;
+
+    /**
+     * Picture number incremented in presentation or output order.
+     * This field may be reinitialized at the first picture of a new sequence.
+     *
+     * For example, this corresponds to H.264 PicOrderCnt.
+     */
+    int output_picture_number;
+} AVCodecParserContext;
+
+/**
+ * Descriptor of a codec parser implementation; made available to
+ * av_parser_init() via av_register_codec_parser().
+ */
+typedef struct AVCodecParser {
+    int codec_ids[5]; /* several codec IDs are permitted */
+    int priv_data_size;
+    int (*parser_init)(AVCodecParserContext *s);
+    /* This callback never returns an error, a negative value means that
+     * the frame start was in a previous packet. */
+    int (*parser_parse)(AVCodecParserContext *s,
+                        AVCodecContext *avctx,
+                        const uint8_t **poutbuf, int *poutbuf_size,
+                        const uint8_t *buf, int buf_size);
+    void (*parser_close)(AVCodecParserContext *s);
+    int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
+    /* next parser in the registration list; see av_parser_next() */
+    struct AVCodecParser *next;
+} AVCodecParser;
+
+AVCodecParser *av_parser_next(const AVCodecParser *c);
+
+void av_register_codec_parser(AVCodecParser *parser);
+AVCodecParserContext *av_parser_init(int codec_id);
+
+/**
+ * Parse a packet.
+ *
+ * @param s parser context.
+ * @param avctx codec context.
+ * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished.
+ * @param poutbuf_size set to size of parsed buffer or zero if not yet finished.
+ * @param buf input buffer.
+ * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output).
+ * @param pts input presentation timestamp.
+ * @param dts input decoding timestamp.
+ * @param pos input byte position in stream.
+ * @return the number of bytes of the input bitstream used.
+ *
+ * Example:
+ * @code
+ * while(in_len){
+ * len = av_parser_parse2(myparser, AVCodecContext, &data, &size,
+ * in_data, in_len,
+ * pts, dts, pos);
+ * in_data += len;
+ * in_len -= len;
+ *
+ * if(size)
+ * decode_frame(data, size);
+ * }
+ * @endcode
+ */
+int av_parser_parse2(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size,
+ int64_t pts, int64_t dts,
+ int64_t pos);
+
+/**
+ * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed
+ * @deprecated use AVBitStreamFilter
+ */
+int av_parser_change(AVCodecParserContext *s,
+ AVCodecContext *avctx,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+void av_parser_close(AVCodecParserContext *s);
+
+/**
+ * @}
+ * @}
+ */
+
+/**
+ * @addtogroup lavc_encoding
+ * @{
+ */
+
+/**
+ * Find a registered encoder with a matching codec ID.
+ *
+ * @param id AVCodecID of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder(enum AVCodecID id);
+
+/**
+ * Find a registered encoder with the specified name.
+ *
+ * @param name name of the requested encoder
+ * @return An encoder if one was found, NULL otherwise.
+ */
+AVCodec *avcodec_find_encoder_by_name(const char *name);
+
+#if FF_API_OLD_ENCODE_AUDIO
+/**
+ * Encode an audio frame from samples into buf.
+ *
+ * @deprecated Use avcodec_encode_audio2 instead.
+ *
+ * @note The output buffer should be at least FF_MIN_BUFFER_SIZE bytes large.
+ * However, for codecs with avctx->frame_size equal to 0 (e.g. PCM) the user
+ * will know how much space is needed because it depends on the value passed
+ * in buf_size as described below. In that case a lower value can be used.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer
+ * @param[in] buf_size the output buffer size
+ * @param[in] samples the input buffer containing the samples
+ * The number of samples read from this buffer is frame_size*channels,
+ * both of which are defined in avctx.
+ * For codecs which have avctx->frame_size equal to 0 (e.g. PCM) the number of
+ * samples read from samples is equal to:
+ * buf_size * 8 / (avctx->channels * av_get_bits_per_sample(avctx->codec_id))
+ * This also implies that av_get_bits_per_sample() must not return 0 for these
+ * codecs.
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used to encode the data read from the input buffer.
+ */
+int attribute_deprecated avcodec_encode_audio(AVCodecContext *avctx,
+ uint8_t *buf, int buf_size,
+ const short *samples);
+#endif
+
+/**
+ * Encode a frame of audio.
+ *
+ * Takes input samples from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay, split, and combine input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. If avpkt->data and
+ * avpkt->size are set, avpkt->destruct must also be set. All
+ * other AVPacket fields will be reset by the encoder using
+ * av_init_packet(). If avpkt->data is NULL, the encoder will
+ * allocate it. The encoder will set avpkt->size to the size
+ * of the output packet.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw audio data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * If CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame
+ * can have any number of samples.
+ * If it is not set, frame->nb_samples must be equal to
+ * avctx->frame_size for all frames except the last.
+ * The final frame may be smaller than avctx->frame_size.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
+
+#if FF_API_OLD_ENCODE_VIDEO
+/**
+ * @deprecated use avcodec_encode_video2() instead.
+ *
+ * Encode a video frame from pict into buf.
+ * The input picture should be
+ * stored using a specific format, namely avctx.pix_fmt.
+ *
+ * @param avctx the codec context
+ * @param[out] buf the output buffer for the bitstream of encoded frame
+ * @param[in] buf_size the size of the output buffer in bytes
+ * @param[in] pict the input picture to encode
+ * @return On error a negative value is returned, on success zero or the number
+ * of bytes used from the output buffer.
+ */
+attribute_deprecated
+int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVFrame *pict);
+#endif
+
+/**
+ * Encode a frame of video.
+ *
+ * Takes input raw video data from frame and writes the next output packet, if
+ * available, to avpkt. The output packet does not necessarily contain data for
+ * the most recent frame, as encoders can delay and reorder input frames
+ * internally as needed.
+ *
+ * @param avctx codec context
+ * @param avpkt output AVPacket.
+ * The user can supply an output buffer by setting
+ * avpkt->data and avpkt->size prior to calling the
+ * function, but if the size of the user-provided data is not
+ * large enough, encoding will fail. All other AVPacket fields
+ * will be reset by the encoder using av_init_packet(). If
+ * avpkt->data is NULL, the encoder will allocate it.
+ * The encoder will set avpkt->size to the size of the
+ * output packet. The returned data (if any) belongs to the
+ * caller, he is responsible for freeing it.
+ *
+ * If this function fails or produces no output, avpkt will be
+ * freed using av_free_packet() (i.e. avpkt->destruct will be
+ * called to free the user supplied buffer).
+ * @param[in] frame AVFrame containing the raw video data to be encoded.
+ * May be NULL when flushing an encoder that has the
+ * CODEC_CAP_DELAY capability set.
+ * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the
+ * output packet is non-empty, and to 0 if it is
+ * empty. If the function returns an error, the
+ * packet can be assumed to be invalid, and the
+ * value of got_packet_ptr is undefined and should
+ * not be used.
+ * @return 0 on success, negative error code on failure
+ */
+int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt,
+ const AVFrame *frame, int *got_packet_ptr);
+
+int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
+ const AVSubtitle *sub);
+
+
+/**
+ * @}
+ */
+
+#if FF_API_AVCODEC_RESAMPLE
+/**
+ * @defgroup lavc_resample Audio resampling
+ * @ingroup libavc
+ * @deprecated use libswresample instead
+ *
+ * @{
+ */
+struct ReSampleContext;
+struct AVResampleContext;
+
+typedef struct ReSampleContext ReSampleContext;
+
+/**
+ * Initialize audio resampling context.
+ *
+ * @param output_channels number of output channels
+ * @param input_channels number of input channels
+ * @param output_rate output sample rate
+ * @param input_rate input sample rate
+ * @param sample_fmt_out requested output sample format
+ * @param sample_fmt_in input sample format
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear if 1 then the used FIR filter will be linearly interpolated
+ *                      between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ * @return allocated ReSampleContext, NULL if error occurred
+ */
+attribute_deprecated
+ReSampleContext *av_audio_resample_init(int output_channels, int input_channels,
+ int output_rate, int input_rate,
+ enum AVSampleFormat sample_fmt_out,
+ enum AVSampleFormat sample_fmt_in,
+ int filter_length, int log2_phase_count,
+ int linear, double cutoff);
+
+attribute_deprecated
+int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples);
+
+/**
+ * Free resample context.
+ *
+ * @param s a non-NULL pointer to a resample context previously
+ * created with av_audio_resample_init()
+ */
+attribute_deprecated
+void audio_resample_close(ReSampleContext *s);
+
+
+/**
+ * Initialize an audio resampler.
+ * Note, if either rate is not an integer then simply scale both rates up so they are.
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq
+ * @param log2_phase_count log2 of the number of entries in the polyphase filterbank
+ * @param linear If 1 then the used FIR filter will be linearly interpolated
+ *                      between the 2 closest, if 0 the closest will be used
+ * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate
+ */
+attribute_deprecated
+struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff);
+
+/**
+ * Resample an array of samples using a previously configured context.
+ * @param src an array of unconsumed samples
+ * @param consumed the number of samples of src which have been consumed are returned here
+ * @param src_size the number of unconsumed samples available
+ * @param dst_size the amount of space in samples available in dst
+ * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context.
+ * @return the number of samples written in dst or -1 if an error occurred
+ */
+attribute_deprecated
+int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx);
+
+
+/**
+ * Compensate samplerate/timestamp drift. The compensation is done by changing
+ * the resampler parameters, so no audible clicks or similar distortions occur
+ * @param compensation_distance distance in output samples over which the compensation should be performed
+ * @param sample_delta number of output samples which should be output less
+ *
+ * example: av_resample_compensate(c, 10, 500)
+ * here instead of 510 samples only 500 samples would be output
+ *
+ * note, due to rounding the actual compensation might be slightly different,
+ * especially if the compensation_distance is large and the in_rate used during init is small
+ */
+attribute_deprecated
+void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance);
+attribute_deprecated
+void av_resample_close(struct AVResampleContext *c);
+
+/**
+ * @}
+ */
+#endif
+
+/**
+ * @addtogroup lavc_picture
+ * @{
+ */
+
+/**
+ * Allocate memory for the pixels of a picture and setup the AVPicture
+ * fields for it.
+ *
+ * Call avpicture_free() to free it.
+ *
+ * @param picture the picture structure to be filled in
+ * @param pix_fmt the pixel format of the picture
+ * @param width the width of the picture
+ * @param height the height of the picture
+ * @return zero if successful, a negative error code otherwise
+ *
+ * @see av_image_alloc(), avpicture_fill()
+ */
+int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Free a picture previously allocated by avpicture_alloc().
+ * The data buffer used by the AVPicture is freed, but the AVPicture structure
+ * itself is not.
+ *
+ * @param picture the AVPicture to be freed
+ */
+void avpicture_free(AVPicture *picture);
+
+/**
+ * Setup the picture fields based on the specified image parameters
+ * and the provided image data buffer.
+ *
+ * The picture fields are filled in by using the image data buffer
+ * pointed to by ptr.
+ *
+ * If ptr is NULL, the function will fill only the picture linesize
+ * array and return the required size for the image buffer.
+ *
+ * To allocate an image buffer and fill the picture data in one call,
+ * use avpicture_alloc().
+ *
+ * @param picture the picture to be filled in
+ * @param ptr buffer where the image data is stored, or NULL
+ * @param pix_fmt the pixel format of the image
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @return the size in bytes required for src, a negative error code
+ * in case of failure
+ *
+ * @see av_image_fill_arrays()
+ */
+int avpicture_fill(AVPicture *picture, const uint8_t *ptr,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Copy pixel data from an AVPicture into a buffer.
+ *
+ * avpicture_get_size() can be used to compute the required size for
+ * the buffer to fill.
+ *
+ * @param src source picture with filled data
+ * @param pix_fmt picture pixel format
+ * @param width picture width
+ * @param height picture height
+ * @param dest destination buffer
+ * @param dest_size destination buffer size in bytes
+ * @return the number of bytes written to dest, or a negative value
+ * (error code) on error, for example if the destination buffer is not
+ * big enough
+ *
+ * @see av_image_copy_to_buffer()
+ */
+int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt,
+ int width, int height,
+ unsigned char *dest, int dest_size);
+
+/**
+ * Calculate the size in bytes that a picture of the given width and height
+ * would occupy if stored in the given picture format.
+ *
+ * @param pix_fmt picture pixel format
+ * @param width picture width
+ * @param height picture height
+ * @return the computed picture buffer size or a negative error code
+ * in case of error
+ *
+ * @see av_image_get_buffer_size().
+ */
+int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height);
+
+#if FF_API_DEINTERLACE
+/**
+ * deinterlace - if not supported return -1
+ *
+ * @deprecated - use yadif (in libavfilter) instead
+ */
+attribute_deprecated
+int avpicture_deinterlace(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+#endif
+/**
+ * Copy image src to dst. Wraps av_image_copy().
+ */
+void av_picture_copy(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Crop image top and left side.
+ */
+int av_picture_crop(AVPicture *dst, const AVPicture *src,
+ enum AVPixelFormat pix_fmt, int top_band, int left_band);
+
+/**
+ * Pad image.
+ */
+int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt,
+ int padtop, int padbottom, int padleft, int padright, int *color);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavc_misc Utility functions
+ * @ingroup libavc
+ *
+ * Miscellaneous utility functions related to both encoding and decoding
+ * (or neither).
+ * @{
+ */
+
+/**
+ * @defgroup lavc_misc_pixfmt Pixel formats
+ *
+ * Functions for working with pixel formats.
+ * @{
+ */
+
+/**
+ * Utility function to access log2_chroma_w log2_chroma_h from
+ * the pixel format AVPixFmtDescriptor.
+ *
+ * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample
+ * for one that returns a failure code and continues in case of invalid
+ * pix_fmts.
+ *
+ * @param[in] pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w
+ * @param[out] v_shift store log2_chroma_h
+ *
+ * @see av_pix_fmt_get_chroma_sub_sample
+ */
+
+void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift);
+
+/**
+ * Return a value representing the fourCC code associated to the
+ * pixel format pix_fmt, or 0 if no associated fourCC code can be
+ * found.
+ */
+unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt);
+
+/**
+ * @deprecated see av_get_pix_fmt_loss()
+ */
+int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt,
+ int has_alpha);
+
+/**
+ * Find the best pixel format to convert to given a certain source pixel
+ * format. When converting from one pixel format to another, information loss
+ * may occur. For example, when converting from RGB24 to GRAY, the color
+ * information will be lost. Similarly, other losses occur when converting from
+ * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of
+ * the given pixel formats should be used to suffer the least amount of loss.
+ * The pixel formats from which it chooses one, are determined by the
+ * pix_fmt_list parameter.
+ *
+ *
+ * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur.
+ * @return The best pixel format to convert to or -1 if none was found.
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+
+/**
+ * @deprecated see av_find_best_pix_fmt_of_2()
+ */
+enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+
+attribute_deprecated
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha, int *loss_ptr);
+#else
+enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+#endif
+
+
+enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt);
+
+/**
+ * @}
+ */
+
+#if FF_API_SET_DIMENSIONS
+/**
+ * @deprecated this function is not supposed to be used from outside of lavc
+ */
+attribute_deprecated
+void avcodec_set_dimensions(AVCodecContext *s, int width, int height);
+#endif
+
+/**
+ * Put a string representing the codec tag codec_tag in buf.
+ *
+ * @param buf buffer to place codec tag in
+ * @param buf_size size in bytes of buf
+ * @param codec_tag codec tag to assign
+ * @return the length of the string that would have been generated if
+ * enough space had been available, excluding the trailing null
+ */
+size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag);
+
+void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode);
+
+/**
+ * Return a name for the specified profile, if available.
+ *
+ * @param codec the codec that is searched for the given profile
+ * @param profile the profile value for which a name is requested
+ * @return A name for the profile if found, NULL otherwise.
+ */
+const char *av_get_profile_name(const AVCodec *codec, int profile);
+
+int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size);
+int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count);
+//FIXME func typedef
+
+/**
+ * Fill AVFrame audio data and linesize pointers.
+ *
+ * The buffer buf must be a preallocated buffer with a size big enough
+ * to contain the specified samples amount. The filled AVFrame data
+ * pointers will point to this buffer.
+ *
+ * AVFrame extended_data channel pointers are allocated if necessary for
+ * planar audio.
+ *
+ * @param frame the AVFrame
+ * frame->nb_samples must be set prior to calling the
+ * function. This function fills in frame->data,
+ * frame->extended_data, frame->linesize[0].
+ * @param nb_channels channel count
+ * @param sample_fmt sample format
+ * @param buf buffer to use for frame data
+ * @param buf_size size of buffer
+ * @param align plane size sample alignment (0 = default)
+ * @return >=0 on success, negative error code on failure
+ * @todo return the size in bytes required to store the samples in
+ * case of success, at the next libavutil bump
+ */
+int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels,
+ enum AVSampleFormat sample_fmt, const uint8_t *buf,
+ int buf_size, int align);
+
+/**
+ * Reset the internal decoder state / flush internal buffers. Should be called
+ * e.g. when seeking or when switching to a different stream.
+ *
+ * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0),
+ * this invalidates the frames previously returned from the decoder. When
+ * refcounted frames are used, the decoder just releases any references it might
+ * keep internally, but the caller's reference remains valid.
+ */
+void avcodec_flush_buffers(AVCodecContext *avctx);
+
+/**
+ * Return codec bits per sample.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return the PCM codec associated with a sample format.
+ * @param be endianness, 0 for little, 1 for big,
+ * -1 (or anything else) for native
+ * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE
+ */
+enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be);
+
+/**
+ * Return codec bits per sample.
+ * Only return non-zero if the bits per sample is exactly correct, not an
+ * approximation.
+ *
+ * @param[in] codec_id the codec
+ * @return Number of bits per sample or zero if unknown for the given codec.
+ */
+int av_get_exact_bits_per_sample(enum AVCodecID codec_id);
+
+/**
+ * Return audio frame duration.
+ *
+ * @param avctx codec context
+ * @param frame_bytes size of the frame, or 0 if unknown
+ * @return frame duration, in samples, if known. 0 if not able to
+ * determine.
+ */
+int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes);
+
+
+typedef struct AVBitStreamFilterContext {
+ void *priv_data;
+ struct AVBitStreamFilter *filter;
+ AVCodecParserContext *parser;
+ struct AVBitStreamFilterContext *next;
+} AVBitStreamFilterContext;
+
+
+typedef struct AVBitStreamFilter {
+ const char *name;
+ int priv_data_size;
+ int (*filter)(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+ void (*close)(AVBitStreamFilterContext *bsfc);
+ struct AVBitStreamFilter *next;
+} AVBitStreamFilter;
+
+/**
+ * Register a bitstream filter.
+ *
+ * The filter will be accessible to the application code through
+ * av_bitstream_filter_next() or can be directly initialized with
+ * av_bitstream_filter_init().
+ *
+ * @see avcodec_register_all()
+ */
+void av_register_bitstream_filter(AVBitStreamFilter *bsf);
+
+/**
+ * Create and initialize a bitstream filter context given a bitstream
+ * filter name.
+ *
+ * The returned context must be freed with av_bitstream_filter_close().
+ *
+ * @param name the name of the bitstream filter
+ * @return a bitstream filter context if a matching filter was found
+ * and successfully initialized, NULL otherwise
+ */
+AVBitStreamFilterContext *av_bitstream_filter_init(const char *name);
+
+/**
+ * Filter bitstream.
+ *
+ * This function filters the buffer buf with size buf_size, and places the
+ * filtered buffer in the buffer pointed to by poutbuf.
+ *
+ * The output buffer must be freed by the caller.
+ *
+ * @param bsfc bitstream filter context created by av_bitstream_filter_init()
+ * @param avctx AVCodecContext accessed by the filter, may be NULL.
+ * If specified, this must point to the encoder context of the
+ * output stream the packet is sent to.
+ * @param args arguments which specify the filter configuration, may be NULL
+ * @param poutbuf pointer which is updated to point to the filtered buffer
+ * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes
+ * @param buf buffer containing the data to filter
+ * @param buf_size size in bytes of buf
+ * @param keyframe set to non-zero if the buffer to filter corresponds to a key-frame packet data
+ * @return >= 0 in case of success, or a negative error code in case of failure
+ *
+ * If the return value is positive, an output buffer is allocated and
+ * is available in *poutbuf, and is distinct from the input buffer.
+ *
+ * If the return value is 0, the output buffer is not allocated and
+ * should be considered identical to the input buffer, or in case
+ * *poutbuf was set it points to the input buffer (not necessarily to
+ * its starting address).
+ */
+int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc,
+ AVCodecContext *avctx, const char *args,
+ uint8_t **poutbuf, int *poutbuf_size,
+ const uint8_t *buf, int buf_size, int keyframe);
+
+/**
+ * Release bitstream filter context.
+ *
+ * @param bsf the bitstream filter context created with
+ * av_bitstream_filter_init(), can be NULL
+ */
+void av_bitstream_filter_close(AVBitStreamFilterContext *bsf);
+
+/**
+ * If f is NULL, return the first registered bitstream filter,
+ * if f is non-NULL, return the next registered bitstream filter
+ * after f, or NULL if f is the last one.
+ *
+ * This function can be used to iterate over all registered bitstream
+ * filters.
+ */
+AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f);
+
+/* memory */
+
+/**
+ * Same behaviour av_fast_malloc but the buffer has additional
+ * FF_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0.
+ *
+ * In addition the whole buffer will initially and after resizes
+ * be 0-initialized so that no uninitialized data will ever appear.
+ */
+void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Same behaviour av_fast_padded_malloc except that buffer will always
+ * be 0-initialized after call.
+ */
+void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Encode extradata length to a buffer. Used by xiph codecs.
+ *
+ * @param s buffer to write to; must be at least (v/255+1) bytes long
+ * @param v size of extradata in bytes
+ * @return number of bytes written to the buffer.
+ */
+unsigned int av_xiphlacing(unsigned char *s, unsigned int v);
+
+#if FF_API_MISSING_SAMPLE
+/**
+ * Log a generic warning message about a missing feature. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] feature string containing the name of the missing feature
+ * @param[in] want_sample indicates if samples are wanted which exhibit this feature.
+ * If want_sample is non-zero, additional verbage will be added to the log
+ * message which tells the user how to report samples to the development
+ * mailing list.
+ * @deprecated Use avpriv_report_missing_feature() instead.
+ */
+attribute_deprecated
+void av_log_missing_feature(void *avc, const char *feature, int want_sample);
+
+/**
+ * Log a generic warning message asking for a sample. This function is
+ * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.)
+ * only, and would normally not be used by applications.
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is
+ * a pointer to an AVClass struct
+ * @param[in] msg string containing an optional message, or NULL if no message
+ * @deprecated Use avpriv_request_sample() instead.
+ */
+attribute_deprecated
+void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3);
+#endif /* FF_API_MISSING_SAMPLE */
+
+/**
+ * Register the hardware accelerator hwaccel.
+ */
+void av_register_hwaccel(AVHWAccel *hwaccel);
+
+/**
+ * If hwaccel is NULL, returns the first registered hardware accelerator,
+ * if hwaccel is non-NULL, returns the next registered hardware accelerator
+ * after hwaccel, or NULL if hwaccel is the last one.
+ */
+AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel);
+
+
+/**
+ * Lock operation used by lockmgr
+ */
+enum AVLockOp {
+ AV_LOCK_CREATE, ///< Create a mutex
+ AV_LOCK_OBTAIN, ///< Lock the mutex
+ AV_LOCK_RELEASE, ///< Unlock the mutex
+ AV_LOCK_DESTROY, ///< Free mutex resources
+};
+
+/**
+ * Register a user provided lock manager supporting the operations
+ * specified by AVLockOp. The "mutex" argument to the function points
+ * to a (void *) where the lockmgr should store/get a pointer to a user
+ * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the
+ * value left by the last call for all other ops. If the lock manager is
+ * unable to perform the op then it should leave the mutex in the same
+ * state as when it was called and return a non-zero value. However,
+ * when called with AV_LOCK_DESTROY the mutex will always be assumed to
+ * have been successfully destroyed. If av_lockmgr_register succeeds
+ * it will return a non-negative value, if it fails it will return a
+ * negative value and destroy all mutex and unregister all callbacks.
+ * av_lockmgr_register is not thread-safe, it must be called from a
+ * single thread before any calls which make use of locking are used.
+ *
+ * @param cb User defined callback. av_lockmgr_register invokes calls
+ * to this callback and the previously registered callback.
+ * The callback will be used to create more than one mutex
+ * each of which must be backed by its own underlying locking
+ * mechanism (i.e. do not use a single static object to
+ * implement your lock manager). If cb is set to NULL the
+ * lockmgr will be unregistered.
+ */
+int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op));
+
+/**
+ * Get the type of the given codec.
+ */
+enum AVMediaType avcodec_get_type(enum AVCodecID codec_id);
+
+/**
+ * Get the name of a codec.
+ * @return a static string identifying the codec; never NULL
+ */
+const char *avcodec_get_name(enum AVCodecID id);
+
+/**
+ * @return a positive value if s is open (i.e. avcodec_open2() was called on it
+ * with no corresponding avcodec_close()), 0 otherwise.
+ */
+int avcodec_is_open(AVCodecContext *s);
+
+/**
+ * @return a non-zero number if codec is an encoder, zero otherwise
+ */
+int av_codec_is_encoder(const AVCodec *codec);
+
+/**
+ * @return a non-zero number if codec is a decoder, zero otherwise
+ */
+int av_codec_is_decoder(const AVCodec *codec);
+
+/**
+ * @return descriptor for given codec ID or NULL if no descriptor exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id);
+
+/**
+ * Iterate over all codec descriptors known to libavcodec.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev);
+
+/**
+ * @return codec descriptor with the given name or NULL if no such descriptor
+ * exists.
+ */
+const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVCODEC_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/avfft.h b/Externals/ffmpeg/dev/include/libavcodec/avfft.h
new file mode 100644
index 0000000000..0c0f9b8d8d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/avfft.h
@@ -0,0 +1,118 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_AVFFT_H
+#define AVCODEC_AVFFT_H
+
+/**
+ * @file
+ * @ingroup lavc_fft
+ * FFT functions
+ */
+
+/**
+ * @defgroup lavc_fft FFT functions
+ * @ingroup lavc_misc
+ *
+ * @{
+ */
+
+typedef float FFTSample;
+
+typedef struct FFTComplex {
+ FFTSample re, im;
+} FFTComplex;
+
+typedef struct FFTContext FFTContext;
+
+/**
+ * Set up a complex FFT.
+ * @param nbits log2 of the length of the input array
+ * @param inverse if 0 perform the forward transform, if 1 perform the inverse
+ */
+FFTContext *av_fft_init(int nbits, int inverse);
+
+/**
+ * Do the permutation needed BEFORE calling ff_fft_calc().
+ */
+void av_fft_permute(FFTContext *s, FFTComplex *z);
+
+/**
+ * Do a complex FFT with the parameters defined in av_fft_init(). The
+ * input data must be permuted before. No 1.0/sqrt(n) normalization is done.
+ */
+void av_fft_calc(FFTContext *s, FFTComplex *z);
+
+void av_fft_end(FFTContext *s);
+
+FFTContext *av_mdct_init(int nbits, int inverse, double scale);
+void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input);
+void av_mdct_end(FFTContext *s);
+
+/* Real Discrete Fourier Transform */
+
+enum RDFTransformType {
+ DFT_R2C,
+ IDFT_C2R,
+ IDFT_R2C,
+ DFT_C2R,
+};
+
+typedef struct RDFTContext RDFTContext;
+
+/**
+ * Set up a real FFT.
+ * @param nbits log2 of the length of the input array
+ * @param trans the type of transform
+ */
+RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans);
+void av_rdft_calc(RDFTContext *s, FFTSample *data);
+void av_rdft_end(RDFTContext *s);
+
+/* Discrete Cosine Transform */
+
+typedef struct DCTContext DCTContext;
+
+enum DCTTransformType {
+ DCT_II = 0,
+ DCT_III,
+ DCT_I,
+ DST_I,
+};
+
+/**
+ * Set up DCT.
+ *
+ * @param nbits size of the input array:
+ * (1 << nbits) for DCT-II, DCT-III and DST-I
+ * (1 << nbits) + 1 for DCT-I
+ * @param type the type of transform
+ *
+ * @note the first element of the input of DST-I is ignored
+ */
+DCTContext *av_dct_init(int nbits, enum DCTTransformType type);
+void av_dct_calc(DCTContext *s, FFTSample *data);
+void av_dct_end (DCTContext *s);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_AVFFT_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/dv_profile.h b/Externals/ffmpeg/dev/include/libavcodec/dv_profile.h
new file mode 100644
index 0000000000..d22ad2663f
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/dv_profile.h
@@ -0,0 +1,92 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DV_PROFILE_H
+#define AVCODEC_DV_PROFILE_H
+
+#include <stdint.h>
+
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+#include "avcodec.h"
+
+/* minimum number of bytes to read from a DV stream in order to
+ * determine the profile */
+#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */
+
+
+/*
+ * AVDVProfile is used to express the differences between various
+ * DV flavors. For now it's primarily used for differentiating
+ * 525/60 and 625/50, but the plans are to use it for various
+ * DV specs as well (e.g. SMPTE314M vs. IEC 61834).
+ */
+typedef struct AVDVProfile {
+ int dsf; /* value of the dsf in the DV header */
+ int video_stype; /* stype for VAUX source pack */
+ int frame_size; /* total size of one frame in bytes */
+ int difseg_size; /* number of DIF segments per DIF channel */
+ int n_difchan; /* number of DIF channels per frame */
+ AVRational time_base; /* 1/framerate */
+ int ltc_divisor; /* FPS from the LTS standpoint */
+ int height; /* picture height in pixels */
+ int width; /* picture width in pixels */
+ AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */
+ enum AVPixelFormat pix_fmt; /* picture pixel format */
+ int bpm; /* blocks per macroblock */
+ const uint8_t *block_sizes; /* AC block sizes, in bits */
+ int audio_stride; /* size of audio_shuffle table */
+ int audio_min_samples[3]; /* min amount of audio samples */
+ /* for 48kHz, 44.1kHz and 32kHz */
+ int audio_samples_dist[5]; /* how many samples are supposed to be */
+ /* in each frame in a 5 frames window */
+ const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */
+} AVDVProfile;
+
+#if FF_API_DV_FRAME_PROFILE
+/**
+ * @deprecated use av_dv_frame_profile()
+ */
+attribute_deprecated
+const AVDVProfile* avpriv_dv_frame_profile2(AVCodecContext* codec, const AVDVProfile *sys,
+ const uint8_t* frame, unsigned buf_size);
+#endif
+
+/**
+ * Get a DV profile for the provided compressed frame.
+ *
+ * @param sys the profile used for the previous frame, may be NULL
+ * @param frame the compressed data buffer
+ * @param buf_size size of the buffer in bytes
+ * @return the DV profile for the supplied data or NULL on failure
+ */
+const AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys,
+ const uint8_t *frame, unsigned buf_size);
+
+/**
+ * Get a DV profile for the provided stream parameters.
+ */
+const AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt);
+
+/**
+ * Get a DV profile for the provided stream parameters.
+ * The frame rate is used as a best-effort parameter.
+ */
+const AVDVProfile *av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate);
+
+#endif /* AVCODEC_DV_PROFILE_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/dxva2.h b/Externals/ffmpeg/dev/include/libavcodec/dxva2.h
new file mode 100644
index 0000000000..be246d7198
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/dxva2.h
@@ -0,0 +1,93 @@
+/*
+ * DXVA2 HW acceleration
+ *
+ * copyright (c) 2009 Laurent Aimar
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_DXVA_H
+#define AVCODEC_DXVA_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_dxva2
+ * Public libavcodec DXVA2 header.
+ */
+
+#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0600
+#undef _WIN32_WINNT
+#define _WIN32_WINNT 0x0600
+#endif
+
+#include <stdint.h>
+#include <d3d9.h>
+#include <dxva2api.h>
+
+/**
+ * @defgroup lavc_codec_hwaccel_dxva2 DXVA2
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards
+#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface
+
+/**
+ * This structure is used to provides the necessary configurations and data
+ * to the DXVA2 FFmpeg HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct dxva_context {
+ /**
+ * DXVA2 decoder object
+ */
+ IDirectXVideoDecoder *decoder;
+
+ /**
+ * DXVA2 configuration used to create the decoder
+ */
+ const DXVA2_ConfigPictureDecode *cfg;
+
+ /**
+ * The number of surface in the surface array
+ */
+ unsigned surface_count;
+
+ /**
+ * The array of Direct3D surfaces used to create the decoder
+ */
+ LPDIRECT3DSURFACE9 *surface;
+
+ /**
+ * A bit field configuring the workarounds needed for using the decoder
+ */
+ uint64_t workaround;
+
+ /**
+ * Private to the FFmpeg AVHWAccel implementation
+ */
+ unsigned report_id;
+};
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_DXVA_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/old_codec_ids.h b/Externals/ffmpeg/dev/include/libavcodec/old_codec_ids.h
new file mode 100644
index 0000000000..c7aa0e0a12
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/old_codec_ids.h
@@ -0,0 +1,397 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_OLD_CODEC_IDS_H
+#define AVCODEC_OLD_CODEC_IDS_H
+
+/*
+ * This header exists to prevent new codec IDs from being accidentally added to
+ * the deprecated list.
+ * Do not include it directly. It will be removed on next major bump
+ *
+ * Do not add new items to this list. Use the AVCodecID enum instead.
+ */
+
+ CODEC_ID_NONE = AV_CODEC_ID_NONE,
+
+ /* video codecs */
+ CODEC_ID_MPEG1VIDEO,
+ CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding
+#if FF_API_XVMC
+ CODEC_ID_MPEG2VIDEO_XVMC,
+#endif
+ CODEC_ID_H261,
+ CODEC_ID_H263,
+ CODEC_ID_RV10,
+ CODEC_ID_RV20,
+ CODEC_ID_MJPEG,
+ CODEC_ID_MJPEGB,
+ CODEC_ID_LJPEG,
+ CODEC_ID_SP5X,
+ CODEC_ID_JPEGLS,
+ CODEC_ID_MPEG4,
+ CODEC_ID_RAWVIDEO,
+ CODEC_ID_MSMPEG4V1,
+ CODEC_ID_MSMPEG4V2,
+ CODEC_ID_MSMPEG4V3,
+ CODEC_ID_WMV1,
+ CODEC_ID_WMV2,
+ CODEC_ID_H263P,
+ CODEC_ID_H263I,
+ CODEC_ID_FLV1,
+ CODEC_ID_SVQ1,
+ CODEC_ID_SVQ3,
+ CODEC_ID_DVVIDEO,
+ CODEC_ID_HUFFYUV,
+ CODEC_ID_CYUV,
+ CODEC_ID_H264,
+ CODEC_ID_INDEO3,
+ CODEC_ID_VP3,
+ CODEC_ID_THEORA,
+ CODEC_ID_ASV1,
+ CODEC_ID_ASV2,
+ CODEC_ID_FFV1,
+ CODEC_ID_4XM,
+ CODEC_ID_VCR1,
+ CODEC_ID_CLJR,
+ CODEC_ID_MDEC,
+ CODEC_ID_ROQ,
+ CODEC_ID_INTERPLAY_VIDEO,
+ CODEC_ID_XAN_WC3,
+ CODEC_ID_XAN_WC4,
+ CODEC_ID_RPZA,
+ CODEC_ID_CINEPAK,
+ CODEC_ID_WS_VQA,
+ CODEC_ID_MSRLE,
+ CODEC_ID_MSVIDEO1,
+ CODEC_ID_IDCIN,
+ CODEC_ID_8BPS,
+ CODEC_ID_SMC,
+ CODEC_ID_FLIC,
+ CODEC_ID_TRUEMOTION1,
+ CODEC_ID_VMDVIDEO,
+ CODEC_ID_MSZH,
+ CODEC_ID_ZLIB,
+ CODEC_ID_QTRLE,
+ CODEC_ID_TSCC,
+ CODEC_ID_ULTI,
+ CODEC_ID_QDRAW,
+ CODEC_ID_VIXL,
+ CODEC_ID_QPEG,
+ CODEC_ID_PNG,
+ CODEC_ID_PPM,
+ CODEC_ID_PBM,
+ CODEC_ID_PGM,
+ CODEC_ID_PGMYUV,
+ CODEC_ID_PAM,
+ CODEC_ID_FFVHUFF,
+ CODEC_ID_RV30,
+ CODEC_ID_RV40,
+ CODEC_ID_VC1,
+ CODEC_ID_WMV3,
+ CODEC_ID_LOCO,
+ CODEC_ID_WNV1,
+ CODEC_ID_AASC,
+ CODEC_ID_INDEO2,
+ CODEC_ID_FRAPS,
+ CODEC_ID_TRUEMOTION2,
+ CODEC_ID_BMP,
+ CODEC_ID_CSCD,
+ CODEC_ID_MMVIDEO,
+ CODEC_ID_ZMBV,
+ CODEC_ID_AVS,
+ CODEC_ID_SMACKVIDEO,
+ CODEC_ID_NUV,
+ CODEC_ID_KMVC,
+ CODEC_ID_FLASHSV,
+ CODEC_ID_CAVS,
+ CODEC_ID_JPEG2000,
+ CODEC_ID_VMNC,
+ CODEC_ID_VP5,
+ CODEC_ID_VP6,
+ CODEC_ID_VP6F,
+ CODEC_ID_TARGA,
+ CODEC_ID_DSICINVIDEO,
+ CODEC_ID_TIERTEXSEQVIDEO,
+ CODEC_ID_TIFF,
+ CODEC_ID_GIF,
+ CODEC_ID_DXA,
+ CODEC_ID_DNXHD,
+ CODEC_ID_THP,
+ CODEC_ID_SGI,
+ CODEC_ID_C93,
+ CODEC_ID_BETHSOFTVID,
+ CODEC_ID_PTX,
+ CODEC_ID_TXD,
+ CODEC_ID_VP6A,
+ CODEC_ID_AMV,
+ CODEC_ID_VB,
+ CODEC_ID_PCX,
+ CODEC_ID_SUNRAST,
+ CODEC_ID_INDEO4,
+ CODEC_ID_INDEO5,
+ CODEC_ID_MIMIC,
+ CODEC_ID_RL2,
+ CODEC_ID_ESCAPE124,
+ CODEC_ID_DIRAC,
+ CODEC_ID_BFI,
+ CODEC_ID_CMV,
+ CODEC_ID_MOTIONPIXELS,
+ CODEC_ID_TGV,
+ CODEC_ID_TGQ,
+ CODEC_ID_TQI,
+ CODEC_ID_AURA,
+ CODEC_ID_AURA2,
+ CODEC_ID_V210X,
+ CODEC_ID_TMV,
+ CODEC_ID_V210,
+ CODEC_ID_DPX,
+ CODEC_ID_MAD,
+ CODEC_ID_FRWU,
+ CODEC_ID_FLASHSV2,
+ CODEC_ID_CDGRAPHICS,
+ CODEC_ID_R210,
+ CODEC_ID_ANM,
+ CODEC_ID_BINKVIDEO,
+ CODEC_ID_IFF_ILBM,
+ CODEC_ID_IFF_BYTERUN1,
+ CODEC_ID_KGV1,
+ CODEC_ID_YOP,
+ CODEC_ID_VP8,
+ CODEC_ID_PICTOR,
+ CODEC_ID_ANSI,
+ CODEC_ID_A64_MULTI,
+ CODEC_ID_A64_MULTI5,
+ CODEC_ID_R10K,
+ CODEC_ID_MXPEG,
+ CODEC_ID_LAGARITH,
+ CODEC_ID_PRORES,
+ CODEC_ID_JV,
+ CODEC_ID_DFA,
+ CODEC_ID_WMV3IMAGE,
+ CODEC_ID_VC1IMAGE,
+ CODEC_ID_UTVIDEO,
+ CODEC_ID_BMV_VIDEO,
+ CODEC_ID_VBLE,
+ CODEC_ID_DXTORY,
+ CODEC_ID_V410,
+ CODEC_ID_XWD,
+ CODEC_ID_CDXL,
+ CODEC_ID_XBM,
+ CODEC_ID_ZEROCODEC,
+ CODEC_ID_MSS1,
+ CODEC_ID_MSA1,
+ CODEC_ID_TSCC2,
+ CODEC_ID_MTS2,
+ CODEC_ID_CLLC,
+ CODEC_ID_Y41P = MKBETAG('Y','4','1','P'),
+ CODEC_ID_ESCAPE130 = MKBETAG('E','1','3','0'),
+ CODEC_ID_EXR = MKBETAG('0','E','X','R'),
+ CODEC_ID_AVRP = MKBETAG('A','V','R','P'),
+
+ CODEC_ID_G2M = MKBETAG( 0 ,'G','2','M'),
+ CODEC_ID_AVUI = MKBETAG('A','V','U','I'),
+ CODEC_ID_AYUV = MKBETAG('A','Y','U','V'),
+ CODEC_ID_V308 = MKBETAG('V','3','0','8'),
+ CODEC_ID_V408 = MKBETAG('V','4','0','8'),
+ CODEC_ID_YUV4 = MKBETAG('Y','U','V','4'),
+ CODEC_ID_SANM = MKBETAG('S','A','N','M'),
+ CODEC_ID_PAF_VIDEO = MKBETAG('P','A','F','V'),
+ CODEC_ID_SNOW = AV_CODEC_ID_SNOW,
+
+ /* various PCM "codecs" */
+ CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs
+ CODEC_ID_PCM_S16LE = 0x10000,
+ CODEC_ID_PCM_S16BE,
+ CODEC_ID_PCM_U16LE,
+ CODEC_ID_PCM_U16BE,
+ CODEC_ID_PCM_S8,
+ CODEC_ID_PCM_U8,
+ CODEC_ID_PCM_MULAW,
+ CODEC_ID_PCM_ALAW,
+ CODEC_ID_PCM_S32LE,
+ CODEC_ID_PCM_S32BE,
+ CODEC_ID_PCM_U32LE,
+ CODEC_ID_PCM_U32BE,
+ CODEC_ID_PCM_S24LE,
+ CODEC_ID_PCM_S24BE,
+ CODEC_ID_PCM_U24LE,
+ CODEC_ID_PCM_U24BE,
+ CODEC_ID_PCM_S24DAUD,
+ CODEC_ID_PCM_ZORK,
+ CODEC_ID_PCM_S16LE_PLANAR,
+ CODEC_ID_PCM_DVD,
+ CODEC_ID_PCM_F32BE,
+ CODEC_ID_PCM_F32LE,
+ CODEC_ID_PCM_F64BE,
+ CODEC_ID_PCM_F64LE,
+ CODEC_ID_PCM_BLURAY,
+ CODEC_ID_PCM_LXF,
+ CODEC_ID_S302M,
+ CODEC_ID_PCM_S8_PLANAR,
+
+ /* various ADPCM codecs */
+ CODEC_ID_ADPCM_IMA_QT = 0x11000,
+ CODEC_ID_ADPCM_IMA_WAV,
+ CODEC_ID_ADPCM_IMA_DK3,
+ CODEC_ID_ADPCM_IMA_DK4,
+ CODEC_ID_ADPCM_IMA_WS,
+ CODEC_ID_ADPCM_IMA_SMJPEG,
+ CODEC_ID_ADPCM_MS,
+ CODEC_ID_ADPCM_4XM,
+ CODEC_ID_ADPCM_XA,
+ CODEC_ID_ADPCM_ADX,
+ CODEC_ID_ADPCM_EA,
+ CODEC_ID_ADPCM_G726,
+ CODEC_ID_ADPCM_CT,
+ CODEC_ID_ADPCM_SWF,
+ CODEC_ID_ADPCM_YAMAHA,
+ CODEC_ID_ADPCM_SBPRO_4,
+ CODEC_ID_ADPCM_SBPRO_3,
+ CODEC_ID_ADPCM_SBPRO_2,
+ CODEC_ID_ADPCM_THP,
+ CODEC_ID_ADPCM_IMA_AMV,
+ CODEC_ID_ADPCM_EA_R1,
+ CODEC_ID_ADPCM_EA_R3,
+ CODEC_ID_ADPCM_EA_R2,
+ CODEC_ID_ADPCM_IMA_EA_SEAD,
+ CODEC_ID_ADPCM_IMA_EA_EACS,
+ CODEC_ID_ADPCM_EA_XAS,
+ CODEC_ID_ADPCM_EA_MAXIS_XA,
+ CODEC_ID_ADPCM_IMA_ISS,
+ CODEC_ID_ADPCM_G722,
+ CODEC_ID_ADPCM_IMA_APC,
+ CODEC_ID_VIMA = MKBETAG('V','I','M','A'),
+
+ /* AMR */
+ CODEC_ID_AMR_NB = 0x12000,
+ CODEC_ID_AMR_WB,
+
+ /* RealAudio codecs*/
+ CODEC_ID_RA_144 = 0x13000,
+ CODEC_ID_RA_288,
+
+ /* various DPCM codecs */
+ CODEC_ID_ROQ_DPCM = 0x14000,
+ CODEC_ID_INTERPLAY_DPCM,
+ CODEC_ID_XAN_DPCM,
+ CODEC_ID_SOL_DPCM,
+
+ /* audio codecs */
+ CODEC_ID_MP2 = 0x15000,
+ CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3
+ CODEC_ID_AAC,
+ CODEC_ID_AC3,
+ CODEC_ID_DTS,
+ CODEC_ID_VORBIS,
+ CODEC_ID_DVAUDIO,
+ CODEC_ID_WMAV1,
+ CODEC_ID_WMAV2,
+ CODEC_ID_MACE3,
+ CODEC_ID_MACE6,
+ CODEC_ID_VMDAUDIO,
+ CODEC_ID_FLAC,
+ CODEC_ID_MP3ADU,
+ CODEC_ID_MP3ON4,
+ CODEC_ID_SHORTEN,
+ CODEC_ID_ALAC,
+ CODEC_ID_WESTWOOD_SND1,
+ CODEC_ID_GSM, ///< as in Berlin toast format
+ CODEC_ID_QDM2,
+ CODEC_ID_COOK,
+ CODEC_ID_TRUESPEECH,
+ CODEC_ID_TTA,
+ CODEC_ID_SMACKAUDIO,
+ CODEC_ID_QCELP,
+ CODEC_ID_WAVPACK,
+ CODEC_ID_DSICINAUDIO,
+ CODEC_ID_IMC,
+ CODEC_ID_MUSEPACK7,
+ CODEC_ID_MLP,
+ CODEC_ID_GSM_MS, /* as found in WAV */
+ CODEC_ID_ATRAC3,
+ CODEC_ID_VOXWARE,
+ CODEC_ID_APE,
+ CODEC_ID_NELLYMOSER,
+ CODEC_ID_MUSEPACK8,
+ CODEC_ID_SPEEX,
+ CODEC_ID_WMAVOICE,
+ CODEC_ID_WMAPRO,
+ CODEC_ID_WMALOSSLESS,
+ CODEC_ID_ATRAC3P,
+ CODEC_ID_EAC3,
+ CODEC_ID_SIPR,
+ CODEC_ID_MP1,
+ CODEC_ID_TWINVQ,
+ CODEC_ID_TRUEHD,
+ CODEC_ID_MP4ALS,
+ CODEC_ID_ATRAC1,
+ CODEC_ID_BINKAUDIO_RDFT,
+ CODEC_ID_BINKAUDIO_DCT,
+ CODEC_ID_AAC_LATM,
+ CODEC_ID_QDMC,
+ CODEC_ID_CELT,
+ CODEC_ID_G723_1,
+ CODEC_ID_G729,
+ CODEC_ID_8SVX_EXP,
+ CODEC_ID_8SVX_FIB,
+ CODEC_ID_BMV_AUDIO,
+ CODEC_ID_RALF,
+ CODEC_ID_IAC,
+ CODEC_ID_ILBC,
+ CODEC_ID_FFWAVESYNTH = MKBETAG('F','F','W','S'),
+ CODEC_ID_SONIC = MKBETAG('S','O','N','C'),
+ CODEC_ID_SONIC_LS = MKBETAG('S','O','N','L'),
+ CODEC_ID_PAF_AUDIO = MKBETAG('P','A','F','A'),
+ CODEC_ID_OPUS = MKBETAG('O','P','U','S'),
+
+ /* subtitle codecs */
+ CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs.
+ CODEC_ID_DVD_SUBTITLE = 0x17000,
+ CODEC_ID_DVB_SUBTITLE,
+ CODEC_ID_TEXT, ///< raw UTF-8 text
+ CODEC_ID_XSUB,
+ CODEC_ID_SSA,
+ CODEC_ID_MOV_TEXT,
+ CODEC_ID_HDMV_PGS_SUBTITLE,
+ CODEC_ID_DVB_TELETEXT,
+ CODEC_ID_SRT,
+ CODEC_ID_MICRODVD = MKBETAG('m','D','V','D'),
+ CODEC_ID_EIA_608 = MKBETAG('c','6','0','8'),
+ CODEC_ID_JACOSUB = MKBETAG('J','S','U','B'),
+ CODEC_ID_SAMI = MKBETAG('S','A','M','I'),
+ CODEC_ID_REALTEXT = MKBETAG('R','T','X','T'),
+ CODEC_ID_SUBVIEWER = MKBETAG('S','u','b','V'),
+
+ /* other specific kind of codecs (generally used for attachments) */
+ CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs.
+ CODEC_ID_TTF = 0x18000,
+ CODEC_ID_BINTEXT = MKBETAG('B','T','X','T'),
+ CODEC_ID_XBIN = MKBETAG('X','B','I','N'),
+ CODEC_ID_IDF = MKBETAG( 0 ,'I','D','F'),
+ CODEC_ID_OTF = MKBETAG( 0 ,'O','T','F'),
+
+ CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like CODEC_ID_NONE) but lavf should attempt to identify it
+
+ CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS
+ * stream (only used by libavformat) */
+ CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems
+ * stream (only used by libavformat) */
+ CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information.
+
+#endif /* AVCODEC_OLD_CODEC_IDS_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/vaapi.h b/Externals/ffmpeg/dev/include/libavcodec/vaapi.h
new file mode 100644
index 0000000000..815a27e226
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/vaapi.h
@@ -0,0 +1,173 @@
+/*
+ * Video Acceleration API (shared data between FFmpeg and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the FFmpeg library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ */
+struct vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+
+ /**
+ * VAPictureParameterBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t pic_param_buf_id;
+
+ /**
+ * VAIQMatrixBuffer ID
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t iq_matrix_buf_id;
+
+ /**
+ * VABitPlaneBuffer ID (for VC-1 decoding)
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t bitplane_buf_id;
+
+ /**
+ * Slice parameter/data buffer IDs
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t *slice_buf_ids;
+
+ /**
+ * Number of effective slice buffer IDs to send to the HW
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int n_slice_buf_ids;
+
+ /**
+ * Size of pre-allocated slice_buf_ids
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_buf_ids_alloc;
+
+ /**
+ * Pointer to VASliceParameterBuffers
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ void *slice_params;
+
+ /**
+ * Size of a VASliceParameterBuffer element
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_param_size;
+
+ /**
+ * Size of pre-allocated slice_params
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_params_alloc;
+
+ /**
+ * Number of slices currently filled in
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ unsigned int slice_count;
+
+ /**
+ * Pointer to slice data buffer base
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ const uint8_t *slice_data;
+
+ /**
+ * Current size of slice data
+ *
+ * - encoding: unused
+ * - decoding: Set by libavcodec
+ */
+ uint32_t slice_data_size;
+};
+
+/* @} */
+
+#endif /* AVCODEC_VAAPI_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/vda.h b/Externals/ffmpeg/dev/include/libavcodec/vda.h
new file mode 100644
index 0000000000..12330aa363
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/vda.h
@@ -0,0 +1,213 @@
+/*
+ * VDA HW acceleration
+ *
+ * copyright (c) 2011 Sebastien Zwickert
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDA_H
+#define AVCODEC_VDA_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vda
+ * Public libavcodec VDA header.
+ */
+
+#include "libavcodec/avcodec.h"
+
+#include <stdint.h>
+
+// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes
+// http://openradar.appspot.com/8026390
+#undef __GNUC_STDC_INLINE__
+
+#define Picture QuickdrawPicture
+#include <VideoDecodeAcceleration/VDADecoder.h>
+#undef Picture
+
+#include "libavcodec/version.h"
+
+// extra flags not defined in VDADecoder.h
+enum {
+ kVDADecodeInfo_Asynchronous = 1UL << 0,
+ kVDADecodeInfo_FrameDropped = 1UL << 1
+};
+
+/**
+ * @defgroup lavc_codec_hwaccel_vda VDA
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+/**
+ * This structure is used to provide the necessary configurations and data
+ * to the VDA FFmpeg HWAccel implementation.
+ *
+ * The application must make it available as AVCodecContext.hwaccel_context.
+ */
+struct vda_context {
+ /**
+ * VDA decoder object.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by libavcodec.
+ */
+ VDADecoder decoder;
+
+ /**
+ * The Core Video pixel buffer that contains the current image data.
+ *
+ * encoding: unused
+ * decoding: Set by libavcodec. Unset by user.
+ */
+ CVPixelBufferRef cv_buffer;
+
+ /**
+ * Use the hardware decoder in synchronous mode.
+ *
+ * encoding: unused
+ * decoding: Set by user.
+ */
+ int use_sync_decoding;
+
+ /**
+ * The frame width.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int width;
+
+ /**
+ * The frame height.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int height;
+
+ /**
+ * The frame format.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ int format;
+
+ /**
+ * The pixel format for output image buffers.
+ *
+ * - encoding: unused
+ * - decoding: Set/Unset by user.
+ */
+ OSType cv_pix_fmt_type;
+
+ /**
+ * unused
+ */
+ uint8_t *priv_bitstream;
+
+ /**
+ * unused
+ */
+ int priv_bitstream_size;
+
+ /**
+ * unused
+ */
+ int priv_allocated_size;
+
+ /**
+ * Use av_buffer to manage buffer.
+ * When the flag is set, the CVPixelBuffers returned by the decoder will
+ * be released automatically, so you have to retain them if necessary.
+ * Not setting this flag may cause memory leak.
+ *
+ * encoding: unused
+ * decoding: Set by user.
+ */
+ int use_ref_buffer;
+};
+
+/** Create the video decoder. */
+int ff_vda_create_decoder(struct vda_context *vda_ctx,
+ uint8_t *extradata,
+ int extradata_size);
+
+/** Destroy the video decoder. */
+int ff_vda_destroy_decoder(struct vda_context *vda_ctx);
+
+/**
+ * This struct holds all the information that needs to be passed
+ * between the caller and libavcodec for initializing VDA decoding.
+ * Its size is not a part of the public ABI, it must be allocated with
+ * av_vda_alloc_context() and freed with av_free().
+ */
+typedef struct AVVDAContext {
+ /**
+ * VDA decoder object. Created and freed by the caller.
+ */
+ VDADecoder decoder;
+
+ /**
+ * The output callback that must be passed to VDADecoderCreate.
+ * Set by av_vda_alloc_context().
+ */
+ VDADecoderOutputCallback output_callback;
+} AVVDAContext;
+
+/**
+ * Allocate and initialize a VDA context.
+ *
+ * This function should be called from the get_format() callback when the caller
+ * selects the AV_PIX_FMT_VDA format. The caller must then create the decoder
+ * object (using the output callback provided by libavcodec) that will be used
+ * for VDA-accelerated decoding.
+ *
+ * When decoding with VDA is finished, the caller must destroy the decoder
+ * object and free the VDA context using av_free().
+ *
+ * @return the newly allocated context or NULL on failure
+ */
+AVVDAContext *av_vda_alloc_context(void);
+
+/**
+ * This is a convenience function that creates and sets up the VDA context using
+ * an internal implementation.
+ *
+ * @param avctx the corresponding codec context
+ *
+ * @return >= 0 on success, a negative AVERROR code on failure
+ */
+int av_vda_default_init(AVCodecContext *avctx);
+
+/**
+ * This function must be called to free the VDA context initialized with
+ * av_vda_default_init().
+ *
+ * @param avctx the corresponding codec context
+ */
+void av_vda_default_free(AVCodecContext *avctx);
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_VDA_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/vdpau.h b/Externals/ffmpeg/dev/include/libavcodec/vdpau.h
new file mode 100644
index 0000000000..a42ca013f2
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/vdpau.h
@@ -0,0 +1,255 @@
+/*
+ * The Video Decode and Presentation API for UNIX (VDPAU) is used for
+ * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1.
+ *
+ * Copyright (C) 2008 NVIDIA
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VDPAU_H
+#define AVCODEC_VDPAU_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vdpau
+ * Public libavcodec VDPAU header.
+ */
+
+
+/**
+ * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer
+ * @ingroup lavc_codec_hwaccel
+ *
+ * VDPAU hardware acceleration has two modules
+ * - VDPAU decoding
+ * - VDPAU presentation
+ *
+ * The VDPAU decoding module parses all headers using FFmpeg
+ * parsing mechanisms and uses VDPAU for the actual decoding.
+ *
+ * As per the current implementation, the actual decoding
+ * and rendering (API calls) are done as part of the VDPAU
+ * presentation (vo_vdpau.c) module.
+ *
+ * @{
+ */
+
+#include <vdpau/vdpau.h>
+#include <vdpau/vdpau_x11.h>
+#include "libavutil/avconfig.h"
+#include "libavutil/attributes.h"
+
+#include "avcodec.h"
+#include "version.h"
+
+#if FF_API_BUFS_VDPAU
+union AVVDPAUPictureInfo {
+ VdpPictureInfoH264 h264;
+ VdpPictureInfoMPEG1Or2 mpeg;
+ VdpPictureInfoVC1 vc1;
+ VdpPictureInfoMPEG4Part2 mpeg4;
+};
+#endif
+
+struct AVCodecContext;
+struct AVFrame;
+
+typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *,
+ const VdpPictureInfo *, uint32_t,
+ const VdpBitstreamBuffer *);
+
+/**
+ * This structure is used to share data between the libavcodec library and
+ * the client video application.
+ * The user shall allocate the structure via the av_alloc_vdpau_hwaccel
+ * function and make it available as
+ * AVCodecContext.hwaccel_context. Members can be set by the user once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * The size of this structure is not a part of the public ABI and must not
+ * be used outside of libavcodec. Use av_vdpau_alloc_context() to allocate an
+ * AVVDPAUContext.
+ */
+typedef struct AVVDPAUContext {
+ /**
+ * VDPAU decoder handle
+ *
+ * Set by user.
+ */
+ VdpDecoder decoder;
+
+ /**
+ * VDPAU decoder render callback
+ *
+ * Set by the user.
+ */
+ VdpDecoderRender *render;
+
+#if FF_API_BUFS_VDPAU
+ /**
+ * VDPAU picture information
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ union AVVDPAUPictureInfo info;
+
+ /**
+ * Allocated size of the bitstream_buffers table.
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ int bitstream_buffers_allocated;
+
+ /**
+ * Useful bitstream buffers in the bitstream buffers table.
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ int bitstream_buffers_used;
+
+ /**
+ * Table of bitstream buffers.
+ * The user is responsible for freeing this buffer using av_freep().
+ *
+ * Set by libavcodec.
+ */
+ attribute_deprecated
+ VdpBitstreamBuffer *bitstream_buffers;
+#endif
+ AVVDPAU_Render2 render2;
+} AVVDPAUContext;
+
+/**
+ * @brief allocation function for AVVDPAUContext
+ *
+ * Allows extending the struct without breaking API/ABI
+ */
+AVVDPAUContext *av_alloc_vdpaucontext(void);
+
+AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *);
+void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2);
+
+/**
+ * Associate a VDPAU device with a codec context for hardware acceleration.
+ * This function is meant to be called from the get_format() codec callback,
+ * or earlier. It can also be called after avcodec_flush_buffers() to change
+ * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent
+ * display preemption).
+ *
+ * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes
+ * successfully.
+ *
+ * @param avctx decoding context whose get_format() callback is invoked
+ * @param device VDPAU device handle to use for hardware acceleration
+ * @param get_proc_address VDPAU device driver
+ * @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags
+ *
+ * @return 0 on success, an AVERROR code on failure.
+ */
+int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device,
+ VdpGetProcAddress *get_proc_address, unsigned flags);
+
+/**
+ * Gets the parameters to create an adequate VDPAU video surface for the codec
+ * context using VDPAU hardware decoding acceleration.
+ *
+ * @note Behavior is undefined if the context was not successfully bound to a
+ * VDPAU device using av_vdpau_bind_context().
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param type storage space for the VDPAU video surface chroma type
+ * (or NULL to ignore)
+ * @param width storage space for the VDPAU video surface pixel width
+ * (or NULL to ignore)
+ * @param height storage space for the VDPAU video surface pixel height
+ * (or NULL to ignore)
+ *
+ * @return 0 on success, a negative AVERROR code on failure.
+ */
+int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type,
+ uint32_t *width, uint32_t *height);
+
+/**
+ * Allocate an AVVDPAUContext.
+ *
+ * @return Newly-allocated AVVDPAUContext or NULL on failure.
+ */
+AVVDPAUContext *av_vdpau_alloc_context(void);
+
+/**
+ * Get a decoder profile that should be used for initializing a VDPAU decoder.
+ * Should be called from the AVCodecContext.get_format() callback.
+ *
+ * @param avctx the codec context being used for decoding the stream
+ * @param profile a pointer into which the result will be written on success.
+ * The contents of profile are undefined if this function returns
+ * an error.
+ *
+ * @return 0 on success (non-negative), a negative AVERROR on failure.
+ */
+int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile);
+
+#if FF_API_CAP_VDPAU
+/** @brief The videoSurface is used for rendering. */
+#define FF_VDPAU_STATE_USED_FOR_RENDER 1
+
+/**
+ * @brief The videoSurface is needed for reference/prediction.
+ * The codec manipulates this.
+ */
+#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2
+
+/**
+ * @brief This structure is used as a callback between the FFmpeg
+ * decoder (vd_) and presentation (vo_) module.
+ * This is used for defining a video frame containing surface,
+ * picture parameter, bitstream information etc which are passed
+ * between the FFmpeg decoder and its clients.
+ */
+struct vdpau_render_state {
+ VdpVideoSurface surface; ///< Used as rendered surface, never changed.
+
+ int state; ///< Holds FF_VDPAU_STATE_* values.
+
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+ /** picture parameter information for all supported codecs */
+ union AVVDPAUPictureInfo info;
+#endif
+
+ /** Describe size/location of the compressed video data.
+ Set to 0 when freeing bitstream_buffers. */
+ int bitstream_buffers_allocated;
+ int bitstream_buffers_used;
+ /** The user is responsible for freeing this buffer using av_freep(). */
+ VdpBitstreamBuffer *bitstream_buffers;
+
+#if !AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+ /** picture parameter information for all supported codecs */
+ union AVVDPAUPictureInfo info;
+#endif
+};
+#endif
+
+/* @}*/
+
+#endif /* AVCODEC_VDPAU_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/version.h b/Externals/ffmpeg/dev/include/libavcodec/version.h
new file mode 100644
index 0000000000..7e51f3b39e
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/version.h
@@ -0,0 +1,188 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VERSION_H
+#define AVCODEC_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavc
+ * Libavcodec version macros.
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVCODEC_VERSION_MAJOR 56
+#define LIBAVCODEC_VERSION_MINOR 20
+#define LIBAVCODEC_VERSION_MICRO 100
+
+#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \
+ LIBAVCODEC_VERSION_MINOR, \
+ LIBAVCODEC_VERSION_MICRO)
+#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT
+
+#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#ifndef FF_API_REQUEST_CHANNELS
+#define FF_API_REQUEST_CHANNELS (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_DECODE_AUDIO
+#define FF_API_OLD_DECODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_ENCODE_AUDIO
+#define FF_API_OLD_ENCODE_AUDIO (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_ENCODE_VIDEO
+#define FF_API_OLD_ENCODE_VIDEO (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CODEC_ID
+#define FF_API_CODEC_ID (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AUDIO_CONVERT
+#define FF_API_AUDIO_CONVERT (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AVCODEC_RESAMPLE
+#define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT
+#endif
+#ifndef FF_API_DEINTERLACE
+#define FF_API_DEINTERLACE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_DESTRUCT_PACKET
+#define FF_API_DESTRUCT_PACKET (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_GET_BUFFER
+#define FF_API_GET_BUFFER (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MISSING_SAMPLE
+#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_LOWRES
+#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CAP_VDPAU
+#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_BUFS_VDPAU
+#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_VOXWARE
+#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_SET_DIMENSIONS
+#define FF_API_SET_DIMENSIONS (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_DEBUG_MV
+#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AC_VLC
+#define FF_API_AC_VLC (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_OLD_MSMPEG4
+#define FF_API_OLD_MSMPEG4 (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ASPECT_EXTENDED
+#define FF_API_ASPECT_EXTENDED (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_THREAD_OPAQUE
+#define FF_API_THREAD_OPAQUE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CODEC_PKT
+#define FF_API_CODEC_PKT (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ARCH_ALPHA
+#define FF_API_ARCH_ALPHA (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ERROR_RATE
+#define FF_API_ERROR_RATE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_QSCALE_TYPE
+#define FF_API_QSCALE_TYPE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MB_TYPE
+#define FF_API_MB_TYPE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MAX_BFRAMES
+#define FF_API_MAX_BFRAMES (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_NEG_LINESIZES
+#define FF_API_NEG_LINESIZES (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_EMU_EDGE
+#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ARCH_SH4
+#define FF_API_ARCH_SH4 (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_ARCH_SPARC
+#define FF_API_ARCH_SPARC (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_UNUSED_MEMBERS
+#define FF_API_UNUSED_MEMBERS (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_IDCT_XVIDMMX
+#define FF_API_IDCT_XVIDMMX (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_INPUT_PRESERVED
+#define FF_API_INPUT_PRESERVED (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_NORMALIZE_AQP
+#define FF_API_NORMALIZE_AQP (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_GMC
+#define FF_API_GMC (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_MV0
+#define FF_API_MV0 (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_CODEC_NAME
+#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AFD
+#define FF_API_AFD (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_VISMV
+/* XXX: don't forget to drop the -vismv documentation */
+#define FF_API_VISMV (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_DV_FRAME_PROFILE
+#define FF_API_DV_FRAME_PROFILE (LIBAVCODEC_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_AUDIOENC_DELAY
+#define FF_API_AUDIOENC_DELAY (LIBAVCODEC_VERSION_MAJOR < 58)
+#endif
+#ifndef FF_API_AVCTX_TIMEBASE
+#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+#ifndef FF_API_MPV_OPT
+#define FF_API_MPV_OPT (LIBAVCODEC_VERSION_MAJOR < 59)
+#endif
+
+#endif /* AVCODEC_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/vorbis_parser.h b/Externals/ffmpeg/dev/include/libavcodec/vorbis_parser.h
new file mode 100644
index 0000000000..0f73537ae5
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/vorbis_parser.h
@@ -0,0 +1,77 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * A public API for Vorbis parsing
+ *
+ * Determines the duration for each packet.
+ */
+
+#ifndef AVCODEC_VORBIS_PARSE_H
+#define AVCODEC_VORBIS_PARSE_H
+
+#include <stdint.h>
+
+typedef struct AVVorbisParseContext AVVorbisParseContext;
+
+/**
+ * Allocate and initialize the Vorbis parser using headers in the extradata.
+ *
+ * @param extradata codec extradata containing the Vorbis setup headers
+ * @param extradata_size size of the extradata buffer in bytes
+ */
+AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata,
+                                           int extradata_size);
+
+/**
+ * Free the parser and everything associated with it.
+ */
+void av_vorbis_parse_free(AVVorbisParseContext **s);
+
+#define VORBIS_FLAG_HEADER 0x00000001
+#define VORBIS_FLAG_COMMENT 0x00000002
+
+/**
+ * Get the duration for a Vorbis packet.
+ *
+ * If @p flags is @c NULL,
+ * special frames are considered invalid.
+ *
+ * @param s Vorbis parser context
+ * @param buf buffer containing a Vorbis frame
+ * @param buf_size size of the buffer
+ * @param flags flags for special frames
+ */
+int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf,
+ int buf_size, int *flags);
+
+/**
+ * Get the duration for a Vorbis packet.
+ *
+ * @param s Vorbis parser context
+ * @param buf buffer containing a Vorbis frame
+ * @param buf_size size of the buffer
+ */
+int av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf,
+ int buf_size);
+
+void av_vorbis_parse_reset(AVVorbisParseContext *s);
+
+#endif /* AVCODEC_VORBIS_PARSE_H */
diff --git a/Externals/ffmpeg/dev/include/libavcodec/xvmc.h b/Externals/ffmpeg/dev/include/libavcodec/xvmc.h
new file mode 100644
index 0000000000..c2e187cc16
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavcodec/xvmc.h
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2003 Ivan Kalvachev
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_XVMC_H
+#define AVCODEC_XVMC_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_xvmc
+ * Public libavcodec XvMC header.
+ */
+
+#include <X11/extensions/XvMC.h>
+
+#include "libavutil/attributes.h"
+#include "version.h"
+#include "avcodec.h"
+
+/**
+ * @defgroup lavc_codec_hwaccel_xvmc XvMC
+ * @ingroup lavc_codec_hwaccel
+ *
+ * @{
+ */
+
+#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct
+ the number is 1337 speak for the letters IDCT MCo (motion compensation) */
+
+attribute_deprecated struct xvmc_pix_fmt {
+ /** The field contains the special constant value AV_XVMC_ID.
+ It is used as a test that the application correctly uses the API,
+ and that there is no corruption caused by pixel routines.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int xvmc_id;
+
+ /** Pointer to the block array allocated by XvMCCreateBlocks().
+ The array has to be freed by XvMCDestroyBlocks().
+ Each group of 64 values represents one data block of differential
+ pixel information (in MoCo mode) or coefficients for IDCT.
+ - application - set the pointer during initialization
+ - libavcodec - fills coefficients/pixel data into the array
+ */
+ short* data_blocks;
+
+ /** Pointer to the macroblock description array allocated by
+ XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks().
+ - application - set the pointer during initialization
+ - libavcodec - fills description data into the array
+ */
+ XvMCMacroBlock* mv_blocks;
+
+ /** Number of macroblock descriptions that can be stored in the mv_blocks
+ array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_mv_blocks;
+
+ /** Number of blocks that can be stored at once in the data_blocks array.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int allocated_data_blocks;
+
+ /** Indicate that the hardware would interpret data_blocks as IDCT
+ coefficients and perform IDCT on them.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int idct;
+
+ /** In MoCo mode it indicates that intra macroblocks are assumed to be in
+ unsigned format; same as the XVMC_INTRA_UNSIGNED flag.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ int unsigned_intra;
+
+ /** Pointer to the surface allocated by XvMCCreateSurface().
+ It has to be freed by XvMCDestroySurface() on application exit.
+ It identifies the frame and its state on the video hardware.
+ - application - set during initialization
+ - libavcodec - unchanged
+ */
+ XvMCSurface* p_surface;
+
+/** Set by the decoder before calling ff_draw_horiz_band(),
+ needed by the XvMCRenderSurface function. */
+//@{
+ /** Pointer to the surface used as past reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_past_surface;
+
+ /** Pointer to the surface used as future reference
+ - application - unchanged
+ - libavcodec - set
+ */
+ XvMCSurface* p_future_surface;
+
+ /** top/bottom field or frame
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int picture_structure;
+
+ /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence
+ - application - unchanged
+ - libavcodec - set
+ */
+ unsigned int flags;
+//@}
+
+ /** Number of macroblock descriptions in the mv_blocks array
+ that have already been passed to the hardware.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may increment it
+ with filled_mb_block_num or zero both.
+ - libavcodec - unchanged
+ */
+ int start_mv_blocks_num;
+
+ /** Number of new macroblock descriptions in the mv_blocks array (after
+ start_mv_blocks_num) that are filled by libavcodec and have to be
+ passed to the hardware.
+ - application - zeroes it on get_buffer() or after successful
+ ff_draw_horiz_band().
+ - libavcodec - increment with one of each stored MB
+ */
+ int filled_mv_blocks_num;
+
+ /** Number of the next free data block; one data block consists of
+ 64 short values in the data_blocks array.
+ All blocks before this one have already been claimed by placing their
+ position into the corresponding block description structure field,
+ that are part of the mv_blocks array.
+ - application - zeroes it on get_buffer().
+ A successful ff_draw_horiz_band() may zero it together
+ with start_mb_blocks_num.
+ - libavcodec - each decoded macroblock increases it by the number
+ of coded blocks it contains.
+ */
+ int next_free_data_block_num;
+};
+
+/**
+ * @}
+ */
+
+#endif /* AVCODEC_XVMC_H */
diff --git a/Externals/ffmpeg/dev/include/libavdevice/avdevice.h b/Externals/ffmpeg/dev/include/libavdevice/avdevice.h
new file mode 100644
index 0000000000..2d675b012d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavdevice/avdevice.h
@@ -0,0 +1,509 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_AVDEVICE_H
+#define AVDEVICE_AVDEVICE_H
+
+#include "version.h"
+
+/**
+ * @file
+ * @ingroup lavd
+ * Main libavdevice API header
+ */
+
+/**
+ * @defgroup lavd Special devices muxing/demuxing library
+ * @{
+ * Libavdevice is a complementary library to @ref libavf "libavformat". It
+ * provides various "special" platform-specific muxers and demuxers, e.g. for
+ * grabbing devices, audio capture and playback etc. As a consequence, the
+ * (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
+ * I/O functions). The filename passed to avformat_open_input() often does not
+ * refer to an actually existing file, but has some special device-specific
+ * meaning - e.g. for x11grab it is the display name.
+ *
+ * To use libavdevice, simply call avdevice_register_all() to register all
+ * compiled muxers and demuxers. They all use standard libavformat API.
+ * @}
+ */
+
+#include "libavutil/log.h"
+#include "libavutil/opt.h"
+#include "libavutil/dict.h"
+#include "libavformat/avformat.h"
+
+/**
+ * Return the LIBAVDEVICE_VERSION_INT constant.
+ */
+unsigned avdevice_version(void);
+
+/**
+ * Return the libavdevice build-time configuration.
+ */
+const char *avdevice_configuration(void);
+
+/**
+ * Return the libavdevice license.
+ */
+const char *avdevice_license(void);
+
+/**
+ * Initialize libavdevice and register all the input and output devices.
+ * @warning This function is not thread safe.
+ */
+void avdevice_register_all(void);
+
+/**
+ * Audio input devices iterator.
+ *
+ * If d is NULL, returns the first registered input audio/video device,
+ * if d is non-NULL, returns the next registered input audio/video device after d
+ * or NULL if d is the last one.
+ */
+AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
+
+/**
+ * Video input devices iterator.
+ *
+ * If d is NULL, returns the first registered input audio/video device,
+ * if d is non-NULL, returns the next registered input audio/video device after d
+ * or NULL if d is the last one.
+ */
+AVInputFormat *av_input_video_device_next(AVInputFormat *d);
+
+/**
+ * Audio output devices iterator.
+ *
+ * If d is NULL, returns the first registered output audio/video device,
+ * if d is non-NULL, returns the next registered output audio/video device after d
+ * or NULL if d is the last one.
+ */
+AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
+
+/**
+ * Video output devices iterator.
+ *
+ * If d is NULL, returns the first registered output audio/video device,
+ * if d is non-NULL, returns the next registered output audio/video device after d
+ * or NULL if d is the last one.
+ */
+AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
+
+typedef struct AVDeviceRect {
+ int x; /**< x coordinate of top left corner */
+ int y; /**< y coordinate of top left corner */
+ int width; /**< width */
+ int height; /**< height */
+} AVDeviceRect;
+
+/**
+ * Message types used by avdevice_app_to_dev_control_message().
+ */
+enum AVAppToDevMessageType {
+ /**
+ * Dummy message.
+ */
+ AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
+
+ /**
+ * Window size change message.
+ *
+ * Message is sent to the device every time the application changes the size
+ * of the window device renders to.
+ * Message should also be sent right after window is created.
+ *
+ * data: AVDeviceRect: new window size.
+ */
+ AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
+
+ /**
+ * Repaint request message.
+ *
+ * Message is sent to the device when window has to be repainted.
+ *
+ * data: AVDeviceRect: area required to be repainted.
+ * NULL: whole area is required to be repainted.
+ */
+ AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
+
+ /**
+ * Request pause/play.
+ *
+ * Application requests pause/unpause playback.
+ * Mostly usable with devices that have internal buffer.
+ * By default devices are not paused.
+ *
+ * data: NULL
+ */
+ AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
+ AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
+ AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
+
+ /**
+ * Volume control message.
+ *
+ * Set volume level. It may be device-dependent if volume
+ * is changed per stream or system wide. Per stream volume
+ * change is expected when possible.
+ *
+ * data: double: new volume with range of 0.0 - 1.0.
+ */
+ AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
+
+ /**
+ * Mute control messages.
+ *
+ * Change mute state. It may be device-dependent if mute status
+ * is changed per stream or system wide. Per stream mute status
+ * change is expected when possible.
+ *
+ * data: NULL.
+ */
+ AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
+ AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
+ AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
+
+ /**
+ * Get volume/mute messages.
+ *
+ * Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
+ * AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
+ *
+ * data: NULL.
+ */
+ AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
+ AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
+};
+
+/**
+ * Message types used by avdevice_dev_to_app_control_message().
+ */
+enum AVDevToAppMessageType {
+ /**
+ * Dummy message.
+ */
+ AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
+
+ /**
+ * Create window buffer message.
+ *
+ * Device requests to create a window buffer. Exact meaning is device-
+ * and application-dependent. Message is sent before rendering first
+ * frame and all one-shot initializations should be done here.
+ * Application is allowed to ignore preferred window buffer size.
+ *
+ * @note: Application is obligated to inform about window buffer size
+ * with AV_APP_TO_DEV_WINDOW_SIZE message.
+ *
+ * data: AVDeviceRect: preferred size of the window buffer.
+ * NULL: no preferred size of the window buffer.
+ */
+ AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
+
+ /**
+ * Prepare window buffer message.
+ *
+ * Device requests to prepare a window buffer for rendering.
+ * Exact meaning is device- and application-dependent.
+ * Message is sent before rendering of each frame.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
+
+ /**
+ * Display window buffer message.
+ *
+ * Device requests to display a window buffer.
+ * Message is sent when new frame is ready to be displayed.
+ * Usually buffers need to be swapped in handler of this message.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
+
+ /**
+ * Destroy window buffer message.
+ *
+ * Device requests to destroy a window buffer.
+ * Message is sent when device is about to be destroyed and window
+ * buffer is not required anymore.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
+
+ /**
+ * Buffer fullness status messages.
+ *
+ * Device signals buffer overflow/underflow.
+ *
+ * data: NULL.
+ */
+ AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
+ AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
+
+ /**
+ * Buffer readable/writable.
+ *
+ * Device informs that buffer is readable/writable.
+ * When possible, device informs how many bytes can be read/write.
+ *
+ * @warning Device may not inform when the number of bytes that can be read/written changes.
+ *
+ * data: int64_t: amount of bytes available to read/write.
+ * NULL: amount of bytes available to read/write is not known.
+ */
+ AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
+ AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
+
+ /**
+ * Mute state change message.
+ *
+ * Device informs that mute state has changed.
+ *
+ * data: int: 0 for not muted state, non-zero for muted state.
+ */
+ AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
+
+ /**
+ * Volume level change message.
+ *
+ * Device informs that volume level has changed.
+ *
+ * data: double: new volume with range of 0.0 - 1.0.
+ */
+ AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
+};
+
+/**
+ * Send control message from application to device.
+ *
+ * @param s device context.
+ * @param type message type.
+ * @param data message data. Exact type depends on message type.
+ * @param data_size size of message data.
+ * @return >= 0 on success, negative on error.
+ * AVERROR(ENOSYS) when device doesn't implement handler of the message.
+ */
+int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
+ enum AVAppToDevMessageType type,
+ void *data, size_t data_size);
+
+/**
+ * Send control message from device to application.
+ *
+ * @param s device context.
+ * @param type message type.
+ * @param data message data. Can be NULL.
+ * @param data_size size of message data.
+ * @return >= 0 on success, negative on error.
+ * AVERROR(ENOSYS) when application doesn't implement handler of the message.
+ */
+int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
+ enum AVDevToAppMessageType type,
+ void *data, size_t data_size);
+
+/**
+ * Following API allows user to probe device capabilities (supported codecs,
+ * pixel formats, sample formats, resolutions, channel counts, etc).
+ * It is build on top op AVOption API.
+ * Queried capabilities allows to set up converters of video or audio
+ * parameters that fit to the device.
+ *
+ * List of capabilities that can be queried:
+ * - Capabilities valid for both audio and video devices:
+ * - codec: supported audio/video codecs.
+ * type: AV_OPT_TYPE_INT (AVCodecID value)
+ * - Capabilities valid for audio devices:
+ * - sample_format: supported sample formats.
+ * type: AV_OPT_TYPE_INT (AVSampleFormat value)
+ * - sample_rate: supported sample rates.
+ * type: AV_OPT_TYPE_INT
+ * - channels: supported number of channels.
+ * type: AV_OPT_TYPE_INT
+ * - channel_layout: supported channel layouts.
+ * type: AV_OPT_TYPE_INT64
+ * - Capabilities valid for video devices:
+ * - pixel_format: supported pixel formats.
+ * type: AV_OPT_TYPE_INT (AVPixelFormat value)
+ * - window_size: supported window sizes (describes size of the window size presented to the user).
+ * type: AV_OPT_TYPE_IMAGE_SIZE
+ * - frame_size: supported frame sizes (describes size of provided video frames).
+ * type: AV_OPT_TYPE_IMAGE_SIZE
+ * - fps: supported fps values
+ * type: AV_OPT_TYPE_RATIONAL
+ *
+ * Value of the capability may be set by user using av_opt_set() function
+ * and AVDeviceCapabilitiesQuery object. Following queries will
+ * limit results to the values matching already set capabilities.
+ * For example, setting a codec may impact number of formats or fps values
+ * returned during next query. Setting invalid value may limit results to zero.
+ *
+ * Example of the usage basing on opengl output device:
+ *
+ * @code
+ * AVFormatContext *oc = NULL;
+ * AVDeviceCapabilitiesQuery *caps = NULL;
+ * AVOptionRanges *ranges;
+ * int ret;
+ *
+ * if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
+ * goto fail;
+ * if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
+ * goto fail;
+ *
+ * //query codecs
+ * if ((ret = av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
+ * goto fail;
+ * //pick codec here and set it
+ * av_opt_set(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
+ *
+ * //query format
+ * if ((ret = av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
+ * goto fail;
+ * //pick format here and set it
+ * av_opt_set(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
+ *
+ * //query and set more capabilities
+ *
+ * fail:
+ * //clean up code
+ * avdevice_capabilities_free(&caps, oc);
+ * avformat_free_context(oc);
+ * @endcode
+ */
+
+/**
+ * Structure describes device capabilities.
+ *
+ * It is used by devices in conjunction with av_device_capabilities AVOption table
+ * to implement capabilities probing API based on AVOption API. Should not be used directly.
+ */
+typedef struct AVDeviceCapabilitiesQuery {
+ const AVClass *av_class;
+ AVFormatContext *device_context;
+ enum AVCodecID codec;
+ enum AVSampleFormat sample_format;
+ enum AVPixelFormat pixel_format;
+ int sample_rate;
+ int channels;
+ int64_t channel_layout;
+ int window_width;
+ int window_height;
+ int frame_width;
+ int frame_height;
+ AVRational fps;
+} AVDeviceCapabilitiesQuery;
+
+/**
+ * AVOption table used by devices to implement device capabilities API. Should not be used by a user.
+ */
+extern const AVOption av_device_capabilities[];
+
+/**
+ * Initialize capabilities probing API based on AVOption API.
+ *
+ * avdevice_capabilities_free() must be called when query capabilities API is
+ * not used anymore.
+ *
+ * @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
+ * @param s Context of the device.
+ * @param device_options An AVDictionary filled with device-private options.
+ * On return this parameter will be destroyed and replaced with a dict
+ * containing options that were not found. May be NULL.
+ * The same options must be passed later to avformat_write_header() for output
+ * devices or avformat_open_input() for input devices, or at any other place
+ * that affects device-private options.
+ *
+ * @return >= 0 on success, negative otherwise.
+ */
+int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
+ AVDictionary **device_options);
+
+/**
+ * Free resources created by avdevice_capabilities_create()
+ *
+ * @param caps Device capabilities data to be freed.
+ * @param s Context of the device.
+ */
+void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
+
+/**
+ * Structure describes basic parameters of the device.
+ */
+typedef struct AVDeviceInfo {
+ char *device_name; /**< device name, format depends on device */
+ char *device_description; /**< human friendly name */
+} AVDeviceInfo;
+
+/**
+ * List of devices.
+ */
+typedef struct AVDeviceInfoList {
+ AVDeviceInfo **devices; /**< list of autodetected devices */
+ int nb_devices; /**< number of autodetected devices */
+ int default_device; /**< index of default device or -1 if no default */
+} AVDeviceInfoList;
+
+/**
+ * List devices.
+ *
+ * Returns available device names and their parameters.
+ *
+ * @note: Some devices may accept system-dependent device names that cannot be
+ * autodetected. The list returned by this function cannot be assumed to
+ * be always completed.
+ *
+ * @param s device context.
+ * @param[out] device_list list of autodetected devices.
+ * @return count of autodetected devices, negative on error.
+ */
+int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
+
+/**
+ * Convenient function to free result of avdevice_list_devices().
+ *
+ * @param devices device list to be freed.
+ */
+void avdevice_free_list_devices(AVDeviceInfoList **device_list);
+
+/**
+ * List devices.
+ *
+ * Returns available device names and their parameters.
+ * These are convenient wrappers for avdevice_list_devices().
+ * Device context is allocated and deallocated internally.
+ *
+ * @param device device format. May be NULL if device name is set.
+ * @param device_name device name. May be NULL if device format is set.
+ * @param device_options An AVDictionary filled with device-private options. May be NULL.
+ * The same options must be passed later to avformat_write_header() for output
+ * devices or avformat_open_input() for input devices, or at any other place
+ * that affects device-private options.
+ * @param[out] device_list list of autodetected devices
+ * @return count of autodetected devices, negative on error.
+ * @note device argument takes precedence over device_name when both are set.
+ */
+int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
+ AVDictionary *device_options, AVDeviceInfoList **device_list);
+int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
+ AVDictionary *device_options, AVDeviceInfoList **device_list);
+
+#endif /* AVDEVICE_AVDEVICE_H */
diff --git a/Externals/ffmpeg/dev/include/libavdevice/version.h b/Externals/ffmpeg/dev/include/libavdevice/version.h
new file mode 100644
index 0000000000..8de07f08b2
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavdevice/version.h
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVDEVICE_VERSION_H
+#define AVDEVICE_VERSION_H
+
+/**
+ * @file
+ * @ingroup lavd
+ * Libavdevice version macros
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVDEVICE_VERSION_MAJOR 56
+#define LIBAVDEVICE_VERSION_MINOR 4
+#define LIBAVDEVICE_VERSION_MICRO 100
+
+#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
+ LIBAVDEVICE_VERSION_MINOR, \
+ LIBAVDEVICE_VERSION_MICRO)
+#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
+ LIBAVDEVICE_VERSION_MINOR, \
+ LIBAVDEVICE_VERSION_MICRO)
+#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
+
+#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#endif /* AVDEVICE_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/asrc_abuffer.h b/Externals/ffmpeg/dev/include/libavfilter/asrc_abuffer.h
new file mode 100644
index 0000000000..aa3446166f
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/asrc_abuffer.h
@@ -0,0 +1,91 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_ASRC_ABUFFER_H
+#define AVFILTER_ASRC_ABUFFER_H
+
+#include "avfilter.h"
+
+/**
+ * @file
+ * memory buffer source for audio
+ *
+ * @deprecated use buffersrc.h instead.
+ */
+
+/**
+ * Queue an audio buffer to the audio buffer source.
+ *
+ * @param abuffersrc audio source buffer context
+ * @param data pointers to the samples planes
+ * @param linesize linesizes of each audio buffer plane
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt sample format of the audio data
+ * @param ch_layout channel layout of the audio data
+ * @param planar flag to indicate if audio data is planar or packed
+ * @param pts presentation timestamp of the audio buffer
+ * @param flags unused
+ *
+ * @deprecated use av_buffersrc_add_ref() instead.
+ */
+attribute_deprecated
+int av_asrc_buffer_add_samples(AVFilterContext *abuffersrc,
+ uint8_t *data[8], int linesize[8],
+ int nb_samples, int sample_rate,
+ int sample_fmt, int64_t ch_layout, int planar,
+ int64_t pts, int av_unused flags);
+
+/**
+ * Queue an audio buffer to the audio buffer source.
+ *
+ * This is similar to av_asrc_buffer_add_samples(), but the samples
+ * are stored in a buffer with known size.
+ *
+ * @param abuffersrc audio source buffer context
+ * @param buf pointer to the samples data, packed is assumed
+ * @param size the size in bytes of the buffer, it must contain an
+ * integer number of samples
+ * @param sample_fmt sample format of the audio data
+ * @param ch_layout channel layout of the audio data
+ * @param pts presentation timestamp of the audio buffer
+ * @param flags unused
+ *
+ * @deprecated use av_buffersrc_add_ref() instead.
+ */
+attribute_deprecated
+int av_asrc_buffer_add_buffer(AVFilterContext *abuffersrc,
+ uint8_t *buf, int buf_size,
+ int sample_rate,
+ int sample_fmt, int64_t ch_layout, int planar,
+ int64_t pts, int av_unused flags);
+
+/**
+ * Queue an audio buffer to the audio buffer source.
+ *
+ * @param abuffersrc audio source buffer context
+ * @param samplesref buffer ref to queue
+ * @param flags unused
+ *
+ * @deprecated use av_buffersrc_add_ref() instead.
+ */
+attribute_deprecated
+int av_asrc_buffer_add_audio_buffer_ref(AVFilterContext *abuffersrc,
+ AVFilterBufferRef *samplesref,
+ int av_unused flags);
+
+#endif /* AVFILTER_ASRC_ABUFFER_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/avcodec.h b/Externals/ffmpeg/dev/include/libavfilter/avcodec.h
new file mode 100644
index 0000000000..d3d0e20e71
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/avcodec.h
@@ -0,0 +1,69 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AVCODEC_H
+#define AVFILTER_AVCODEC_H
+
+/**
+ * @file
+ * libavcodec/libavfilter gluing utilities
+ *
+ * This should be included in an application ONLY if the installed
+ * libavfilter has been compiled with libavcodec support, otherwise
+ * symbols defined below will not be available.
+ */
+
+#include "avfilter.h"
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Create and return a picref reference from the data and properties
+ * contained in frame.
+ *
+ * @param perms permissions to assign to the new buffer reference
+ * @deprecated avfilter APIs work natively with AVFrame instead.
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_video_buffer_ref_from_frame(const AVFrame *frame, int perms);
+
+
+/**
+ * Create and return a picref reference from the data and properties
+ * contained in frame.
+ *
+ * @param perms permissions to assign to the new buffer reference
+ * @deprecated avfilter APIs work natively with AVFrame instead.
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_frame(const AVFrame *frame,
+ int perms);
+
+/**
+ * Create and return a buffer reference from the data and properties
+ * contained in frame.
+ *
+ * @param perms permissions to assign to the new buffer reference
+ * @deprecated avfilter APIs work natively with AVFrame instead.
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_buffer_ref_from_frame(enum AVMediaType type,
+ const AVFrame *frame,
+ int perms);
+#endif
+
+#endif /* AVFILTER_AVCODEC_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/avfilter.h b/Externals/ffmpeg/dev/include/libavfilter/avfilter.h
new file mode 100644
index 0000000000..b5220b96d9
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/avfilter.h
@@ -0,0 +1,1531 @@
+/*
+ * filter layer
+ * Copyright (c) 2007 Bobby Bingham
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AVFILTER_H
+#define AVFILTER_AVFILTER_H
+
+/**
+ * @file
+ * @ingroup lavfi
+ * Main libavfilter public API header
+ */
+
+/**
+ * @defgroup lavfi Libavfilter - graph-based frame editing library
+ * @{
+ */
+
+#include <stddef.h>
+
+#include "libavutil/attributes.h"
+#include "libavutil/avutil.h"
+#include "libavutil/dict.h"
+#include "libavutil/frame.h"
+#include "libavutil/log.h"
+#include "libavutil/samplefmt.h"
+#include "libavutil/pixfmt.h"
+#include "libavutil/rational.h"
+
+#include "libavfilter/version.h"
+
+/**
+ * Return the LIBAVFILTER_VERSION_INT constant.
+ */
+unsigned avfilter_version(void);
+
+/**
+ * Return the libavfilter build-time configuration.
+ */
+const char *avfilter_configuration(void);
+
+/**
+ * Return the libavfilter license.
+ */
+const char *avfilter_license(void);
+
+typedef struct AVFilterContext AVFilterContext;
+typedef struct AVFilterLink AVFilterLink;
+typedef struct AVFilterPad AVFilterPad;
+typedef struct AVFilterFormats AVFilterFormats;
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * A reference-counted buffer data type used by the filter system. Filters
+ * should not store pointers to this structure directly, but instead use the
+ * AVFilterBufferRef structure below.
+ */
+typedef struct AVFilterBuffer {
+ uint8_t *data[8]; ///< buffer data for each plane/channel
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data will always be set, but for planar
+ * audio with more channels than can fit in data, extended_data must be used
+ * in order to access all channels.
+ */
+ uint8_t **extended_data;
+ int linesize[8]; ///< number of bytes per line
+
+ /** private data to be used by a custom free function */
+ void *priv;
+ /**
+ * A pointer to the function to deallocate this buffer if the default
+ * function is not sufficient. This could, for example, add the memory
+ * back into a memory pool to be reused later without the overhead of
+ * reallocating it from scratch.
+ */
+ void (*free)(struct AVFilterBuffer *buf);
+
+ int format; ///< media format
+ int w, h; ///< width and height of the allocated buffer
+ unsigned refcount; ///< number of references to this buffer
+} AVFilterBuffer;
+
+#define AV_PERM_READ 0x01 ///< can read from the buffer
+#define AV_PERM_WRITE 0x02 ///< can write to the buffer
+#define AV_PERM_PRESERVE 0x04 ///< nobody else can overwrite the buffer
+#define AV_PERM_REUSE 0x08 ///< can output the buffer multiple times, with the same contents each time
+#define AV_PERM_REUSE2 0x10 ///< can output the buffer multiple times, modified each time
+#define AV_PERM_NEG_LINESIZES 0x20 ///< the buffer requested can have negative linesizes
+#define AV_PERM_ALIGN 0x40 ///< the buffer must be aligned
+
+#define AVFILTER_ALIGN 16 //not part of ABI
+
+/**
+ * Audio specific properties in a reference to an AVFilterBuffer. Since
+ * AVFilterBufferRef is common to different media formats, audio specific
+ * per reference properties must be separated out.
+ */
+typedef struct AVFilterBufferRefAudioProps {
+ uint64_t channel_layout; ///< channel layout of audio buffer
+ int nb_samples; ///< number of audio samples per channel
+ int sample_rate; ///< audio buffer sample rate
+ int channels; ///< number of channels (do not access directly)
+} AVFilterBufferRefAudioProps;
+
+/**
+ * Video specific properties in a reference to an AVFilterBuffer. Since
+ * AVFilterBufferRef is common to different media formats, video specific
+ * per reference properties must be separated out.
+ */
+typedef struct AVFilterBufferRefVideoProps {
+ int w; ///< image width
+ int h; ///< image height
+ AVRational sample_aspect_ratio; ///< sample aspect ratio
+ int interlaced; ///< is frame interlaced
+ int top_field_first; ///< field order
+ enum AVPictureType pict_type; ///< picture type of the frame
+ int key_frame; ///< 1 -> keyframe, 0-> not
+ int qp_table_linesize; ///< qp_table stride
+ int qp_table_size; ///< qp_table size
+ int8_t *qp_table; ///< array of Quantization Parameters
+} AVFilterBufferRefVideoProps;
+
+/**
+ * A reference to an AVFilterBuffer. Since filters can manipulate the origin of
+ * a buffer to, for example, crop image without any memcpy, the buffer origin
+ * and dimensions are per-reference properties. Linesize is also useful for
+ * image flipping, frame to field filters, etc, and so is also per-reference.
+ *
+ * TODO: add anything necessary for frame reordering
+ */
+typedef struct AVFilterBufferRef {
+ AVFilterBuffer *buf; ///< the buffer that this is a reference to
+ uint8_t *data[8]; ///< picture/audio data for each plane
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data will always be set, but for planar
+ * audio with more channels than can fit in data, extended_data must be used
+ * in order to access all channels.
+ */
+ uint8_t **extended_data;
+ int linesize[8]; ///< number of bytes per line
+
+ AVFilterBufferRefVideoProps *video; ///< video buffer specific properties
+ AVFilterBufferRefAudioProps *audio; ///< audio buffer specific properties
+
+ /**
+ * presentation timestamp. The time unit may change during
+ * filtering, as it is specified in the link and the filter code
+ * may need to rescale the PTS accordingly.
+ */
+ int64_t pts;
+ int64_t pos; ///< byte position in stream, -1 if unknown
+
+ int format; ///< media format
+
+ int perms; ///< permissions, see the AV_PERM_* flags
+
+ enum AVMediaType type; ///< media type of buffer data
+
+ AVDictionary *metadata; ///< dictionary containing metadata key=value tags
+} AVFilterBufferRef;
+
+/**
+ * Copy properties of src to dst, without copying the actual data
+ */
+attribute_deprecated
+void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, const AVFilterBufferRef *src);
+
+/**
+ * Add a new reference to a buffer.
+ *
+ * @param ref an existing reference to the buffer
+ * @param pmask a bitmask containing the allowable permissions in the new
+ * reference
+ * @return a new reference to the buffer with the same properties as the
+ * old, excluding any permissions denied by pmask
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask);
+
+/**
+ * Remove a reference to a buffer. If this is the last reference to the
+ * buffer, the buffer itself is also automatically freed.
+ *
+ * @param ref reference to the buffer, may be NULL
+ *
+ * @note it is recommended to use avfilter_unref_bufferp() instead of this
+ * function
+ */
+attribute_deprecated
+void avfilter_unref_buffer(AVFilterBufferRef *ref);
+
+/**
+ * Remove a reference to a buffer and set the pointer to NULL.
+ * If this is the last reference to the buffer, the buffer itself
+ * is also automatically freed.
+ *
+ * @param ref pointer to the buffer reference
+ */
+attribute_deprecated
+void avfilter_unref_bufferp(AVFilterBufferRef **ref);
+#endif
+
+/**
+ * Get the number of channels of a buffer reference.
+ */
+attribute_deprecated
+int avfilter_ref_get_channels(AVFilterBufferRef *ref);
+
+#if FF_API_AVFILTERPAD_PUBLIC
+/**
+ * A filter pad used for either input or output.
+ *
+ * See doc/filter_design.txt for details on how to implement the methods.
+ *
+ * @warning this struct might be removed from public API.
+ * users should call avfilter_pad_get_name() and avfilter_pad_get_type()
+ * to access the name and type fields; there should be no need to access
+ * any other fields from outside of libavfilter.
+ */
+struct AVFilterPad {
+ /**
+ * Pad name. The name is unique among inputs and among outputs, but an
+ * input may have the same name as an output. This may be NULL if this
+ * pad has no need to ever be referenced by name.
+ */
+ const char *name;
+
+ /**
+ * AVFilterPad type.
+ */
+ enum AVMediaType type;
+
+ /**
+ * Input pads:
+ * Minimum required permissions on incoming buffers. Any buffer with
+ * insufficient permissions will be automatically copied by the filter
+ * system to a new buffer which provides the needed access permissions.
+ *
+ * Output pads:
+ * Guaranteed permissions on outgoing buffers. Any buffer pushed on the
+ * link must have at least these permissions; this fact is checked by
+ * asserts. It can be used to optimize buffer allocation.
+ */
+ attribute_deprecated int min_perms;
+
+ /**
+ * Input pads:
+ * Permissions which are not accepted on incoming buffers. Any buffer
+ * which has any of these permissions set will be automatically copied
+ * by the filter system to a new buffer which does not have those
+ * permissions. This can be used to easily disallow buffers with
+ * AV_PERM_REUSE.
+ *
+ * Output pads:
+ * Permissions which are automatically removed on outgoing buffers. It
+ * can be used to optimize buffer allocation.
+ */
+ attribute_deprecated int rej_perms;
+
+ /**
+ * @deprecated unused
+ */
+ int (*start_frame)(AVFilterLink *link, AVFilterBufferRef *picref);
+
+ /**
+ * Callback function to get a video buffer. If NULL, the filter system will
+ * use ff_default_get_video_buffer().
+ *
+ * Input video pads only.
+ */
+ AVFrame *(*get_video_buffer)(AVFilterLink *link, int w, int h);
+
+ /**
+ * Callback function to get an audio buffer. If NULL, the filter system will
+ * use ff_default_get_audio_buffer().
+ *
+ * Input audio pads only.
+ */
+ AVFrame *(*get_audio_buffer)(AVFilterLink *link, int nb_samples);
+
+ /**
+ * @deprecated unused
+ */
+ int (*end_frame)(AVFilterLink *link);
+
+ /**
+ * @deprecated unused
+ */
+ int (*draw_slice)(AVFilterLink *link, int y, int height, int slice_dir);
+
+ /**
+ * Filtering callback. This is where a filter receives a frame with
+ * audio/video data and should do its processing.
+ *
+ * Input pads only.
+ *
+ * @return >= 0 on success, a negative AVERROR on error. This function
+ * must ensure that frame is properly unreferenced on error if it
+ * hasn't been passed on to another filter.
+ */
+ int (*filter_frame)(AVFilterLink *link, AVFrame *frame);
+
+ /**
+ * Frame poll callback. This returns the number of immediately available
+ * samples. It should return a positive value if the next request_frame()
+ * is guaranteed to return one frame (with no delay).
+ *
+ * Defaults to just calling the source poll_frame() method.
+ *
+ * Output pads only.
+ */
+ int (*poll_frame)(AVFilterLink *link);
+
+ /**
+ * Frame request callback. A call to this should result in at least one
+ * frame being output over the given link. This should return zero on
+ * success, and another value on error.
+ * See ff_request_frame() for the error codes with a specific
+ * meaning.
+ *
+ * Output pads only.
+ */
+ int (*request_frame)(AVFilterLink *link);
+
+ /**
+ * Link configuration callback.
+ *
+ * For output pads, this should set the following link properties:
+ * video: width, height, sample_aspect_ratio, time_base
+ * audio: sample_rate.
+ *
+ * This should NOT set properties such as format, channel_layout, etc which
+ * are negotiated between filters by the filter system using the
+ * query_formats() callback before this function is called.
+ *
+ * For input pads, this should check the properties of the link, and update
+ * the filter's internal state as necessary.
+ *
+ * For both input and output pads, this should return zero on success,
+ * and another value on error.
+ */
+ int (*config_props)(AVFilterLink *link);
+
+ /**
+ * The filter expects a fifo to be inserted on its input link,
+ * typically because it has a delay.
+ *
+ * input pads only.
+ */
+ int needs_fifo;
+
+ /**
+ * The filter expects writable frames from its input link,
+ * duplicating data buffers if needed.
+ *
+ * input pads only.
+ */
+ int needs_writable;
+};
+#endif
+
+/**
+ * Get the number of elements in a NULL-terminated array of AVFilterPads (e.g.
+ * AVFilter.inputs/outputs).
+ */
+int avfilter_pad_count(const AVFilterPad *pads);
+
+/**
+ * Get the name of an AVFilterPad.
+ *
+ * @param pads an array of AVFilterPads
+ * @param pad_idx index of the pad in the array; it is the caller's
+ * responsibility to ensure the index is valid
+ *
+ * @return name of the pad_idx'th pad in pads
+ */
+const char *avfilter_pad_get_name(const AVFilterPad *pads, int pad_idx);
+
+/**
+ * Get the type of an AVFilterPad.
+ *
+ * @param pads an array of AVFilterPads
+ * @param pad_idx index of the pad in the array; it is the caller's
+ * responsibility to ensure the index is valid
+ *
+ * @return type of the pad_idx'th pad in pads
+ */
+enum AVMediaType avfilter_pad_get_type(const AVFilterPad *pads, int pad_idx);
+
+/**
+ * The number of the filter inputs is not determined just by AVFilter.inputs.
+ * The filter might add additional inputs during initialization depending on the
+ * options supplied to it.
+ */
+#define AVFILTER_FLAG_DYNAMIC_INPUTS (1 << 0)
+/**
+ * The number of the filter outputs is not determined just by AVFilter.outputs.
+ * The filter might add additional outputs during initialization depending on
+ * the options supplied to it.
+ */
+#define AVFILTER_FLAG_DYNAMIC_OUTPUTS (1 << 1)
+/**
+ * The filter supports multithreading by splitting frames into multiple parts
+ * and processing them concurrently.
+ */
+#define AVFILTER_FLAG_SLICE_THREADS (1 << 2)
+/**
+ * Some filters support a generic "enable" expression option that can be used
+ * to enable or disable a filter in the timeline. Filters supporting this
+ * option have this flag set. When the enable expression is false, the default
+ * no-op filter_frame() function is called in place of the filter_frame()
+ * callback defined on each input pad, thus the frame is passed unchanged to
+ * the next filters.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC (1 << 16)
+/**
+ * Same as AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC, except that the filter will
+ * have its filter_frame() callback(s) called as usual even when the enable
+ * expression is false. The filter will disable filtering within the
+ * filter_frame() callback(s) itself, for example executing code depending on
+ * the AVFilterContext->is_disabled value.
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL (1 << 17)
+/**
+ * Handy mask to test whether the filter supports or not the timeline feature
+ * (internally or generically).
+ */
+#define AVFILTER_FLAG_SUPPORT_TIMELINE (AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL)
+
+/**
+ * Filter definition. This defines the pads a filter contains, and all the
+ * callback functions used to interact with the filter.
+ */
+typedef struct AVFilter {
+ /**
+ * Filter name. Must be non-NULL and unique among filters.
+ */
+ const char *name;
+
+ /**
+ * A description of the filter. May be NULL.
+ *
+ * You should use the NULL_IF_CONFIG_SMALL() macro to define it.
+ */
+ const char *description;
+
+ /**
+ * List of inputs, terminated by a zeroed element.
+ *
+ * NULL if there are no (static) inputs. Instances of filters with
+ * AVFILTER_FLAG_DYNAMIC_INPUTS set may have more inputs than present in
+ * this list.
+ */
+ const AVFilterPad *inputs;
+ /**
+ * List of outputs, terminated by a zeroed element.
+ *
+ * NULL if there are no (static) outputs. Instances of filters with
+ * AVFILTER_FLAG_DYNAMIC_OUTPUTS set may have more outputs than present in
+ * this list.
+ */
+ const AVFilterPad *outputs;
+
+ /**
+ * A class for the private data, used to declare filter private AVOptions.
+ * This field is NULL for filters that do not declare any options.
+ *
+ * If this field is non-NULL, the first member of the filter private data
+ * must be a pointer to AVClass, which will be set by libavfilter generic
+ * code to this class.
+ */
+ const AVClass *priv_class;
+
+ /**
+ * A combination of AVFILTER_FLAG_*
+ */
+ int flags;
+
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavfilter and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+
+ /**
+ * Filter initialization function.
+ *
+ * This callback will be called only once during the filter lifetime, after
+ * all the options have been set, but before links between filters are
+ * established and format negotiation is done.
+ *
+ * Basic filter initialization should be done here. Filters with dynamic
+ * inputs and/or outputs should create those inputs/outputs here based on
+ * provided options. No more changes to this filter's inputs/outputs can be
+ * done after this callback.
+ *
+ * This callback must not assume that the filter links exist or frame
+ * parameters are known.
+ *
+ * @ref AVFilter.uninit "uninit" is guaranteed to be called even if
+ * initialization fails, so this callback does not have to clean up on
+ * failure.
+ *
+ * @return 0 on success, a negative AVERROR on failure
+ */
+ int (*init)(AVFilterContext *ctx);
+
+ /**
+ * Should be set instead of @ref AVFilter.init "init" by the filters that
+ * want to pass a dictionary of AVOptions to nested contexts that are
+ * allocated during init.
+ *
+ * On return, the options dict should be freed and replaced with one that
+ * contains all the options which could not be processed by this filter (or
+ * with NULL if all the options were processed).
+ *
+ * Otherwise the semantics is the same as for @ref AVFilter.init "init".
+ */
+ int (*init_dict)(AVFilterContext *ctx, AVDictionary **options);
+
+ /**
+ * Filter uninitialization function.
+ *
+ * Called only once right before the filter is freed. Should deallocate any
+ * memory held by the filter, release any buffer references, etc. It does
+ * not need to deallocate the AVFilterContext.priv memory itself.
+ *
+ * This callback may be called even if @ref AVFilter.init "init" was not
+ * called or failed, so it must be prepared to handle such a situation.
+ */
+ void (*uninit)(AVFilterContext *ctx);
+
+ /**
+ * Query formats supported by the filter on its inputs and outputs.
+ *
+ * This callback is called after the filter is initialized (so the inputs
+ * and outputs are fixed), shortly before the format negotiation. This
+ * callback may be called more than once.
+ *
+ * This callback must set AVFilterLink.out_formats on every input link and
+ * AVFilterLink.in_formats on every output link to a list of pixel/sample
+ * formats that the filter supports on that link. For audio links, this
+ * filter must also set @ref AVFilterLink.in_samplerates "in_samplerates" /
+ * @ref AVFilterLink.out_samplerates "out_samplerates" and
+ * @ref AVFilterLink.in_channel_layouts "in_channel_layouts" /
+ * @ref AVFilterLink.out_channel_layouts "out_channel_layouts" analogously.
+ *
+ * This callback may be NULL for filters with one input, in which case
+ * libavfilter assumes that it supports all input formats and preserves
+ * them on output.
+ *
+ * @return zero on success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+ int (*query_formats)(AVFilterContext *);
+
+ int priv_size; ///< size of private data to allocate for the filter
+
+ /**
+ * Used by the filter registration system. Must not be touched by any other
+ * code.
+ */
+ struct AVFilter *next;
+
+ /**
+ * Make the filter instance process a command.
+ *
+ * @param cmd the command to process, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_size where the filter(s) can return a response. This must not change when the command is not supported.
+ * @param flags if AVFILTER_CMD_FLAG_FAST is set and the command would be
+ * time consuming then a filter should treat it like an unsupported command
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+ int (*process_command)(AVFilterContext *, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+ /**
+ * Filter initialization function, alternative to the init()
+ * callback. Args contains the user-supplied parameters, opaque is
+ * used for providing binary data.
+ */
+ int (*init_opaque)(AVFilterContext *ctx, void *opaque);
+} AVFilter;
+
+/**
+ * Process multiple parts of the frame concurrently.
+ */
+#define AVFILTER_THREAD_SLICE (1 << 0)
+
+typedef struct AVFilterInternal AVFilterInternal;
+
+/** An instance of a filter */
+struct AVFilterContext {
+ const AVClass *av_class; ///< needed for av_log() and filters common options
+
+ const AVFilter *filter; ///< the AVFilter of which this is an instance
+
+ char *name; ///< name of this filter instance
+
+ AVFilterPad *input_pads; ///< array of input pads
+ AVFilterLink **inputs; ///< array of pointers to input links
+#if FF_API_FOO_COUNT
+ attribute_deprecated unsigned input_count; ///< @deprecated use nb_inputs
+#endif
+ unsigned nb_inputs; ///< number of input pads
+
+ AVFilterPad *output_pads; ///< array of output pads
+ AVFilterLink **outputs; ///< array of pointers to output links
+#if FF_API_FOO_COUNT
+ attribute_deprecated unsigned output_count; ///< @deprecated use nb_outputs
+#endif
+ unsigned nb_outputs; ///< number of output pads
+
+ void *priv; ///< private data for use by the filter
+
+ struct AVFilterGraph *graph; ///< filtergraph this filter belongs to
+
+ /**
+ * Type of multithreading being allowed/used. A combination of
+ * AVFILTER_THREAD_* flags.
+ *
+ * May be set by the caller before initializing the filter to forbid some
+ * or all kinds of multithreading for this filter. The default is allowing
+ * everything.
+ *
+ * When the filter is initialized, this field is combined using bit AND with
+ * AVFilterGraph.thread_type to get the final mask used for determining
+ * allowed threading types. I.e. a threading type needs to be set in both
+ * to be allowed.
+ *
+ * After the filter is initialized, libavfilter sets this field to the
+ * threading type that is actually used (0 for no multithreading).
+ */
+ int thread_type;
+
+ /**
+ * An opaque struct for libavfilter internal use.
+ */
+ AVFilterInternal *internal;
+
+ struct AVFilterCommand *command_queue;
+
+ char *enable_str; ///< enable expression string
+ void *enable; ///< parsed expression (AVExpr*)
+ double *var_values; ///< variable values for the enable expression
+ int is_disabled; ///< the enabled state from the last expression evaluation
+};
+
+/**
+ * A link between two filters. This contains pointers to the source and
+ * destination filters between which this link exists, and the indexes of
+ * the pads involved. In addition, this link also contains the parameters
+ * which have been negotiated and agreed upon between the filter, such as
+ * image dimensions, format, etc.
+ */
+struct AVFilterLink {
+ AVFilterContext *src; ///< source filter
+ AVFilterPad *srcpad; ///< output pad on the source filter
+
+ AVFilterContext *dst; ///< dest filter
+ AVFilterPad *dstpad; ///< input pad on the dest filter
+
+ enum AVMediaType type; ///< filter media type
+
+ /* These parameters apply only to video */
+ int w; ///< agreed upon image width
+ int h; ///< agreed upon image height
+ AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
+ /* These parameters apply only to audio */
+ uint64_t channel_layout; ///< channel layout of current buffer (see libavutil/channel_layout.h)
+ int sample_rate; ///< samples per second
+
+ int format; ///< agreed upon media format
+
+ /**
+ * Define the time base used by the PTS of the frames/samples
+ * which will pass through this link.
+ * During the configuration stage, each filter is supposed to
+ * change only the output timebase, while the timebase of the
+ * input link is assumed to be an unchangeable property.
+ */
+ AVRational time_base;
+
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavfilter and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ /**
+ * Lists of formats and channel layouts supported by the input and output
+ * filters respectively. These lists are used for negotiating the format
+ * to actually be used, which will be loaded into the format and
+ * channel_layout members, above, when chosen.
+ *
+ */
+ AVFilterFormats *in_formats;
+ AVFilterFormats *out_formats;
+
+ /**
+ * Lists of channel layouts and sample rates used for automatic
+ * negotiation.
+ */
+ AVFilterFormats *in_samplerates;
+ AVFilterFormats *out_samplerates;
+ struct AVFilterChannelLayouts *in_channel_layouts;
+ struct AVFilterChannelLayouts *out_channel_layouts;
+
+ /**
+ * Audio only, the destination filter sets this to a non-zero value to
+ * request that buffers with the given number of samples should be sent to
+ * it. AVFilterPad.needs_fifo must also be set on the corresponding input
+ * pad.
+ * Last buffer before EOF will be padded with silence.
+ */
+ int request_samples;
+
+ /** stage of the initialization of the link properties (dimensions, etc) */
+ enum {
+ AVLINK_UNINIT = 0, ///< not started
+ AVLINK_STARTINIT, ///< started, but incomplete
+ AVLINK_INIT ///< complete
+ } init_state;
+
+ struct AVFilterPool *pool;
+
+ /**
+ * Graph the filter belongs to.
+ */
+ struct AVFilterGraph *graph;
+
+ /**
+ * Current timestamp of the link, as defined by the most recent
+ * frame(s), in AV_TIME_BASE units.
+ */
+ int64_t current_pts;
+
+ /**
+ * Index in the age array.
+ */
+ int age_index;
+
+ /**
+ * Frame rate of the stream on the link, or 1/0 if unknown;
+ * if left to 0/0, will automatically be copied from the first input
+ * of the source filter if it exists.
+ *
+ * Sources should set it to the best estimation of the real frame rate.
+ * Filters should update it if necessary depending on their function.
+ * Sinks can use it to set a default output frame rate.
+ * It is similar to the r_frame_rate field in AVStream.
+ */
+ AVRational frame_rate;
+
+ /**
+ * Buffer partially filled with samples to achieve a fixed/minimum size.
+ */
+ AVFrame *partial_buf;
+
+ /**
+ * Size of the partial buffer to allocate.
+ * Must be between min_samples and max_samples.
+ */
+ int partial_buf_size;
+
+ /**
+ * Minimum number of samples to filter at once. If filter_frame() is
+ * called with fewer samples, it will accumulate them in partial_buf.
+ * This field and the related ones must not be changed after filtering
+ * has started.
+ * If 0, all related fields are ignored.
+ */
+ int min_samples;
+
+ /**
+ * Maximum number of samples to filter at once. If filter_frame() is
+ * called with more samples, it will split them.
+ */
+ int max_samples;
+
+ /**
+ * The buffer reference currently being received across the link by the
+ * destination filter. This is used internally by the filter system to
+ * allow automatic copying of buffers which do not have sufficient
+ * permissions for the destination. This should not be accessed directly
+ * by the filters.
+ */
+ AVFilterBufferRef *cur_buf_copy;
+
+ /**
+ * True if the link is closed.
+ * If set, all attempts of start_frame, filter_frame or request_frame
+ * will fail with AVERROR_EOF, and if necessary the reference will be
+ * destroyed.
+ * If request_frame returns AVERROR_EOF, this flag is set on the
+ * corresponding link.
+ * It can also be set by either the source or the destination
+ * filter.
+ */
+ int closed;
+
+ /**
+ * Number of channels.
+ */
+ int channels;
+
+ /**
+ * True if a frame is being requested on the link.
+ * Used internally by the framework.
+ */
+ unsigned frame_requested;
+
+ /**
+ * Link processing flags.
+ */
+ unsigned flags;
+
+ /**
+ * Number of past frames sent through the link.
+ */
+ int64_t frame_count;
+};
+
+/**
+ * Link two filters together.
+ *
+ * @param src the source filter
+ * @param srcpad index of the output pad on the source filter
+ * @param dst the destination filter
+ * @param dstpad index of the input pad on the destination filter
+ * @return zero on success
+ */
+int avfilter_link(AVFilterContext *src, unsigned srcpad,
+ AVFilterContext *dst, unsigned dstpad);
+
+/**
+ * Free the link in *link, and set its pointer to NULL.
+ */
+void avfilter_link_free(AVFilterLink **link);
+
+/**
+ * Get the number of channels of a link.
+ */
+int avfilter_link_get_channels(AVFilterLink *link);
+
+/**
+ * Set the closed field of a link.
+ */
+void avfilter_link_set_closed(AVFilterLink *link, int closed);
+
+/**
+ * Negotiate the media format, dimensions, etc of all inputs to a filter.
+ *
+ * @param filter the filter to negotiate the properties for its inputs
+ * @return zero on successful negotiation
+ */
+int avfilter_config_links(AVFilterContext *filter);
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Create a buffer reference wrapped around an already allocated image
+ * buffer.
+ *
+ * @param data pointers to the planes of the image to reference
+ * @param linesize linesizes for the planes of the image to reference
+ * @param perms the required access permissions
+ * @param w the width of the image specified by the data and linesize arrays
+ * @param h the height of the image specified by the data and linesize arrays
+ * @param format the pixel format of the image specified by the data and linesize arrays
+ */
+attribute_deprecated
+AVFilterBufferRef *
+avfilter_get_video_buffer_ref_from_arrays(uint8_t * const data[4], const int linesize[4], int perms,
+ int w, int h, enum AVPixelFormat format);
+
+/**
+ * Create an audio buffer reference wrapped around an already
+ * allocated samples buffer.
+ *
+ * See avfilter_get_audio_buffer_ref_from_arrays_channels() for a version
+ * that can handle unknown channel layouts.
+ *
+ * @param data pointers to the samples plane buffers
+ * @param linesize linesize for the samples plane buffers
+ * @param perms the required access permissions
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the format of each sample in the buffer to allocate
+ * @param channel_layout the channel layout of the buffer
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays(uint8_t **data,
+ int linesize,
+ int perms,
+ int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ uint64_t channel_layout);
+/**
+ * Create an audio buffer reference wrapped around an already
+ * allocated samples buffer.
+ *
+ * @param data pointers to the samples plane buffers
+ * @param linesize linesize for the samples plane buffers
+ * @param perms the required access permissions
+ * @param nb_samples number of samples per channel
+ * @param sample_fmt the format of each sample in the buffer to allocate
+ * @param channels the number of channels of the buffer
+ * @param channel_layout the channel layout of the buffer,
+ * must be either 0 or consistent with channels
+ */
+attribute_deprecated
+AVFilterBufferRef *avfilter_get_audio_buffer_ref_from_arrays_channels(uint8_t **data,
+ int linesize,
+ int perms,
+ int nb_samples,
+ enum AVSampleFormat sample_fmt,
+ int channels,
+ uint64_t channel_layout);
+
+#endif
+
+
+#define AVFILTER_CMD_FLAG_ONE 1 ///< Stop once a filter understood the command (for target=all for example), fast filters are favored automatically
+#define AVFILTER_CMD_FLAG_FAST 2 ///< Only execute command when its fast (like a video out that supports contrast adjustment in hw)
+
+/**
+ * Make the filter instance process a command.
+ * It is recommended to use avfilter_graph_send_command().
+ */
+int avfilter_process_command(AVFilterContext *filter, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+/** Initialize the filter system. Register all builtin filters. */
+void avfilter_register_all(void);
+
+#if FF_API_OLD_FILTER_REGISTER
+/** Uninitialize the filter system. Unregister all filters. */
+attribute_deprecated
+void avfilter_uninit(void);
+#endif
+
+/**
+ * Register a filter. This is only needed if you plan to use
+ * avfilter_get_by_name later to lookup the AVFilter structure by name. A
+ * filter can still be instantiated with avfilter_graph_alloc_filter even if it
+ * is not registered.
+ *
+ * @param filter the filter to register
+ * @return 0 if the registration was successful, a negative value
+ * otherwise
+ */
+int avfilter_register(AVFilter *filter);
+
+/**
+ * Get a filter definition matching the given name.
+ *
+ * @param name the filter name to find
+ * @return the filter definition, if any matching one is registered.
+ * NULL if none found.
+ */
+#if !FF_API_NOCONST_GET_NAME
+const
+#endif
+AVFilter *avfilter_get_by_name(const char *name);
+
+/**
+ * Iterate over all registered filters.
+ * @return If prev is non-NULL, next registered filter after prev or NULL if
+ * prev is the last filter. If prev is NULL, return the first registered filter.
+ */
+const AVFilter *avfilter_next(const AVFilter *prev);
+
+#if FF_API_OLD_FILTER_REGISTER
+/**
+ * If filter is NULL, returns a pointer to the first registered filter pointer,
+ * if filter is non-NULL, returns the next pointer after filter.
+ * If the returned pointer points to NULL, the last registered filter
+ * was already reached.
+ * @deprecated use avfilter_next()
+ */
+attribute_deprecated
+AVFilter **av_filter_next(AVFilter **filter);
+#endif
+
+#if FF_API_AVFILTER_OPEN
+/**
+ * Create a filter instance.
+ *
+ * @param filter_ctx put here a pointer to the created filter context
+ * on success, NULL on failure
+ * @param filter the filter to create an instance of
+ * @param inst_name Name to give to the new instance. Can be NULL for none.
+ * @return >= 0 in case of success, a negative error code otherwise
+ * @deprecated use avfilter_graph_alloc_filter() instead
+ */
+attribute_deprecated
+int avfilter_open(AVFilterContext **filter_ctx, AVFilter *filter, const char *inst_name);
+#endif
+
+
+#if FF_API_AVFILTER_INIT_FILTER
+/**
+ * Initialize a filter.
+ *
+ * @param filter the filter to initialize
+ * @param args A string of parameters to use when initializing the filter.
+ * The format and meaning of this string varies by filter.
+ * @param opaque Any extra non-string data needed by the filter. The meaning
+ * of this parameter varies by filter.
+ * @return zero on success
+ */
+attribute_deprecated
+int avfilter_init_filter(AVFilterContext *filter, const char *args, void *opaque);
+#endif
+
+/**
+ * Initialize a filter with the supplied parameters.
+ *
+ * @param ctx uninitialized filter context to initialize
+ * @param args Options to initialize the filter with. This must be a
+ * ':'-separated list of options in the 'key=value' form.
+ * May be NULL if the options have been set directly using the
+ * AVOptions API or there are no options that need to be set.
+ * @return 0 on success, a negative AVERROR on failure
+ */
+int avfilter_init_str(AVFilterContext *ctx, const char *args);
+
+/**
+ * Initialize a filter with the supplied dictionary of options.
+ *
+ * @param ctx uninitialized filter context to initialize
+ * @param options An AVDictionary filled with options for this filter. On
+ * return this parameter will be destroyed and replaced with
+ * a dict containing options that were not found. This dictionary
+ * must be freed by the caller.
+ * May be NULL, then this function is equivalent to
+ * avfilter_init_str() with the second parameter set to NULL.
+ * @return 0 on success, a negative AVERROR on failure
+ *
+ * @note This function and avfilter_init_str() do essentially the same thing,
+ * the difference is in manner in which the options are passed. It is up to the
+ * calling code to choose whichever is more preferable. The two functions also
+ * behave differently when some of the provided options are not declared as
+ * supported by the filter. In such a case, avfilter_init_str() will fail, but
+ * this function will leave those extra options in the options AVDictionary and
+ * continue as usual.
+ */
+int avfilter_init_dict(AVFilterContext *ctx, AVDictionary **options);
+
+/**
+ * Free a filter context. This will also remove the filter from its
+ * filtergraph's list of filters.
+ *
+ * @param filter the filter to free
+ */
+void avfilter_free(AVFilterContext *filter);
+
+/**
+ * Insert a filter in the middle of an existing link.
+ *
+ * @param link the link into which the filter should be inserted
+ * @param filt the filter to be inserted
+ * @param filt_srcpad_idx the input pad on the filter to connect
+ * @param filt_dstpad_idx the output pad on the filter to connect
+ * @return zero on success
+ */
+int avfilter_insert_filter(AVFilterLink *link, AVFilterContext *filt,
+ unsigned filt_srcpad_idx, unsigned filt_dstpad_idx);
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Copy the frame properties of src to dst, without copying the actual
+ * image data.
+ *
+ * @return 0 on success, a negative number on error.
+ */
+attribute_deprecated
+int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src);
+
+/**
+ * Copy the frame properties and data pointers of src to dst, without copying
+ * the actual data.
+ *
+ * @return 0 on success, a negative number on error.
+ */
+attribute_deprecated
+int avfilter_copy_buf_props(AVFrame *dst, const AVFilterBufferRef *src);
+#endif
+
+/**
+ * @return AVClass for AVFilterContext.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avfilter_get_class(void);
+
+typedef struct AVFilterGraphInternal AVFilterGraphInternal;
+
+/**
+ * A function pointer passed to the @ref AVFilterGraph.execute callback to be
+ * executed multiple times, possibly in parallel.
+ *
+ * @param ctx the filter context the job belongs to
+ * @param arg an opaque parameter passed through from @ref
+ * AVFilterGraph.execute
+ * @param jobnr the index of the job being executed
+ * @param nb_jobs the total number of jobs
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+typedef int (avfilter_action_func)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
+
+/**
+ * A function executing multiple jobs, possibly in parallel.
+ *
+ * @param ctx the filter context to which the jobs belong
+ * @param func the function to be called multiple times
+ * @param arg the argument to be passed to func
+ * @param ret a nb_jobs-sized array to be filled with return values from each
+ * invocation of func
+ * @param nb_jobs the number of jobs to execute
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+typedef int (avfilter_execute_func)(AVFilterContext *ctx, avfilter_action_func *func,
+ void *arg, int *ret, int nb_jobs);
+
+typedef struct AVFilterGraph {
+ const AVClass *av_class;
+#if FF_API_FOO_COUNT
+ attribute_deprecated
+ unsigned filter_count_unused;
+#endif
+ AVFilterContext **filters;
+#if !FF_API_FOO_COUNT
+ unsigned nb_filters;
+#endif
+
+ char *scale_sws_opts; ///< sws options to use for the auto-inserted scale filters
+ char *resample_lavr_opts; ///< libavresample options to use for the auto-inserted resample filters
+#if FF_API_FOO_COUNT
+ unsigned nb_filters;
+#endif
+
+ /**
+ * Type of multithreading allowed for filters in this graph. A combination
+ * of AVFILTER_THREAD_* flags.
+ *
+ * May be set by the caller at any point, the setting will apply to all
+ * filters initialized after that. The default is allowing everything.
+ *
+ * When a filter in this graph is initialized, this field is combined using
+ * bit AND with AVFilterContext.thread_type to get the final mask used for
+ * determining allowed threading types. I.e. a threading type needs to be
+ * set in both to be allowed.
+ */
+ int thread_type;
+
+ /**
+ * Maximum number of threads used by filters in this graph. May be set by
+ * the caller before adding any filters to the filtergraph. Zero (the
+ * default) means that the number of threads is determined automatically.
+ */
+ int nb_threads;
+
+ /**
+ * Opaque object for libavfilter internal use.
+ */
+ AVFilterGraphInternal *internal;
+
+ /**
+ * Opaque user data. May be set by the caller to an arbitrary value, e.g. to
+ * be used from callbacks like @ref AVFilterGraph.execute.
+ * Libavfilter will not touch this field in any way.
+ */
+ void *opaque;
+
+ /**
+ * This callback may be set by the caller immediately after allocating the
+ * graph and before adding any filters to it, to provide a custom
+ * multithreading implementation.
+ *
+ * If set, filters with slice threading capability will call this callback
+ * to execute multiple jobs in parallel.
+ *
+ * If this field is left unset, libavfilter will use its internal
+ * implementation, which may or may not be multithreaded depending on the
+ * platform and build options.
+ */
+ avfilter_execute_func *execute;
+
+ char *aresample_swr_opts; ///< swr options to use for the auto-inserted aresample filters, Access ONLY through AVOptions
+
+ /**
+ * Private fields
+ *
+ * The following fields are for internal use only.
+ * Their type, offset, number and semantic can change without notice.
+ */
+
+ AVFilterLink **sink_links;
+ int sink_links_count;
+
+ unsigned disable_auto_convert;
+} AVFilterGraph;
+
+/**
+ * Allocate a filter graph.
+ */
+AVFilterGraph *avfilter_graph_alloc(void);
+
+/**
+ * Create a new filter instance in a filter graph.
+ *
+ * @param graph graph in which the new filter will be used
+ * @param filter the filter to create an instance of
+ * @param name Name to give to the new instance (will be copied to
+ * AVFilterContext.name). This may be used by the caller to identify
+ * different filters, libavfilter itself assigns no semantics to
+ * this parameter. May be NULL.
+ *
+ * @return the context of the newly created filter instance (note that it is
+ * also retrievable directly through AVFilterGraph.filters or with
+ * avfilter_graph_get_filter()) on success or NULL on failure.
+ */
+AVFilterContext *avfilter_graph_alloc_filter(AVFilterGraph *graph,
+ const AVFilter *filter,
+ const char *name);
+
+/**
+ * Get a filter instance identified by instance name from graph.
+ *
+ * @param graph filter graph to search through.
+ * @param name filter instance name (should be unique in the graph).
+ * @return the pointer to the found filter instance or NULL if it
+ * cannot be found.
+ */
+AVFilterContext *avfilter_graph_get_filter(AVFilterGraph *graph, const char *name);
+
+#if FF_API_AVFILTER_OPEN
+/**
+ * Add an existing filter instance to a filter graph.
+ *
+ * @param graphctx the filter graph
+ * @param filter the filter to be added
+ *
+ * @deprecated use avfilter_graph_alloc_filter() to allocate a filter in a
+ * filter graph
+ */
+attribute_deprecated
+int avfilter_graph_add_filter(AVFilterGraph *graphctx, AVFilterContext *filter);
+#endif
+
+/**
+ * Create and add a filter instance into an existing graph.
+ * The filter instance is created from the filter filt and inited
+ * with the parameters args and opaque.
+ *
+ * In case of success put in *filt_ctx the pointer to the created
+ * filter instance, otherwise set *filt_ctx to NULL.
+ *
+ * @param name the instance name to give to the created filter instance
+ * @param graph_ctx the filter graph
+ * @return a negative AVERROR error code in case of failure, a non
+ * negative value otherwise
+ */
+int avfilter_graph_create_filter(AVFilterContext **filt_ctx, const AVFilter *filt,
+ const char *name, const char *args, void *opaque,
+ AVFilterGraph *graph_ctx);
+
+/**
+ * Enable or disable automatic format conversion inside the graph.
+ *
+ * Note that format conversion can still happen inside explicitly inserted
+ * scale and aresample filters.
+ *
+ * @param flags any of the AVFILTER_AUTO_CONVERT_* constants
+ */
+void avfilter_graph_set_auto_convert(AVFilterGraph *graph, unsigned flags);
+
+enum {
+ AVFILTER_AUTO_CONVERT_ALL = 0, /**< all automatic conversions enabled */
+ AVFILTER_AUTO_CONVERT_NONE = -1, /**< all automatic conversions disabled */
+};
+
+/**
+ * Check validity and configure all the links and formats in the graph.
+ *
+ * @param graphctx the filter graph
+ * @param log_ctx context used for logging
+ * @return >= 0 in case of success, a negative AVERROR code otherwise
+ */
+int avfilter_graph_config(AVFilterGraph *graphctx, void *log_ctx);
+
+/**
+ * Free a graph, destroy its links, and set *graph to NULL.
+ * If *graph is NULL, do nothing.
+ */
+void avfilter_graph_free(AVFilterGraph **graph);
+
+/**
+ * A linked-list of the inputs/outputs of the filter chain.
+ *
+ * This is mainly useful for avfilter_graph_parse() / avfilter_graph_parse2(),
+ * where it is used to communicate open (unlinked) inputs and outputs from and
+ * to the caller.
+ * This struct specifies, per each not connected pad contained in the graph, the
+ * filter context and the pad index required for establishing a link.
+ */
+typedef struct AVFilterInOut {
+ /** unique name for this input/output in the list */
+ char *name;
+
+ /** filter context associated to this input/output */
+ AVFilterContext *filter_ctx;
+
+ /** index of the filt_ctx pad to use for linking */
+ int pad_idx;
+
+ /** next input/output in the list, NULL if this is the last */
+ struct AVFilterInOut *next;
+} AVFilterInOut;
+
+/**
+ * Allocate a single AVFilterInOut entry.
+ * Must be freed with avfilter_inout_free().
+ * @return allocated AVFilterInOut on success, NULL on failure.
+ */
+AVFilterInOut *avfilter_inout_alloc(void);
+
+/**
+ * Free the supplied list of AVFilterInOut and set *inout to NULL.
+ * If *inout is NULL, do nothing.
+ */
+void avfilter_inout_free(AVFilterInOut **inout);
+
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI || !FF_API_OLD_GRAPH_PARSE
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @note The caller must provide the lists of inputs and outputs,
+ * which therefore must be known before calling the function.
+ *
+ * @note The inputs parameter describes inputs of the already existing
+ * part of the graph; i.e. from the point of view of the newly created
+ * part, they are outputs. Similarly the outputs parameter describes
+ * outputs of the already existing filters, which are provided as
+ * inputs to the parsed filters.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs linked list to the inputs of the graph
+ * @param outputs linked list to the outputs of the graph
+ * @return zero on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut *inputs, AVFilterInOut *outputs,
+ void *log_ctx);
+#else
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ * @deprecated Use avfilter_graph_parse_ptr() instead.
+ */
+attribute_deprecated
+int avfilter_graph_parse(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+#endif
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param graph the filter graph where to link the parsed graph context
+ * @param filters string to be parsed
+ * @param inputs pointer to a linked list to the inputs of the graph, may be NULL.
+ * If non-NULL, *inputs is updated to contain the list of open inputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @param outputs pointer to a linked list to the outputs of the graph, may be NULL.
+ * If non-NULL, *outputs is updated to contain the list of open outputs
+ * after the parsing, should be freed with avfilter_inout_free().
+ * @return non negative on success, a negative AVERROR code on error
+ */
+int avfilter_graph_parse_ptr(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs, AVFilterInOut **outputs,
+ void *log_ctx);
+
+/**
+ * Add a graph described by a string to a graph.
+ *
+ * @param[in] graph the filter graph where to link the parsed graph context
+ * @param[in] filters string to be parsed
+ * @param[out] inputs a linked list of all free (unlinked) inputs of the
+ * parsed graph will be returned here. It is to be freed
+ * by the caller using avfilter_inout_free().
+ * @param[out] outputs a linked list of all free (unlinked) outputs of the
+ * parsed graph will be returned here. It is to be freed by the
+ * caller using avfilter_inout_free().
+ * @return zero on success, a negative AVERROR code on error
+ *
+ * @note This function returns the inputs and outputs that are left
+ * unlinked after parsing the graph and the caller then deals with
+ * them.
+ * @note This function makes no reference whatsoever to already
+ * existing parts of the graph and the inputs parameter will on return
+ * contain inputs of the newly parsed part of the graph. Analogously
+ * the outputs parameter will contain outputs of the newly created
+ * filters.
+ */
+int avfilter_graph_parse2(AVFilterGraph *graph, const char *filters,
+ AVFilterInOut **inputs,
+ AVFilterInOut **outputs);
+
+/**
+ * Send a command to one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param res a buffer with size res_size where the filter(s) can return a response.
+ *
+ * @returns >=0 on success otherwise an error code.
+ * AVERROR(ENOSYS) on unsupported commands
+ */
+int avfilter_graph_send_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, char *res, int res_len, int flags);
+
+/**
+ * Queue a command for one or more filter instances.
+ *
+ * @param graph the filter graph
+ * @param target the filter(s) to which the command should be sent
+ * "all" sends to all filters
+ * otherwise it can be a filter or filter instance name
+ * which will send the command to all matching filters.
+ * @param cmd the command to send, for handling simplicity all commands must be alphanumeric only
+ * @param arg the argument for the command
+ * @param ts time at which the command should be sent to the filter
+ *
+ * @note As this executes commands after this function returns, no return code
+ * from the filter is provided, also AVFILTER_CMD_FLAG_ONE is not supported.
+ */
+int avfilter_graph_queue_command(AVFilterGraph *graph, const char *target, const char *cmd, const char *arg, int flags, double ts);
+
+
+/**
+ * Dump a graph into a human-readable string representation.
+ *
+ * @param graph the graph to dump
+ * @param options formatting options; currently ignored
+ * @return a string, or NULL in case of memory allocation failure;
+ * the string must be freed using av_free
+ */
+char *avfilter_graph_dump(AVFilterGraph *graph, const char *options);
+
+/**
+ * Request a frame on the oldest sink link.
+ *
+ * If the request returns AVERROR_EOF, try the next.
+ *
+ * Note that this function is not meant to be the sole scheduling mechanism
+ * of a filtergraph, only a convenience function to help drain a filtergraph
+ * in a balanced way under normal circumstances.
+ *
+ * Also note that AVERROR_EOF does not mean that frames did not arrive on
+ * some of the sinks during the process.
+ * When there are multiple sink links, in case the requested link
+ * returns an EOF, this may cause a filter to flush pending frames
+ * which are sent to another sink link, although unrequested.
+ *
+ * @return the return value of ff_request_frame(),
+ * or AVERROR_EOF if all links returned AVERROR_EOF
+ */
+int avfilter_graph_request_oldest(AVFilterGraph *graph);
+
+/**
+ * @}
+ */
+
+#endif /* AVFILTER_AVFILTER_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/avfiltergraph.h b/Externals/ffmpeg/dev/include/libavfilter/avfiltergraph.h
new file mode 100644
index 0000000000..b31d581ca0
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/avfiltergraph.h
@@ -0,0 +1,28 @@
+/*
+ * Filter graphs
+ * copyright (c) 2007 Bobby Bingham
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_AVFILTERGRAPH_H
+#define AVFILTER_AVFILTERGRAPH_H
+
+#include "avfilter.h"
+#include "libavutil/log.h"
+
+#endif /* AVFILTER_AVFILTERGRAPH_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/buffersink.h b/Externals/ffmpeg/dev/include/libavfilter/buffersink.h
new file mode 100644
index 0000000000..24cd2feac7
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/buffersink.h
@@ -0,0 +1,204 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BUFFERSINK_H
+#define AVFILTER_BUFFERSINK_H
+
+/**
+ * @file
+ * @ingroup lavfi_buffersink
+ * memory buffer sink API for audio and video
+ */
+
+#include "avfilter.h"
+
+/**
+ * @defgroup lavfi_buffersink Buffer sink API
+ * @ingroup lavfi
+ * @{
+ */
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Get an audio/video buffer data from buffer_sink and put it in bufref.
+ *
+ * This function works with both audio and video buffer sinks.
+ *
+ * @param buffer_sink pointer to a buffersink or abuffersink context
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure
+ */
+attribute_deprecated
+int av_buffersink_get_buffer_ref(AVFilterContext *buffer_sink,
+ AVFilterBufferRef **bufref, int flags);
+
+/**
+ * Get the number of immediately available frames.
+ */
+attribute_deprecated
+int av_buffersink_poll_frame(AVFilterContext *ctx);
+
+/**
+ * Get a buffer with filtered data from sink and put it in buf.
+ *
+ * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
+ * @param buf pointer to the buffer will be written here if buf is non-NULL. buf
+ * must be freed by the caller using avfilter_unref_buffer().
+ * Buf may also be NULL to query whether a buffer is ready to be
+ * output.
+ *
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure.
+ */
+attribute_deprecated
+int av_buffersink_read(AVFilterContext *ctx, AVFilterBufferRef **buf);
+
+/**
+ * Same as av_buffersink_read, but with the ability to specify the number of
+ * samples read. This function is less efficient than av_buffersink_read(),
+ * because it copies the data around.
+ *
+ * @param ctx pointer to a context of the abuffersink AVFilter.
+ * @param buf pointer to the buffer will be written here if buf is non-NULL. buf
+ * must be freed by the caller using avfilter_unref_buffer(). buf
+ * will contain exactly nb_samples audio samples, except at the end
+ * of stream, when it can contain less than nb_samples.
+ * Buf may also be NULL to query whether a buffer is ready to be
+ * output.
+ *
+ * @warning do not mix this function with av_buffersink_read(). Use only one or
+ * the other with a single sink, not both.
+ */
+attribute_deprecated
+int av_buffersink_read_samples(AVFilterContext *ctx, AVFilterBufferRef **buf,
+ int nb_samples);
+#endif
+
+/**
+ * Get a frame with filtered data from sink and put it in frame.
+ *
+ * @param ctx pointer to a buffersink or abuffersink filter context.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ * The data must be freed using av_frame_unref() / av_frame_free()
+ * @param flags a combination of AV_BUFFERSINK_FLAG_* flags
+ *
+ * @return >= 0 in case of success, a negative AVERROR code for failure.
+ */
+int av_buffersink_get_frame_flags(AVFilterContext *ctx, AVFrame *frame, int flags);
+
+/**
+ * Tell av_buffersink_get_buffer_ref() to read video/samples buffer
+ * reference, but not remove it from the buffer. This is useful if you
+ * need only to read a video/samples buffer, without fetching it.
+ */
+#define AV_BUFFERSINK_FLAG_PEEK 1
+
+/**
+ * Tell av_buffersink_get_buffer_ref() not to request a frame from its input.
+ * If a frame is already buffered, it is read (and removed from the buffer),
+ * but if no frame is present, return AVERROR(EAGAIN).
+ */
+#define AV_BUFFERSINK_FLAG_NO_REQUEST 2
+
+/**
+ * Struct to use for initializing a buffersink context.
+ */
+typedef struct {
+ const enum AVPixelFormat *pixel_fmts; ///< list of allowed pixel formats, terminated by AV_PIX_FMT_NONE
+} AVBufferSinkParams;
+
+/**
+ * Create an AVBufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVBufferSinkParams *av_buffersink_params_alloc(void);
+
+/**
+ * Struct to use for initializing an abuffersink context.
+ */
+typedef struct {
+ const enum AVSampleFormat *sample_fmts; ///< list of allowed sample formats, terminated by AV_SAMPLE_FMT_NONE
+ const int64_t *channel_layouts; ///< list of allowed channel layouts, terminated by -1
+ const int *channel_counts; ///< list of allowed channel counts, terminated by -1
+ int all_channel_counts; ///< if not 0, accept any channel count or layout
+ int *sample_rates; ///< list of allowed sample rates, terminated by -1
+} AVABufferSinkParams;
+
+/**
+ * Create an AVABufferSinkParams structure.
+ *
+ * Must be freed with av_free().
+ */
+AVABufferSinkParams *av_abuffersink_params_alloc(void);
+
+/**
+ * Set the frame size for an audio buffer sink.
+ *
+ * All calls to av_buffersink_get_buffer_ref will return a buffer with
+ * exactly the specified number of samples, or AVERROR(EAGAIN) if there is
+ * not enough. The last buffer at EOF will be padded with 0.
+ */
+void av_buffersink_set_frame_size(AVFilterContext *ctx, unsigned frame_size);
+
+/**
+ * Get the frame rate of the input.
+ */
+AVRational av_buffersink_get_frame_rate(AVFilterContext *ctx);
+
+/**
+ * Get a frame with filtered data from sink and put it in frame.
+ *
+ * @param ctx pointer to a context of a buffersink or abuffersink AVFilter.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ * The data must be freed using av_frame_unref() / av_frame_free()
+ *
+ * @return
+ * - >= 0 if a frame was successfully returned.
+ * - AVERROR(EAGAIN) if no frames are available at this point; more
+ * input frames must be added to the filtergraph to get more output.
+ * - AVERROR_EOF if there will be no more output frames on this sink.
+ * - A different negative AVERROR code in other failure cases.
+ */
+int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame);
+
+/**
+ * Same as av_buffersink_get_frame(), but with the ability to specify the number
+ * of samples read. This function is less efficient than
+ * av_buffersink_get_frame(), because it copies the data around.
+ *
+ * @param ctx pointer to a context of the abuffersink AVFilter.
+ * @param frame pointer to an allocated frame that will be filled with data.
+ * The data must be freed using av_frame_unref() / av_frame_free()
+ * frame will contain exactly nb_samples audio samples, except at
+ * the end of stream, when it can contain less than nb_samples.
+ *
+ * @return The return codes have the same meaning as for
+ * av_buffersink_get_frame().
+ *
+ * @warning do not mix this function with av_buffersink_get_frame(). Use only one or
+ * the other with a single sink, not both.
+ */
+int av_buffersink_get_samples(AVFilterContext *ctx, AVFrame *frame, int nb_samples);
+
+/**
+ * @}
+ */
+
+#endif /* AVFILTER_BUFFERSINK_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/buffersrc.h b/Externals/ffmpeg/dev/include/libavfilter/buffersrc.h
new file mode 100644
index 0000000000..ea34c04ee9
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/buffersrc.h
@@ -0,0 +1,160 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_BUFFERSRC_H
+#define AVFILTER_BUFFERSRC_H
+
+/**
+ * @file
+ * @ingroup lavfi_buffersrc
+ * Memory buffer source API.
+ */
+
+#include "libavcodec/avcodec.h"
+#include "avfilter.h"
+
+/**
+ * @defgroup lavfi_buffersrc Buffer source API
+ * @ingroup lavfi
+ * @{
+ */
+
+enum {
+
+ /**
+ * Do not check for format changes.
+ */
+ AV_BUFFERSRC_FLAG_NO_CHECK_FORMAT = 1,
+
+#if FF_API_AVFILTERBUFFER
+ /**
+ * Ignored
+ */
+ AV_BUFFERSRC_FLAG_NO_COPY = 2,
+#endif
+
+ /**
+ * Immediately push the frame to the output.
+ */
+ AV_BUFFERSRC_FLAG_PUSH = 4,
+
+ /**
+ * Keep a reference to the frame.
+ * If the frame is reference-counted, create a new reference; otherwise
+ * copy the frame data.
+ */
+ AV_BUFFERSRC_FLAG_KEEP_REF = 8,
+
+};
+
+/**
+ * Add buffer data in picref to buffer_src.
+ *
+ * @param buffer_src pointer to a buffer source context
+ * @param picref a buffer reference, or NULL to mark EOF
+ * @param flags a combination of AV_BUFFERSRC_FLAG_*
+ * @return >= 0 in case of success, a negative AVERROR code
+ * in case of failure
+ */
+int av_buffersrc_add_ref(AVFilterContext *buffer_src,
+ AVFilterBufferRef *picref, int flags);
+
+/**
+ * Get the number of failed requests.
+ *
+ * A failed request is when the request_frame method is called while no
+ * frame is present in the buffer.
+ * The number is reset when a frame is added.
+ */
+unsigned av_buffersrc_get_nb_failed_requests(AVFilterContext *buffer_src);
+
+#if FF_API_AVFILTERBUFFER
+/**
+ * Add a buffer to a filtergraph.
+ *
+ * @param ctx an instance of the buffersrc filter
+ * @param buf buffer containing frame data to be passed down the filtergraph.
+ * This function will take ownership of buf, the user must not free it.
+ * A NULL buf signals EOF -- i.e. no more frames will be sent to this filter.
+ *
+ * @deprecated use av_buffersrc_write_frame() or av_buffersrc_add_frame()
+ */
+attribute_deprecated
+int av_buffersrc_buffer(AVFilterContext *ctx, AVFilterBufferRef *buf);
+#endif
+
+/**
+ * Add a frame to the buffer source.
+ *
+ * @param ctx an instance of the buffersrc filter
+ * @param frame frame to be added. If the frame is reference counted, this
+ * function will make a new reference to it. Otherwise the frame data will be
+ * copied.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() with the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
+ */
+int av_buffersrc_write_frame(AVFilterContext *ctx, const AVFrame *frame);
+
+/**
+ * Add a frame to the buffer source.
+ *
+ * @param ctx an instance of the buffersrc filter
+ * @param frame frame to be added. If the frame is reference counted, this
+ * function will take ownership of the reference(s) and reset the frame.
+ * Otherwise the frame data will be copied. If this function returns an error,
+ * the input frame is not touched.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @note the difference between this function and av_buffersrc_write_frame() is
+ * that av_buffersrc_write_frame() creates a new reference to the input frame,
+ * while this function takes ownership of the reference passed to it.
+ *
+ * This function is equivalent to av_buffersrc_add_frame_flags() without the
+ * AV_BUFFERSRC_FLAG_KEEP_REF flag.
+ */
+int av_buffersrc_add_frame(AVFilterContext *ctx, AVFrame *frame);
+
+/**
+ * Add a frame to the buffer source.
+ *
+ * By default, if the frame is reference-counted, this function will take
+ * ownership of the reference(s) and reset the frame. This can be controlled
+ * using the flags.
+ *
+ * If this function returns an error, the input frame is not touched.
+ *
+ * @param buffer_src pointer to a buffer source context
+ * @param frame a frame, or NULL to mark EOF
+ * @param flags a combination of AV_BUFFERSRC_FLAG_*
+ * @return >= 0 in case of success, a negative AVERROR code
+ * in case of failure
+ */
+int av_buffersrc_add_frame_flags(AVFilterContext *buffer_src,
+ AVFrame *frame, int flags);
+
+
+/**
+ * @}
+ */
+
+#endif /* AVFILTER_BUFFERSRC_H */
diff --git a/Externals/ffmpeg/dev/include/libavfilter/version.h b/Externals/ffmpeg/dev/include/libavfilter/version.h
new file mode 100644
index 0000000000..383eb55ef4
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavfilter/version.h
@@ -0,0 +1,80 @@
+/*
+ * Version macros.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_VERSION_H
+#define AVFILTER_VERSION_H
+
+/**
+ * @file
+ * @ingroup lavfi
+ * Libavfilter version macros
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVFILTER_VERSION_MAJOR 5
+#define LIBAVFILTER_VERSION_MINOR 7
+#define LIBAVFILTER_VERSION_MICRO 101
+
+#define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
+ LIBAVFILTER_VERSION_MINOR, \
+ LIBAVFILTER_VERSION_MICRO)
+#define LIBAVFILTER_VERSION AV_VERSION(LIBAVFILTER_VERSION_MAJOR, \
+ LIBAVFILTER_VERSION_MINOR, \
+ LIBAVFILTER_VERSION_MICRO)
+#define LIBAVFILTER_BUILD LIBAVFILTER_VERSION_INT
+
+#define LIBAVFILTER_IDENT "Lavfi" AV_STRINGIFY(LIBAVFILTER_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#ifndef FF_API_AVFILTERPAD_PUBLIC
+#define FF_API_AVFILTERPAD_PUBLIC (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_FOO_COUNT
+#define FF_API_FOO_COUNT (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_AVFILTERBUFFER
+#define FF_API_AVFILTERBUFFER (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_OLD_FILTER_OPTS
+#define FF_API_OLD_FILTER_OPTS (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_AVFILTER_OPEN
+#define FF_API_AVFILTER_OPEN (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_AVFILTER_INIT_FILTER
+#define FF_API_AVFILTER_INIT_FILTER (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_OLD_FILTER_REGISTER
+#define FF_API_OLD_FILTER_REGISTER (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+#ifndef FF_API_OLD_GRAPH_PARSE
+#define FF_API_OLD_GRAPH_PARSE (LIBAVFILTER_VERSION_MAJOR < 5)
+#endif
+#ifndef FF_API_NOCONST_GET_NAME
+#define FF_API_NOCONST_GET_NAME (LIBAVFILTER_VERSION_MAJOR < 6)
+#endif
+
+#endif /* AVFILTER_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libavformat/avformat.h b/Externals/ffmpeg/dev/include/libavformat/avformat.h
new file mode 100644
index 0000000000..a62a9ef5a9
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavformat/avformat.h
@@ -0,0 +1,2687 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_AVFORMAT_H
+#define AVFORMAT_AVFORMAT_H
+
+/**
+ * @file
+ * @ingroup libavf
+ * Main libavformat public API header
+ */
+
+/**
+ * @defgroup libavf I/O and Muxing/Demuxing Library
+ * @{
+ *
+ * Libavformat (lavf) is a library for dealing with various media container
+ * formats. Its main two purposes are demuxing - i.e. splitting a media file
+ * into component streams, and the reverse process of muxing - writing supplied
+ * data in a specified container format. It also has an @ref lavf_io
+ * "I/O module" which supports a number of protocols for accessing the data (e.g.
+ * file, tcp, http and others). Before using lavf, you need to call
+ * av_register_all() to register all compiled muxers, demuxers and protocols.
+ * Unless you are absolutely sure you won't use libavformat's network
+ * capabilities, you should also call avformat_network_init().
+ *
+ * A supported input format is described by an AVInputFormat struct, conversely
+ * an output format is described by AVOutputFormat. You can iterate over all
+ * registered input/output formats using the av_iformat_next() /
+ * av_oformat_next() functions. The protocols layer is not part of the public
+ * API, so you can only get the names of supported protocols with the
+ * avio_enum_protocols() function.
+ *
+ * Main lavf structure used for both muxing and demuxing is AVFormatContext,
+ * which exports all information about the file being read or written. As with
+ * most Libavformat structures, its size is not part of public ABI, so it cannot be
+ * allocated on stack or directly with av_malloc(). To create an
+ * AVFormatContext, use avformat_alloc_context() (some functions, like
+ * avformat_open_input() might do that for you).
+ *
+ * Most importantly an AVFormatContext contains:
+ * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat
+ * "output" format. It is either autodetected or set by user for input;
+ * always set by user for output.
+ * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all
+ * elementary streams stored in the file. AVStreams are typically referred to
+ * using their index in this array.
+ * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or
+ * set by user for input, always set by user for output (unless you are dealing
+ * with an AVFMT_NOFILE format).
+ *
+ * @section lavf_options Passing options to (de)muxers
+ * Lavf allows configuring muxers and demuxers using the @ref avoptions
+ * mechanism. Generic (format-independent) libavformat options are provided by
+ * AVFormatContext, they can be examined from a user program by calling
+ * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass
+ * from avformat_get_class()). Private (format-specific) options are provided by
+ * AVFormatContext.priv_data if and only if AVInputFormat.priv_class /
+ * AVOutputFormat.priv_class of the corresponding format struct is non-NULL.
+ * Further options may be provided by the @ref AVFormatContext.pb "I/O context",
+ * if its AVClass is non-NULL, and the protocols layer. See the discussion on
+ * nesting in @ref avoptions documentation to learn how to access those.
+ *
+ * @defgroup lavf_decoding Demuxing
+ * @{
+ * Demuxers read a media file and split it into chunks of data (@em packets). A
+ * @ref AVPacket "packet" contains one or more encoded frames which belongs to a
+ * single elementary stream. In the lavf API this process is represented by the
+ * avformat_open_input() function for opening a file, av_read_frame() for
+ * reading a single packet and finally avformat_close_input(), which does the
+ * cleanup.
+ *
+ * @section lavf_decoding_open Opening a media file
+ * The minimum information required to open a file is its URL or filename, which
+ * is passed to avformat_open_input(), as in the following code:
+ * @code
+ * const char *url = "in.mp3";
+ * AVFormatContext *s = NULL;
+ * int ret = avformat_open_input(&s, url, NULL, NULL);
+ * if (ret < 0)
+ * abort();
+ * @endcode
+ * The above code attempts to allocate an AVFormatContext, open the
+ * specified file (autodetecting the format) and read the header, exporting the
+ * information stored there into s. Some formats do not have a header or do not
+ * store enough information there, so it is recommended that you call the
+ * avformat_find_stream_info() function which tries to read and decode a few
+ * frames to find missing information.
+ *
+ * In some cases you might want to preallocate an AVFormatContext yourself with
+ * avformat_alloc_context() and do some tweaking on it before passing it to
+ * avformat_open_input(). One such case is when you want to use custom functions
+ * for reading input data instead of lavf internal I/O layer.
+ * To do that, create your own AVIOContext with avio_alloc_context(), passing
+ * your reading callbacks to it. Then set the @em pb field of your
+ * AVFormatContext to newly created AVIOContext.
+ *
+ * Since the format of the opened file is in general not known until after
+ * avformat_open_input() has returned, it is not possible to set demuxer private
+ * options on a preallocated context. Instead, the options should be passed to
+ * avformat_open_input() wrapped in an AVDictionary:
+ * @code
+ * AVDictionary *options = NULL;
+ * av_dict_set(&options, "video_size", "640x480", 0);
+ * av_dict_set(&options, "pixel_format", "rgb24", 0);
+ *
+ * if (avformat_open_input(&s, url, NULL, &options) < 0)
+ * abort();
+ * av_dict_free(&options);
+ * @endcode
+ * This code passes the private options 'video_size' and 'pixel_format' to the
+ * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it
+ * cannot know how to interpret raw video data otherwise. If the format turns
+ * out to be something different than raw video, those options will not be
+ * recognized by the demuxer and therefore will not be applied. Such unrecognized
+ * options are then returned in the options dictionary (recognized options are
+ * consumed). The calling program can handle such unrecognized options as it
+ * wishes, e.g.
+ * @code
+ * AVDictionaryEntry *e;
+ * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) {
+ * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key);
+ * abort();
+ * }
+ * @endcode
+ *
+ * After you have finished reading the file, you must close it with
+ * avformat_close_input(). It will free everything associated with the file.
+ *
+ * @section lavf_decoding_read Reading from an opened file
+ * Reading data from an opened AVFormatContext is done by repeatedly calling
+ * av_read_frame() on it. Each call, if successful, will return an AVPacket
+ * containing encoded data for one AVStream, identified by
+ * AVPacket.stream_index. This packet may be passed straight into the libavcodec
+ * decoding functions avcodec_decode_video2(), avcodec_decode_audio4() or
+ * avcodec_decode_subtitle2() if the caller wishes to decode the data.
+ *
+ * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be
+ * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for
+ * pts/dts, 0 for duration) if the stream does not provide them. The timing
+ * information will be in AVStream.time_base units, i.e. it has to be
+ * multiplied by the timebase to convert them to seconds.
+ *
+ * If AVPacket.buf is set on the returned packet, then the packet is
+ * allocated dynamically and the user may keep it indefinitely.
+ * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a
+ * static storage somewhere inside the demuxer and the packet is only valid
+ * until the next av_read_frame() call or closing the file. If the caller
+ * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy
+ * of it.
+ * In both cases, the packet must be freed with av_free_packet() when it is no
+ * longer needed.
+ *
+ * @section lavf_decoding_seek Seeking
+ * @}
+ *
+ * @defgroup lavf_encoding Muxing
+ * @{
+ * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write
+ * it into files or other output bytestreams in the specified container format.
+ *
+ * The main API functions for muxing are avformat_write_header() for writing the
+ * file header, av_write_frame() / av_interleaved_write_frame() for writing the
+ * packets and av_write_trailer() for finalizing the file.
+ *
+ * At the beginning of the muxing process, the caller must first call
+ * avformat_alloc_context() to create a muxing context. The caller then sets up
+ * the muxer by filling the various fields in this context:
+ *
+ * - The @ref AVFormatContext.oformat "oformat" field must be set to select the
+ * muxer that will be used.
+ * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb
+ * "pb" field must be set to an opened IO context, either returned from
+ * avio_open2() or a custom one.
+ * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must
+ * be created with the avformat_new_stream() function. The caller should fill
+ * the @ref AVStream.codec "stream codec context" information, such as the
+ * codec @ref AVCodecContext.codec_type "type", @ref AVCodecContext.codec_id
+ * "id" and other parameters (e.g. width / height, the pixel or sample format,
+ * etc.) as known. The @ref AVStream.time_base "stream timebase" should
+ * be set to the timebase that the caller desires to use for this stream (note
+ * that the timebase actually used by the muxer can be different, as will be
+ * described later).
+ * - The caller may fill in additional information, such as @ref
+ * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream"
+ * metadata, @ref AVFormatContext.chapters "chapters", @ref
+ * AVFormatContext.programs "programs", etc. as described in the
+ * AVFormatContext documentation. Whether such information will actually be
+ * stored in the output depends on what the container format and the muxer
+ * support.
+ *
+ * When the muxing context is fully set up, the caller must call
+ * avformat_write_header() to initialize the muxer internals and write the file
+ * header. Whether anything actually is written to the IO context at this step
+ * depends on the muxer, but this function must always be called. Any muxer
+ * private options must be passed in the options parameter to this function.
+ *
+ * The data is then sent to the muxer by repeatedly calling av_write_frame() or
+ * av_interleaved_write_frame() (consult those functions' documentation for
+ * discussion on the difference between them; only one of them may be used with
+ * a single muxing context, they should not be mixed). Do note that the timing
+ * information on the packets sent to the muxer must be in the corresponding
+ * AVStream's timebase. That timebase is set by the muxer (in the
+ * avformat_write_header() step) and may be different from the timebase
+ * requested by the caller.
+ *
+ * Once all the data has been written, the caller must call av_write_trailer()
+ * to flush any buffered packets and finalize the output file, then close the IO
+ * context (if any) and finally free the muxing context with
+ * avformat_free_context().
+ * @}
+ *
+ * @defgroup lavf_io I/O Read/Write
+ * @{
+ * @}
+ *
+ * @defgroup lavf_codec Demuxers
+ * @{
+ * @defgroup lavf_codec_native Native Demuxers
+ * @{
+ * @}
+ * @defgroup lavf_codec_wrappers External library wrappers
+ * @{
+ * @}
+ * @}
+ * @defgroup lavf_protos I/O Protocols
+ * @{
+ * @}
+ * @defgroup lavf_internal Internal
+ * @{
+ * @}
+ * @}
+ *
+ */
+
+#include <time.h>
+#include <stdio.h>  /* FILE */
+#include "libavcodec/avcodec.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+
+#include "avio.h"
+#include "libavformat/version.h"
+
+struct AVFormatContext;
+
+struct AVDeviceInfoList;
+struct AVDeviceCapabilitiesQuery;
+
+/**
+ * @defgroup metadata_api Public Metadata API
+ * @{
+ * @ingroup libavf
+ * The metadata API allows libavformat to export metadata tags to a client
+ * application when demuxing. Conversely it allows a client application to
+ * set metadata when muxing.
+ *
+ * Metadata is exported or set as pairs of key/value strings in the 'metadata'
+ * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs
+ * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg,
+ * metadata is assumed to be UTF-8 encoded Unicode. Note that metadata
+ * exported by demuxers isn't checked to be valid UTF-8 in most cases.
+ *
+ * Important concepts to keep in mind:
+ * - Keys are unique; there can never be 2 tags with the same key. This is
+ * also meant semantically, i.e., a demuxer should not knowingly produce
+ * several keys that are literally different but semantically identical.
+ * E.g., key=Author5, key=Author6. In this example, all authors must be
+ * placed in the same tag.
+ * - Metadata is flat, not hierarchical; there are no subtags. If you
+ * want to store, e.g., the email address of the child of producer Alice
+ * and actor Bob, that could have key=alice_and_bobs_childs_email_address.
+ * - Several modifiers can be applied to the tag name. This is done by
+ * appending a dash character ('-') and the modifier name in the order
+ * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng.
+ * - language -- a tag whose value is localized for a particular language
+ * is appended with the ISO 639-2/B 3-letter language code.
+ * For example: Author-ger=Michael, Author-eng=Mike
+ * The original/default language is in the unqualified "Author" tag.
+ * A demuxer should set a default if it sets any translated tag.
+ * - sorting -- a modified version of a tag that should be used for
+ * sorting will have '-sort' appended. E.g. artist="The Beatles",
+ * artist-sort="Beatles, The".
+ * - Some protocols and demuxers support metadata updates. After a successful
+ * call to av_read_packet(), AVFormatContext.event_flags or AVStream.event_flags
+ * will be updated to indicate if metadata changed. In order to detect metadata
+ * changes on a stream, you need to loop through all streams in the AVFormatContext
+ * and check their individual event_flags.
+ *
+ * - Demuxers attempt to export metadata in a generic format, however tags
+ * with no generic equivalents are left as they are stored in the container.
+ * Follows a list of generic tag names:
+ *
+ @verbatim
+ album -- name of the set this work belongs to
+ album_artist -- main creator of the set/album, if different from artist.
+ e.g. "Various Artists" for compilation albums.
+ artist -- main creator of the work
+ comment -- any additional description of the file.
+ composer -- who composed the work, if different from artist.
+ copyright -- name of copyright holder.
+ creation_time-- date when the file was created, preferably in ISO 8601.
+ date -- date when the work was created, preferably in ISO 8601.
+ disc -- number of a subset, e.g. disc in a multi-disc collection.
+ encoder -- name/settings of the software/hardware that produced the file.
+ encoded_by -- person/group who created the file.
+ filename -- original name of the file.
+ genre -- .
+ language -- main language in which the work is performed, preferably
+ in ISO 639-2 format. Multiple languages can be specified by
+ separating them with commas.
+ performer -- artist who performed the work, if different from artist.
+ E.g for "Also sprach Zarathustra", artist would be "Richard
+ Strauss" and performer "London Philharmonic Orchestra".
+ publisher -- name of the label/publisher.
+ service_name -- name of the service in broadcasting (channel name).
+ service_provider -- name of the service provider in broadcasting.
+ title -- name of the work.
+ track -- number of this work in the set, can be in form current/total.
+ variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of
+ @endverbatim
+ *
+ * Look in the examples section for an application example of how to use the Metadata API.
+ *
+ * @}
+ */
+
+/* packet functions */
+
+
+/**
+ * Allocate and read the payload of a packet and initialize its
+ * fields with default values.
+ *
+ * @param s associated IO context
+ * @param pkt packet
+ * @param size desired payload size
+ * @return >0 (read size) if OK, AVERROR_xxx otherwise
+ */
+int av_get_packet(AVIOContext *s, AVPacket *pkt, int size);
+
+
+/**
+ * Read data and append it to the current content of the AVPacket.
+ * If pkt->size is 0 this is identical to av_get_packet.
+ * Note that this uses av_grow_packet and thus involves a realloc
+ * which is inefficient. Thus this function should only be used
+ * when there is no reasonable way to know (an upper bound of)
+ * the final size.
+ *
+ * @param s associated IO context
+ * @param pkt packet
+ * @param size amount of data to read
+ * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data
+ * will not be lost even if an error occurs.
+ */
+int av_append_packet(AVIOContext *s, AVPacket *pkt, int size);
+
+#if FF_API_LAVF_FRAC
+/*************************************************/
+/* fractional numbers for exact pts handling */
+
+/**
+ * The exact value of the fractional number is: 'val + num / den'.
+ * num is assumed to be 0 <= num < den.
+ */
+typedef struct AVFrac {
+ int64_t val, num, den;
+} AVFrac;
+#endif
+
+/*************************************************/
+/* input/output formats */
+
+struct AVCodecTag;
+
+/**
+ * This structure contains the data a format has to probe a file.
+ */
+typedef struct AVProbeData {
+ const char *filename;
+ unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */
+ int buf_size; /**< Size of buf except extra allocated bytes */
+ const char *mime_type; /**< mime_type, when known. */
+} AVProbeData;
+
+#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4)
+#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1)
+
+#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension
+#define AVPROBE_SCORE_MIME 75 ///< score for file mime type
+#define AVPROBE_SCORE_MAX 100 ///< maximum score
+
+#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer
+
+/// Demuxer will use avio_open, no opened file should be provided by the caller.
+#define AVFMT_NOFILE 0x0001
+#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */
+#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */
+#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for
+ raw picture data. */
+#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */
+#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */
+#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */
+#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */
+#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */
+#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */
+#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */
+#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */
+#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */
+#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */
+#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. */
+#if LIBAVFORMAT_VERSION_MAJOR <= 54
+#define AVFMT_TS_NONSTRICT 0x8020000 //we try to be compatible to the ABIs of ffmpeg and major forks
+#else
+#define AVFMT_TS_NONSTRICT 0x20000
+#endif
+ /**< Format does not require strictly
+ increasing timestamps, but they must
+ still be monotonic */
+#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative
+ timestamps. If not set the timestamp
+ will be shifted in av_write_frame and
+ av_interleaved_write_frame so they
+ start from 0.
+ The user or muxer can override this through
+ AVFormatContext.avoid_negative_ts
+ */
+
+#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */
+
+/**
+ * @addtogroup lavf_encoding
+ * @{
+ */
+typedef struct AVOutputFormat {
+ const char *name;
+ /**
+ * Descriptive name for the format, meant to be more human-readable
+ * than name. You should use the NULL_IF_CONFIG_SMALL() macro
+ * to define it.
+ */
+ const char *long_name;
+ const char *mime_type;
+ const char *extensions; /**< comma-separated filename extensions */
+ /* output support */
+ enum AVCodecID audio_codec; /**< default audio codec */
+ enum AVCodecID video_codec; /**< default video codec */
+ enum AVCodecID subtitle_codec; /**< default subtitle codec */
+ /**
+ * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_RAWPICTURE,
+ * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS,
+ * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH,
+ * AVFMT_TS_NONSTRICT
+ */
+ int flags;
+
+ /**
+ * List of supported codec_id-codec_tag pairs, ordered by "better
+ * choice first". The arrays are all terminated by AV_CODEC_ID_NONE.
+ */
+ const struct AVCodecTag * const *codec_tag;
+
+
+ const AVClass *priv_class; ///< AVClass for the private context
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ struct AVOutputFormat *next;
+ /**
+ * size of private data so that it can be allocated in the wrapper
+ */
+ int priv_data_size;
+
+ int (*write_header)(struct AVFormatContext *);
+ /**
+ * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags,
+ * pkt can be NULL in order to flush data buffered in the muxer.
+ * When flushing, return 0 if there still is more data to flush,
+ * or 1 if everything was flushed and there is no more buffered
+ * data.
+ */
+ int (*write_packet)(struct AVFormatContext *, AVPacket *pkt);
+ int (*write_trailer)(struct AVFormatContext *);
+ /**
+ * Currently only used to set pixel format if not YUV420P.
+ */
+ int (*interleave_packet)(struct AVFormatContext *, AVPacket *out,
+ AVPacket *in, int flush);
+ /**
+ * Test if the given codec can be stored in this container.
+ *
+ * @return 1 if the codec is supported, 0 if it is not.
+ * A negative number if unknown.
+ * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC
+ */
+ int (*query_codec)(enum AVCodecID id, int std_compliance);
+
+ void (*get_output_timestamp)(struct AVFormatContext *s, int stream,
+ int64_t *dts, int64_t *wall);
+ /**
+ * Allows sending messages from application to device.
+ */
+ int (*control_message)(struct AVFormatContext *s, int type,
+ void *data, size_t data_size);
+
+ /**
+ * Write an uncoded AVFrame.
+ *
+ * See av_write_uncoded_frame() for details.
+ *
+ * The library will free *frame afterwards, but the muxer can prevent it
+ * by setting the pointer to NULL.
+ */
+ int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index,
+ AVFrame **frame, unsigned flags);
+ /**
+     * Returns the device list with its properties.
+ * @see avdevice_list_devices() for more details.
+ */
+ int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);
+ /**
+ * Initialize device capabilities submodule.
+ * @see avdevice_capabilities_create() for more details.
+ */
+ int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
+ /**
+ * Free device capabilities submodule.
+ * @see avdevice_capabilities_free() for more details.
+ */
+ int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
+} AVOutputFormat;
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavf_decoding
+ * @{
+ */
+typedef struct AVInputFormat {
+ /**
+ * A comma separated list of short names for the format. New names
+ * may be appended with a minor bump.
+ */
+ const char *name;
+
+ /**
+ * Descriptive name for the format, meant to be more human-readable
+ * than name. You should use the NULL_IF_CONFIG_SMALL() macro
+ * to define it.
+ */
+ const char *long_name;
+
+ /**
+ * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS,
+ * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH,
+ * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS.
+ */
+ int flags;
+
+ /**
+ * If extensions are defined, then no probe is done. You should
+ * usually not use extension format guessing because it is not
+ * reliable enough
+ */
+ const char *extensions;
+
+ const struct AVCodecTag * const *codec_tag;
+
+ const AVClass *priv_class; ///< AVClass for the private context
+
+ /**
+ * Comma-separated list of mime types.
+     * It is used to check for matching mime types while probing.
+ * @see av_probe_input_format2
+ */
+ const char *mime_type;
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ struct AVInputFormat *next;
+
+ /**
+ * Raw demuxers store their codec ID here.
+ */
+ int raw_codec_id;
+
+ /**
+ * Size of private data so that it can be allocated in the wrapper.
+ */
+ int priv_data_size;
+
+ /**
+ * Tell if a given file has a chance of being parsed as this format.
+ * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes
+ * big so you do not have to check for that unless you need more.
+ */
+ int (*read_probe)(AVProbeData *);
+
+ /**
+ * Read the format header and initialize the AVFormatContext
+ * structure. Return 0 if OK. 'avformat_new_stream' should be
+ * called to create new streams.
+ */
+ int (*read_header)(struct AVFormatContext *);
+
+ /**
+ * Read one packet and put it in 'pkt'. pts and flags are also
+ * set. 'avformat_new_stream' can be called only if the flag
+ * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a
+ * background thread).
+ * @return 0 on success, < 0 on error.
+ * When returning an error, pkt must not have been allocated
+ * or must be freed before returning
+ */
+ int (*read_packet)(struct AVFormatContext *, AVPacket *pkt);
+
+ /**
+ * Close the stream. The AVFormatContext and AVStreams are not
+ * freed by this function
+ */
+ int (*read_close)(struct AVFormatContext *);
+
+ /**
+ * Seek to a given timestamp relative to the frames in
+ * stream component stream_index.
+ * @param stream_index Must not be -1.
+ * @param flags Selects which direction should be preferred if no exact
+ * match is available.
+ * @return >= 0 on success (but not necessarily the new offset)
+ */
+ int (*read_seek)(struct AVFormatContext *,
+ int stream_index, int64_t timestamp, int flags);
+
+ /**
+ * Get the next timestamp in stream[stream_index].time_base units.
+ * @return the timestamp or AV_NOPTS_VALUE if an error occurred
+ */
+ int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index,
+ int64_t *pos, int64_t pos_limit);
+
+ /**
+ * Start/resume playing - only meaningful if using a network-based format
+ * (RTSP).
+ */
+ int (*read_play)(struct AVFormatContext *);
+
+ /**
+ * Pause playing - only meaningful if using a network-based format
+ * (RTSP).
+ */
+ int (*read_pause)(struct AVFormatContext *);
+
+ /**
+ * Seek to timestamp ts.
+ * Seeking will be done so that the point from which all active streams
+ * can be presented successfully will be closest to ts and within min/max_ts.
+ * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.
+ */
+ int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
+
+ /**
+     * Returns the device list with its properties.
+ * @see avdevice_list_devices() for more details.
+ */
+ int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list);
+
+ /**
+ * Initialize device capabilities submodule.
+ * @see avdevice_capabilities_create() for more details.
+ */
+ int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
+
+ /**
+ * Free device capabilities submodule.
+ * @see avdevice_capabilities_free() for more details.
+ */
+ int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps);
+} AVInputFormat;
+/**
+ * @}
+ */
+
+enum AVStreamParseType {
+ AVSTREAM_PARSE_NONE,
+ AVSTREAM_PARSE_FULL, /**< full parsing and repack */
+ AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */
+ AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */
+ AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */
+ AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw
+ this assumes that each packet in the file contains no demuxer level headers and
+ just codec level data, otherwise position generation would fail */
+};
+
+typedef struct AVIndexEntry {
+ int64_t pos;
+ int64_t timestamp; /**<
+ * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available
+                        * when seeking to this entry. In practice that means preferably the PTS on keyframe-based formats.
+ * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better
+ * is known
+ */
+#define AVINDEX_KEYFRAME 0x0001
+ int flags:2;
+ int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment).
+ int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */
+} AVIndexEntry;
+
+#define AV_DISPOSITION_DEFAULT 0x0001
+#define AV_DISPOSITION_DUB 0x0002
+#define AV_DISPOSITION_ORIGINAL 0x0004
+#define AV_DISPOSITION_COMMENT 0x0008
+#define AV_DISPOSITION_LYRICS 0x0010
+#define AV_DISPOSITION_KARAOKE 0x0020
+
+/**
+ * Track should be used during playback by default.
+ * Useful for subtitle track that should be displayed
+ * even when user did not explicitly ask for subtitles.
+ */
+#define AV_DISPOSITION_FORCED 0x0040
+#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */
+#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */
+#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */
+/**
+ * The stream is stored in the file as an attached picture/"cover art" (e.g.
+ * APIC frame in ID3v2). The single packet associated with it will be returned
+ * among the first few packets read from the file unless seeking takes place.
+ * It can also be accessed at any time in AVStream.attached_pic.
+ */
+#define AV_DISPOSITION_ATTACHED_PIC 0x0400
+
+/**
+ * To specify text track kind (different from subtitles default).
+ */
+#define AV_DISPOSITION_CAPTIONS 0x10000
+#define AV_DISPOSITION_DESCRIPTIONS 0x20000
+#define AV_DISPOSITION_METADATA 0x40000
+
+/**
+ * Options for behavior on timestamp wrap detection.
+ */
+#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap
+#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection
+#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection
+
+/**
+ * Stream structure.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVStream) must not be used outside libav*.
+ */
+typedef struct AVStream {
+ int index; /**< stream index in AVFormatContext */
+ /**
+ * Format-specific stream ID.
+ * decoding: set by libavformat
+ * encoding: set by the user, replaced by libavformat if left unset
+ */
+ int id;
+ /**
+ * Codec context associated with this stream. Allocated and freed by
+ * libavformat.
+ *
+ * - decoding: The demuxer exports codec information stored in the headers
+ * here.
+ * - encoding: The user sets codec information, the muxer writes it to the
+ * output. Mandatory fields as specified in AVCodecContext
+ * documentation must be set even if this AVCodecContext is
+ * not actually used for encoding.
+ */
+ AVCodecContext *codec;
+ void *priv_data;
+
+#if FF_API_LAVF_FRAC
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated
+ struct AVFrac pts;
+#endif
+
+ /**
+ * This is the fundamental unit of time (in seconds) in terms
+ * of which frame timestamps are represented.
+ *
+ * decoding: set by libavformat
+ * encoding: May be set by the caller before avformat_write_header() to
+ * provide a hint to the muxer about the desired timebase. In
+ * avformat_write_header(), the muxer will overwrite this field
+ * with the timebase that will actually be used for the timestamps
+ * written into the file (which may or may not be related to the
+ * user-provided one, depending on the format).
+ */
+ AVRational time_base;
+
+ /**
+ * Decoding: pts of the first frame of the stream in presentation order, in stream time base.
+ * Only set this if you are absolutely 100% sure that the value you set
+ * it to really is the pts of the first frame.
+ * This may be undefined (AV_NOPTS_VALUE).
+ * @note The ASF header does NOT contain a correct start_time the ASF
+ * demuxer must NOT set this.
+ */
+ int64_t start_time;
+
+ /**
+ * Decoding: duration of the stream, in stream time base.
+ * If a source file does not specify a duration, but does specify
+ * a bitrate, this value will be estimated from bitrate and file size.
+ */
+ int64_t duration;
+
+ int64_t nb_frames; ///< number of frames in this stream if known or 0
+
+ int disposition; /**< AV_DISPOSITION_* bit field */
+
+ enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed.
+
+ /**
+ * sample aspect ratio (0 if unknown)
+ * - encoding: Set by user.
+ * - decoding: Set by libavformat.
+ */
+ AVRational sample_aspect_ratio;
+
+ AVDictionary *metadata;
+
+ /**
+ * Average framerate
+ *
+ * - demuxing: May be set by libavformat when creating the stream or in
+ * avformat_find_stream_info().
+ * - muxing: May be set by the caller before avformat_write_header().
+ */
+ AVRational avg_frame_rate;
+
+ /**
+ * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet
+ * will contain the attached picture.
+ *
+ * decoding: set by libavformat, must not be modified by the caller.
+ * encoding: unused
+ */
+ AVPacket attached_pic;
+
+ /**
+ * An array of side data that applies to the whole stream (i.e. the
+ * container does not allow it to change between packets).
+ *
+ * There may be no overlap between the side data in this array and side data
+ * in the packets. I.e. a given side data is either exported by the muxer
+ * (demuxing) / set by the caller (muxing) in this array, then it never
+ * appears in the packets, or the side data is exported / sent through
+ * the packets (always in the first packet where the value becomes known or
+ * changes), then it does not appear in this array.
+ *
+ * - demuxing: Set by libavformat when the stream is created.
+ * - muxing: May be set by the caller before avformat_write_header().
+ *
+ * Freed by libavformat in avformat_free_context().
+ *
+ * @see av_format_inject_global_side_data()
+ */
+ AVPacketSideData *side_data;
+ /**
+ * The number of elements in the AVStream.side_data array.
+ */
+ int nb_side_data;
+
+ /**
+ * Flags for the user to detect events happening on the stream. Flags must
+ * be cleared by the user once the event has been handled.
+ * A combination of AVSTREAM_EVENT_FLAG_*.
+ */
+ int event_flags;
+#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata.
+
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+
+ /**
+ * Stream information used internally by av_find_stream_info()
+ */
+#define MAX_STD_TIMEBASES (30*12+7+6)
+ struct {
+ int64_t last_dts;
+ int64_t duration_gcd;
+ int duration_count;
+ int64_t rfps_duration_sum;
+ double (*duration_error)[2][MAX_STD_TIMEBASES];
+ int64_t codec_info_duration;
+ int64_t codec_info_duration_fields;
+
+ /**
+ * 0 -> decoder has not been searched for yet.
+ * >0 -> decoder found
+ * <0 -> decoder with codec_id == -found_decoder has not been found
+ */
+ int found_decoder;
+
+ int64_t last_duration;
+
+ /**
+ * Those are used for average framerate estimation.
+ */
+ int64_t fps_first_dts;
+ int fps_first_dts_idx;
+ int64_t fps_last_dts;
+ int fps_last_dts_idx;
+
+ } *info;
+
+ int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */
+
+ // Timestamp generation support:
+ /**
+ * Timestamp corresponding to the last dts sync point.
+ *
+ * Initialized when AVCodecParserContext.dts_sync_point >= 0 and
+ * a DTS is received from the underlying container. Otherwise set to
+ * AV_NOPTS_VALUE by default.
+ */
+ int64_t first_dts;
+ int64_t cur_dts;
+ int64_t last_IP_pts;
+ int last_IP_duration;
+
+ /**
+ * Number of packets to buffer for codec probing
+ */
+#define MAX_PROBE_PACKETS 2500
+ int probe_packets;
+
+ /**
+ * Number of frames that have been demuxed during av_find_stream_info()
+ */
+ int codec_info_nb_frames;
+
+ /* av_read_frame() support */
+ enum AVStreamParseType need_parsing;
+ struct AVCodecParserContext *parser;
+
+ /**
+ * last packet in packet_buffer for this stream when muxing.
+ */
+ struct AVPacketList *last_in_packet_buffer;
+ AVProbeData probe_data;
+#define MAX_REORDER_DELAY 16
+ int64_t pts_buffer[MAX_REORDER_DELAY+1];
+
+ AVIndexEntry *index_entries; /**< Only used if the format does not
+ support seeking natively. */
+ int nb_index_entries;
+ unsigned int index_entries_allocated_size;
+
+ /**
+ * Real base framerate of the stream.
+ * This is the lowest framerate with which all timestamps can be
+ * represented accurately (it is the least common multiple of all
+ * framerates in the stream). Note, this value is just a guess!
+ * For example, if the time base is 1/90000 and all frames have either
+ * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1.
+ *
+ * Code outside avformat should access this field using:
+ * av_stream_get/set_r_frame_rate(stream)
+ */
+ AVRational r_frame_rate;
+
+ /**
+ * Stream Identifier
+ * This is the MPEG-TS stream identifier +1
+ * 0 means unknown
+ */
+ int stream_identifier;
+
+ int64_t interleaver_chunk_size;
+ int64_t interleaver_chunk_duration;
+
+ /**
+ * stream probing state
+ * -1 -> probing finished
+ * 0 -> no probing requested
+ * rest -> perform probing with request_probe being the minimum score to accept.
+ * NOT PART OF PUBLIC API
+ */
+ int request_probe;
+ /**
+ * Indicates that everything up to the next keyframe
+ * should be discarded.
+ */
+ int skip_to_keyframe;
+
+ /**
+ * Number of samples to skip at the start of the frame decoded from the next packet.
+ */
+ int skip_samples;
+
+ /**
+ * If not 0, the first audio sample that should be discarded from the stream.
+ * This is broken by design (needs global sample count), but can't be
+ * avoided for broken by design formats such as mp3 with ad-hoc gapless
+ * audio support.
+ */
+ int64_t first_discard_sample;
+
+ /**
+ * The sample after last sample that is intended to be discarded after
+ * first_discard_sample. Works on frame boundaries only. Used to prevent
+ * early EOF if the gapless info is broken (considered concatenated mp3s).
+ */
+ int64_t last_discard_sample;
+
+ /**
+ * Number of internally decoded frames, used internally in libavformat, do not access
+ * its lifetime differs from info which is why it is not in that structure.
+ */
+ int nb_decoded_frames;
+
+ /**
+ * Timestamp offset added to timestamps before muxing
+ * NOT PART OF PUBLIC API
+ */
+ int64_t mux_ts_offset;
+
+ /**
+ * Internal data to check for wrapping of the time stamp
+ */
+ int64_t pts_wrap_reference;
+
+ /**
+ * Options for behavior, when a wrap is detected.
+ *
+ * Defined by AV_PTS_WRAP_ values.
+ *
+ * If correction is enabled, there are two possibilities:
+ * If the first time stamp is near the wrap point, the wrap offset
+ * will be subtracted, which will create negative time stamps.
+ * Otherwise the offset will be added.
+ */
+ int pts_wrap_behavior;
+
+ /**
+ * Internal data to prevent doing update_initial_durations() twice
+ */
+ int update_initial_durations_done;
+
+ /**
+ * Internal data to generate dts from pts
+ */
+ int64_t pts_reorder_error[MAX_REORDER_DELAY+1];
+ uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1];
+
+ /**
+ * Internal data to analyze DTS and detect faulty mpeg streams
+ */
+ int64_t last_dts_for_order_check;
+ uint8_t dts_ordered;
+ uint8_t dts_misordered;
+
+ /**
+ * Internal data to inject global side data
+ */
+ int inject_global_side_data;
+
+ /**
+     * String containing pairs of keys and values describing the recommended encoder configuration.
+     * Pairs are separated by ','.
+ * Keys are separated from values by '='.
+ */
+ char *recommended_encoder_configuration;
+
+ /**
+ * display aspect ratio (0 if unknown)
+ * - encoding: unused
+ * - decoding: Set by libavformat to calculate sample_aspect_ratio internally
+ */
+ AVRational display_aspect_ratio;
+} AVStream;
+
+/** Accessor: get AVStream.r_frame_rate (real base framerate; documented above as a guess). */
+AVRational av_stream_get_r_frame_rate(const AVStream *s);
+/** Accessor: set AVStream.r_frame_rate. */
+void av_stream_set_r_frame_rate(AVStream *s, AVRational r);
+/** Accessor: get the stream's AVCodecParserContext (AVStream.parser). */
+struct AVCodecParserContext *av_stream_get_parser(const AVStream *s);
+/** Accessor: get AVStream.recommended_encoder_configuration
+ *  (comma-separated key=value pairs, see the field documentation). */
+char* av_stream_get_recommended_encoder_configuration(const AVStream *s);
+/** Accessor: set AVStream.recommended_encoder_configuration.
+ *  NOTE(review): ownership of 'configuration' is not visible here — confirm
+ *  whether the stream takes ownership of the string or copies it. */
+void av_stream_set_recommended_encoder_configuration(AVStream *s, char *configuration);
+
+/**
+ * Returns the pts of the last muxed packet + its duration
+ *
+ * the returned value is undefined when used with a demuxer.
+ */
+int64_t av_stream_get_end_pts(const AVStream *st);
+
+#define AV_PROGRAM_RUNNING 1
+
+/**
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVProgram) must not be used outside libav*.
+ */
+typedef struct AVProgram {
+ int id;
+ int flags;
+ enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller
+ unsigned int *stream_index;
+ unsigned int nb_stream_indexes;
+ AVDictionary *metadata;
+
+ int program_num;
+ int pmt_pid;
+ int pcr_pid;
+
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ int64_t start_time;
+ int64_t end_time;
+
+ int64_t pts_wrap_reference; ///< reference dts for wrap detection
+ int pts_wrap_behavior; ///< behavior on wrap detection
+} AVProgram;
+
+#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present
+ (streams are added dynamically) */
+
+typedef struct AVChapter {
+ int id; ///< unique ID to identify the chapter
+ AVRational time_base; ///< time base in which the start/end timestamps are specified
+ int64_t start, end; ///< chapter start/end time in time_base units
+ AVDictionary *metadata;
+} AVChapter;
+
+
+/**
+ * Callback used by devices to communicate with application.
+ */
+typedef int (*av_format_control_message)(struct AVFormatContext *s, int type,
+ void *data, size_t data_size);
+
+
+/**
+ * The duration of a video can be estimated through various ways, and this enum can be used
+ * to know how the duration was estimated.
+ */
+enum AVDurationEstimationMethod {
+ AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes
+ AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration
+ AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate)
+};
+
+typedef struct AVFormatInternal AVFormatInternal;
+
+/**
+ * Format I/O context.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVFormatContext) must not be used outside libav*, use
+ * avformat_alloc_context() to create an AVFormatContext.
+ */
+typedef struct AVFormatContext {
+ /**
+ * A class for logging and @ref avoptions. Set by avformat_alloc_context().
+ * Exports (de)muxer private options if they exist.
+ */
+ const AVClass *av_class;
+
+ /**
+ * The input container format.
+ *
+ * Demuxing only, set by avformat_open_input().
+ */
+ struct AVInputFormat *iformat;
+
+ /**
+ * The output container format.
+ *
+ * Muxing only, must be set by the caller before avformat_write_header().
+ */
+ struct AVOutputFormat *oformat;
+
+ /**
+ * Format private data. This is an AVOptions-enabled struct
+ * if and only if iformat/oformat.priv_class is not NULL.
+ *
+ * - muxing: set by avformat_write_header()
+ * - demuxing: set by avformat_open_input()
+ */
+ void *priv_data;
+
+ /**
+ * I/O context.
+ *
+ * - demuxing: either set by the user before avformat_open_input() (then
+ * the user must close it manually) or set by avformat_open_input().
+ * - muxing: set by the user before avformat_write_header(). The caller must
+ * take care of closing / freeing the IO context.
+ *
+ * Do NOT set this field if AVFMT_NOFILE flag is set in
+ * iformat/oformat.flags. In such a case, the (de)muxer will handle
+ * I/O in some other way and this field will be NULL.
+ */
+ AVIOContext *pb;
+
+ /* stream info */
+ /**
+ * Flags signalling stream properties. A combination of AVFMTCTX_*.
+ * Set by libavformat.
+ */
+ int ctx_flags;
+
+ /**
+ * Number of elements in AVFormatContext.streams.
+ *
+ * Set by avformat_new_stream(), must not be modified by any other code.
+ */
+ unsigned int nb_streams;
+ /**
+ * A list of all streams in the file. New streams are created with
+ * avformat_new_stream().
+ *
+ * - demuxing: streams are created by libavformat in avformat_open_input().
+ * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also
+ * appear in av_read_frame().
+ * - muxing: streams are created by the user before avformat_write_header().
+ *
+ * Freed by libavformat in avformat_free_context().
+ */
+ AVStream **streams;
+
+ /**
+ * input or output filename
+ *
+ * - demuxing: set by avformat_open_input()
+ * - muxing: may be set by the caller before avformat_write_header()
+ */
+ char filename[1024];
+
+ /**
+ * Position of the first frame of the component, in
+ * AV_TIME_BASE fractional seconds. NEVER set this value directly:
+ * It is deduced from the AVStream values.
+ *
+ * Demuxing only, set by libavformat.
+ */
+ int64_t start_time;
+
+ /**
+ * Duration of the stream, in AV_TIME_BASE fractional
+ * seconds. Only set this value if you know none of the individual stream
+ * durations and also do not set any of them. This is deduced from the
+ * AVStream values if not set.
+ *
+ * Demuxing only, set by libavformat.
+ */
+ int64_t duration;
+
+ /**
+ * Total stream bitrate in bit/s, 0 if not
+ * available. Never set it directly if the file_size and the
+ * duration are known as FFmpeg can compute it automatically.
+ */
+ int bit_rate;
+
+ unsigned int packet_size;
+ int max_delay;
+
+ /**
+ * Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*.
+ * Set by the user before avformat_open_input() / avformat_write_header().
+ */
+ int flags;
+#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames.
+#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index.
+#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input.
+#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS
+#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container
+#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled
+#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible
+#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it.
+#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted
+#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet.
+/**
+ * When muxing, try to avoid writing any random/volatile data to the output.
+ * This includes any random IDs, real-time timestamps/dates, muxer version, etc.
+ *
+ * This flag is mainly intended for testing.
+ */
+#define AVFMT_FLAG_BITEXACT 0x0400
+#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload
+#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down)
+#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted)
+#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate.
+
+ /**
+ * @deprecated deprecated in favor of probesize2
+ */
+ unsigned int probesize;
+
+ /**
+ * @deprecated deprecated in favor of max_analyze_duration2
+ */
+ attribute_deprecated
+ int max_analyze_duration;
+
+ const uint8_t *key;
+ int keylen;
+
+ unsigned int nb_programs;
+ AVProgram **programs;
+
+ /**
+ * Forced video codec_id.
+ * Demuxing: Set by user.
+ */
+ enum AVCodecID video_codec_id;
+
+ /**
+ * Forced audio codec_id.
+ * Demuxing: Set by user.
+ */
+ enum AVCodecID audio_codec_id;
+
+ /**
+ * Forced subtitle codec_id.
+ * Demuxing: Set by user.
+ */
+ enum AVCodecID subtitle_codec_id;
+
+ /**
+ * Maximum amount of memory in bytes to use for the index of each stream.
+ * If the index exceeds this size, entries will be discarded as
+ * needed to maintain a smaller size. This can lead to slower or less
+ * accurate seeking (depends on demuxer).
+ * Demuxers for which a full in-memory index is mandatory will ignore
+ * this.
+ * - muxing: unused
+ * - demuxing: set by user
+ */
+ unsigned int max_index_size;
+
+ /**
+ * Maximum amount of memory in bytes to use for buffering frames
+ * obtained from realtime capture devices.
+ */
+ unsigned int max_picture_buffer;
+
+ /**
+ * Number of chapters in AVChapter array.
+ * When muxing, chapters are normally written in the file header,
+ * so nb_chapters should normally be initialized before write_header
+ * is called. Some muxers (e.g. mov and mkv) can also write chapters
+ * in the trailer. To write chapters in the trailer, nb_chapters
+ * must be zero when write_header is called and non-zero when
+ * write_trailer is called.
+ * - muxing: set by user
+ * - demuxing: set by libavformat
+ */
+ unsigned int nb_chapters;
+ AVChapter **chapters;
+
+ /**
+ * Metadata that applies to the whole file.
+ *
+ * - demuxing: set by libavformat in avformat_open_input()
+ * - muxing: may be set by the caller before avformat_write_header()
+ *
+ * Freed by libavformat in avformat_free_context().
+ */
+ AVDictionary *metadata;
+
+ /**
+ * Start time of the stream in real world time, in microseconds
+ * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the
+ * stream was captured at this real world time.
+ * - muxing: Set by the caller before avformat_write_header(). If set to
+ * either 0 or AV_NOPTS_VALUE, then the current wall-time will
+ * be used.
+ * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that
+ * the value may become known after some number of frames
+ * have been received.
+ */
+ int64_t start_time_realtime;
+
+ /**
+ * The number of frames used for determining the framerate in
+ * avformat_find_stream_info().
+ * Demuxing only, set by the caller before avformat_find_stream_info().
+ */
+ int fps_probe_size;
+
+ /**
+ * Error recognition; higher values will detect more errors but may
+ * misdetect some more or less valid parts as errors.
+ * Demuxing only, set by the caller before avformat_open_input().
+ */
+ int error_recognition;
+
+ /**
+ * Custom interrupt callbacks for the I/O layer.
+ *
+ * demuxing: set by the user before avformat_open_input().
+ * muxing: set by the user before avformat_write_header()
+ * (mainly useful for AVFMT_NOFILE formats). The callback
+ * should also be passed to avio_open2() if it's used to
+ * open the file.
+ */
+ AVIOInterruptCB interrupt_callback;
+
+ /**
+ * Flags to enable debugging.
+ */
+ int debug;
+#define FF_FDEBUG_TS 0x0001
+
+ /**
+ * Maximum buffering duration for interleaving.
+ *
+ * To ensure all the streams are interleaved correctly,
+ * av_interleaved_write_frame() will wait until it has at least one packet
+ * for each stream before actually writing any packets to the output file.
+ * When some streams are "sparse" (i.e. there are large gaps between
+ * successive packets), this can result in excessive buffering.
+ *
+ * This field specifies the maximum difference between the timestamps of the
+ * first and the last packet in the muxing queue, above which libavformat
+ * will output a packet regardless of whether it has queued a packet for all
+ * the streams.
+ *
+ * Muxing only, set by the caller before avformat_write_header().
+ */
+ int64_t max_interleave_delta;
+
+ /**
+ * Allow non-standard and experimental extensions
+ * @see AVCodecContext.strict_std_compliance
+ */
+ int strict_std_compliance;
+
+ /**
+ * Flags for the user to detect events happening on the file. Flags must
+ * be cleared by the user once the event has been handled.
+ * A combination of AVFMT_EVENT_FLAG_*.
+ */
+ int event_flags;
+#define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata.
+
+ /**
+ * Maximum number of packets to read while waiting for the first timestamp.
+ * Decoding only.
+ */
+ int max_ts_probe;
+
+ /**
+ * Avoid negative timestamps during muxing.
+ * Any value of the AVFMT_AVOID_NEG_TS_* constants.
+ * Note, this only works when using av_interleaved_write_frame. (interleave_packet_per_dts is in use)
+ * - muxing: Set by user
+ * - demuxing: unused
+ */
+ int avoid_negative_ts;
+#define AVFMT_AVOID_NEG_TS_AUTO -1 ///< Enabled when required by target format
+#define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative
+#define AVFMT_AVOID_NEG_TS_MAKE_ZERO 2 ///< Shift timestamps so that they start at 0
+
+
+ /**
+ * Transport stream id.
+ * This will be moved into demuxer private options. Thus no API/ABI compatibility
+ */
+ int ts_id;
+
+ /**
+ * Audio preload in microseconds.
+ * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
+ * - encoding: Set by user via AVOptions (NO direct access)
+ * - decoding: unused
+ */
+ int audio_preload;
+
+ /**
+ * Max chunk time in microseconds.
+ * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
+ * - encoding: Set by user via AVOptions (NO direct access)
+ * - decoding: unused
+ */
+ int max_chunk_duration;
+
+ /**
+ * Max chunk size in bytes
+ * Note, not all formats support this and unpredictable things may happen if it is used when not supported.
+ * - encoding: Set by user via AVOptions (NO direct access)
+ * - decoding: unused
+ */
+ int max_chunk_size;
+
+ /**
+ * forces the use of wallclock timestamps as pts/dts of packets
+ * This has undefined results in the presence of B frames.
+ * - encoding: unused
+ * - decoding: Set by user via AVOptions (NO direct access)
+ */
+ int use_wallclock_as_timestamps;
+
+ /**
+ * avio flags, used to force AVIO_FLAG_DIRECT.
+ * - encoding: unused
+ * - decoding: Set by user via AVOptions (NO direct access)
+ */
+ int avio_flags;
+
+ /**
+ * The duration field can be estimated through various ways, and this field can be used
+ * to know how the duration was estimated.
+ * - encoding: unused
+ * - decoding: Read by user via AVOptions (NO direct access)
+ */
+ enum AVDurationEstimationMethod duration_estimation_method;
+
+ /**
+ * Skip initial bytes when opening stream
+ * - encoding: unused
+ * - decoding: Set by user via AVOptions (NO direct access)
+ */
+ int64_t skip_initial_bytes;
+
+ /**
+ * Correct single timestamp overflows
+ * - encoding: unused
+ * - decoding: Set by user via AVOptions (NO direct access)
+ */
+ unsigned int correct_ts_overflow;
+
+ /**
+ * Force seeking to any (also non key) frames.
+ * - encoding: unused
+ * - decoding: Set by user via AVOptions (NO direct access)
+ */
+ int seek2any;
+
+ /**
+ * Flush the I/O context after each packet.
+ * - encoding: Set by user via AVOptions (NO direct access)
+ * - decoding: unused
+ */
+ int flush_packets;
+
+ /**
+ * format probing score.
+ * The maximal score is AVPROBE_SCORE_MAX; it is set when the demuxer probes
+ * the format.
+ * - encoding: unused
+ * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access)
+ */
+ int probe_score;
+
+ /**
+ * number of bytes to read maximally to identify format.
+ * - encoding: unused
+ * - decoding: set by user through AVOptions (NO direct access)
+ */
+ int format_probesize;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user through AVOptions (NO direct access)
+ */
+ char *codec_whitelist;
+
+ /**
+ * ',' separated list of allowed demuxers.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user through AVOptions (NO direct access)
+ */
+ char *format_whitelist;
+
+ /*****************************************************************
+ * All fields below this line are not part of the public API. They
+ * may not be used outside of libavformat and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+
+ /**
+ * This buffer is only needed when packets were already buffered but
+ * not decoded, for example to get the codec parameters in MPEG
+ * streams.
+ */
+ struct AVPacketList *packet_buffer;
+ struct AVPacketList *packet_buffer_end;
+
+ /* av_seek_frame() support */
+ int64_t data_offset; /**< offset of the first packet */
+
+ /**
+ * Raw packets from the demuxer, prior to parsing and decoding.
+ * This buffer is used for buffering packets until the codec can
+ * be identified, as parsing cannot be done without knowing the
+ * codec.
+ */
+ struct AVPacketList *raw_packet_buffer;
+ struct AVPacketList *raw_packet_buffer_end;
+ /**
+ * Packets split by the parser get queued here.
+ */
+ struct AVPacketList *parse_queue;
+ struct AVPacketList *parse_queue_end;
+ /**
+ * Remaining size available for raw_packet_buffer, in bytes.
+ */
+#define RAW_PACKET_BUFFER_SIZE 2500000
+ int raw_packet_buffer_remaining_size;
+
+ /**
+ * Offset to remap timestamps to be non-negative.
+ * Expressed in timebase units.
+ * @see AVStream.mux_ts_offset
+ */
+ int64_t offset;
+
+ /**
+ * Timebase for the timestamp offset.
+ */
+ AVRational offset_timebase;
+
+ /**
+ * An opaque field for libavformat internal usage.
+ * Must not be accessed in any way by callers.
+ */
+ AVFormatInternal *internal;
+
+ /**
+ * IO repositioned flag.
+ * This is set by avformat when the underlying IO context read pointer
+ * is repositioned, for example when doing byte based seeking.
+ * Demuxers can use the flag to detect such changes.
+ */
+ int io_repositioned;
+
+ /**
+ * Forced video codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_video_codec (NO direct access).
+ */
+ AVCodec *video_codec;
+
+ /**
+ * Forced audio codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_audio_codec (NO direct access).
+ */
+ AVCodec *audio_codec;
+
+ /**
+ * Forced subtitle codec.
+ * This allows forcing a specific decoder, even when there are multiple with
+ * the same codec_id.
+ * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access).
+ */
+ AVCodec *subtitle_codec;
+
+ /**
+ * Number of bytes to be written as padding in a metadata header.
+ * Demuxing: Unused.
+ * Muxing: Set by user via av_format_set_metadata_header_padding.
+ */
+ int metadata_header_padding;
+
+ /**
+ * User data.
+ * This is a place for some private data of the user.
+ * Mostly usable with control_message_cb or any future callbacks in device's context.
+ */
+ void *opaque;
+
+ /**
+ * Callback used by devices to communicate with application.
+ */
+ av_format_control_message control_message_cb;
+
+ /**
+ * Output timestamp offset, in microseconds.
+ * Muxing: set by user via AVOptions (NO direct access)
+ */
+ int64_t output_ts_offset;
+
+ /**
+ * Maximum duration (in AV_TIME_BASE units) of the data read
+ * from input in avformat_find_stream_info().
+ * Demuxing only, set by the caller before avformat_find_stream_info()
+ * via AVOptions (NO direct access).
+ * Can be set to 0 to let avformat choose using a heuristic.
+ */
+ int64_t max_analyze_duration2;
+
+ /**
+ * Maximum size of the data read from input for determining
+ * the input container format.
+ * Demuxing only, set by the caller before avformat_open_input()
+ * via AVOptions (NO direct access).
+ */
+ int64_t probesize2;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * Code outside libavformat should access this field using AVOptions
+ * (NO direct access).
+ * - muxing: Set by user.
+ * - demuxing: Set by user.
+ */
+ uint8_t *dump_separator;
+} AVFormatContext;
+
+int av_format_get_probe_score(const AVFormatContext *s);
+AVCodec * av_format_get_video_codec(const AVFormatContext *s);
+void av_format_set_video_codec(AVFormatContext *s, AVCodec *c);
+AVCodec * av_format_get_audio_codec(const AVFormatContext *s);
+void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c);
+AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s);
+void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c);
+int av_format_get_metadata_header_padding(const AVFormatContext *s);
+void av_format_set_metadata_header_padding(AVFormatContext *s, int c);
+void * av_format_get_opaque(const AVFormatContext *s);
+void av_format_set_opaque(AVFormatContext *s, void *opaque);
+av_format_control_message av_format_get_control_message_cb(const AVFormatContext *s);
+void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback);
+
+/**
+ * This function will cause global side data to be injected in the next packet
+ * of each stream as well as after any subsequent seek.
+ */
+void av_format_inject_global_side_data(AVFormatContext *s);
+
+/**
+ * Returns the method used to set ctx->duration.
+ *
+ * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE.
+ */
+enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx);
+
+/**
+ * A node in a singly linked list of packets (used for libavformat's
+ * internal packet queues such as packet_buffer and raw_packet_buffer).
+ */
+typedef struct AVPacketList {
+ AVPacket pkt; ///< the buffered packet
+ struct AVPacketList *next; ///< next node in the list, NULL at the tail
+} AVPacketList;
+
+
+/**
+ * @defgroup lavf_core Core functions
+ * @ingroup libavf
+ *
+ * Functions for querying libavformat capabilities, allocating core structures,
+ * etc.
+ * @{
+ */
+
+/**
+ * Return the LIBAVFORMAT_VERSION_INT constant.
+ */
+unsigned avformat_version(void);
+
+/**
+ * Return the libavformat build-time configuration.
+ */
+const char *avformat_configuration(void);
+
+/**
+ * Return the libavformat license.
+ */
+const char *avformat_license(void);
+
+/**
+ * Initialize libavformat and register all the muxers, demuxers and
+ * protocols. If you do not call this function, then you can select
+ * exactly which formats you want to support.
+ *
+ * @see av_register_input_format()
+ * @see av_register_output_format()
+ */
+void av_register_all(void);
+
+void av_register_input_format(AVInputFormat *format);
+void av_register_output_format(AVOutputFormat *format);
+
+/**
+ * Do global initialization of network components. This is optional,
+ * but recommended, since it avoids the overhead of implicitly
+ * doing the setup for each session.
+ *
+ * Calling this function will become mandatory if using network
+ * protocols at some major version bump.
+ */
+int avformat_network_init(void);
+
+/**
+ * Undo the initialization done by avformat_network_init.
+ */
+int avformat_network_deinit(void);
+
+/**
+ * If f is NULL, returns the first registered input format,
+ * if f is non-NULL, returns the next registered input format after f
+ * or NULL if f is the last one.
+ */
+AVInputFormat *av_iformat_next(const AVInputFormat *f);
+
+/**
+ * If f is NULL, returns the first registered output format,
+ * if f is non-NULL, returns the next registered output format after f
+ * or NULL if f is the last one.
+ */
+AVOutputFormat *av_oformat_next(const AVOutputFormat *f);
+
+/**
+ * Allocate an AVFormatContext.
+ * avformat_free_context() can be used to free the context and everything
+ * allocated by the framework within it.
+ */
+AVFormatContext *avformat_alloc_context(void);
+
+/**
+ * Free an AVFormatContext and all its streams.
+ * @param s context to free
+ */
+void avformat_free_context(AVFormatContext *s);
+
+/**
+ * Get the AVClass for AVFormatContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *avformat_get_class(void);
+
+/**
+ * Add a new stream to a media file.
+ *
+ * When demuxing, it is called by the demuxer in read_header(). If the
+ * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also
+ * be called in read_packet().
+ *
+ * When muxing, should be called by the user before avformat_write_header().
+ *
+ * User is required to call avcodec_close() and avformat_free_context() to
+ * clean up the allocation by avformat_new_stream().
+ *
+ * @param s media file handle
+ * @param c If non-NULL, the AVCodecContext corresponding to the new stream
+ * will be initialized to use this codec. This is needed for e.g. codec-specific
+ * defaults to be set, so codec should be provided if it is known.
+ *
+ * @return newly created stream or NULL on error.
+ */
+AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c);
+
+/**
+ * Get side information from stream.
+ *
+ * @param stream stream
+ * @param type desired side information type
+ * @param size pointer for side information size to store (optional)
+ * @return pointer to data if present or NULL otherwise
+ */
+uint8_t *av_stream_get_side_data(AVStream *stream,
+ enum AVPacketSideDataType type, int *size);
+
+AVProgram *av_new_program(AVFormatContext *s, int id);
+
+/**
+ * @}
+ */
+
+
+/**
+ * Allocate an AVFormatContext for an output format.
+ * avformat_free_context() can be used to free the context and
+ * everything allocated by the framework within it.
+ *
+ * @param ctx is set to the created format context, or to NULL in
+ * case of failure
+ * @param oformat format to use for allocating the context, if NULL
+ * format_name and filename are used instead
+ * @param format_name the name of output format to use for allocating the
+ * context, if NULL filename is used instead
+ * @param filename the name of the filename to use for allocating the
+ * context, may be NULL
+ * @return >= 0 in case of success, a negative AVERROR code in case of
+ * failure
+ */
+int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat,
+ const char *format_name, const char *filename);
+
+/**
+ * @addtogroup lavf_decoding
+ * @{
+ */
+
+/**
+ * Find AVInputFormat based on the short name of the input format.
+ */
+AVInputFormat *av_find_input_format(const char *short_name);
+
+/**
+ * Guess the file format.
+ *
+ * @param pd data to be probed
+ * @param is_opened Whether the file is already opened; determines whether
+ * demuxers with or without AVFMT_NOFILE are probed.
+ */
+AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened);
+
+/**
+ * Guess the file format.
+ *
+ * @param pd data to be probed
+ * @param is_opened Whether the file is already opened; determines whether
+ * demuxers with or without AVFMT_NOFILE are probed.
+ * @param score_max A probe score larger than this is required to accept a
+ * detection, the variable is set to the actual detection
+ * score afterwards.
+ * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended
+ * to retry with a larger probe buffer.
+ */
+AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max);
+
+/**
+ * Guess the file format.
+ *
+ * @param is_opened Whether the file is already opened; determines whether
+ * demuxers with or without AVFMT_NOFILE are probed.
+ * @param score_ret The score of the best detection.
+ */
+AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret);
+
+/**
+ * Probe a bytestream to determine the input format. Each time a probe returns
+ * with a score that is too low, the probe buffer size is increased and another
+ * attempt is made. When the maximum probe size is reached, the input format
+ * with the highest score is returned.
+ *
+ * @param pb the bytestream to probe
+ * @param fmt the input format is put here
+ * @param filename the filename of the stream
+ * @param logctx the log context
+ * @param offset the offset within the bytestream to probe from
+ * @param max_probe_size the maximum probe buffer size (zero for default)
+ * @return the score in case of success (the maximal score is
+ * AVPROBE_SCORE_MAX), or a negative AVERROR code otherwise
+ */
+int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt,
+ const char *filename, void *logctx,
+ unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Like av_probe_input_buffer2() but returns 0 on success
+ */
+int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt,
+ const char *filename, void *logctx,
+ unsigned int offset, unsigned int max_probe_size);
+
+/**
+ * Open an input stream and read the header. The codecs are not opened.
+ * The stream must be closed with avformat_close_input().
+ *
+ * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context).
+ * May be a pointer to NULL, in which case an AVFormatContext is allocated by this
+ * function and written into ps.
+ * Note that a user-supplied AVFormatContext will be freed on failure.
+ * @param filename Name of the stream to open.
+ * @param fmt If non-NULL, this parameter forces a specific input format.
+ * Otherwise the format is autodetected.
+ * @param options A dictionary filled with AVFormatContext and demuxer-private options.
+ * On return this parameter will be destroyed and replaced with a dict containing
+ * options that were not found. May be NULL.
+ *
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note If you want to use custom IO, preallocate the format context and set its pb field.
+ */
+int avformat_open_input(AVFormatContext **ps, const char *filename, AVInputFormat *fmt, AVDictionary **options);
+
+attribute_deprecated
+int av_demuxer_open(AVFormatContext *ic);
+
+/**
+ * Read packets of a media file to get stream information. This
+ * is useful for file formats with no headers such as MPEG. This
+ * function also computes the real framerate in case of MPEG-2 repeat
+ * frame mode.
+ * The logical file position is not changed by this function;
+ * examined packets may be buffered for later processing.
+ *
+ * @param ic media file handle
+ * @param options If non-NULL, an ic.nb_streams long array of pointers to
+ * dictionaries, where i-th member contains options for
+ * codec corresponding to i-th stream.
+ * On return each dictionary will be filled with options that were not found.
+ * @return >=0 if OK, AVERROR_xxx on error
+ *
+ * @note this function isn't guaranteed to open all the codecs, so
+ * options being non-empty at return is a perfectly normal behavior.
+ *
+ * @todo Let the user decide somehow what information is needed so that
+ * we do not waste time getting stuff the user does not need.
+ */
+int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options);
+
+/**
+ * Find the programs which belong to a given stream.
+ *
+ * @param ic media file handle
+ * @param last the last found program, the search will start after this
+ * program, or from the beginning if it is NULL
+ * @param s stream index
+ * @return the next program which belongs to s, NULL if no program is found or
+ * the last program is not among the programs of ic.
+ */
+AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s);
+
+/**
+ * Find the "best" stream in the file.
+ * The best stream is determined according to various heuristics as the most
+ * likely to be what the user expects.
+ * If the decoder parameter is non-NULL, av_find_best_stream will find the
+ * default decoder for the stream's codec; streams for which no decoder can
+ * be found are ignored.
+ *
+ * @param ic media file handle
+ * @param type stream type: video, audio, subtitles, etc.
+ * @param wanted_stream_nb user-requested stream number,
+ * or -1 for automatic selection
+ * @param related_stream try to find a stream related (eg. in the same
+ * program) to this one, or -1 if none
+ * @param decoder_ret if non-NULL, returns the decoder for the
+ * selected stream
+ * @param flags flags; none are currently defined
+ * @return the non-negative stream number in case of success,
+ * AVERROR_STREAM_NOT_FOUND if no stream with the requested type
+ * could be found,
+ * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder
+ * @note If av_find_best_stream returns successfully and decoder_ret is not
+ * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec.
+ */
+int av_find_best_stream(AVFormatContext *ic,
+ enum AVMediaType type,
+ int wanted_stream_nb,
+ int related_stream,
+ AVCodec **decoder_ret,
+ int flags);
+
+/**
+ * Return the next frame of a stream.
+ * This function returns what is stored in the file, and does not validate
+ * that what is there are valid frames for the decoder. It will split what is
+ * stored in the file into frames and return one for each call. It will not
+ * omit invalid data between valid frames so as to give the decoder the maximum
+ * information possible for decoding.
+ *
+ * If pkt->buf is NULL, then the packet is valid until the next
+ * av_read_frame() or until avformat_close_input(). Otherwise the packet
+ * is valid indefinitely. In both cases the packet must be freed with
+ * av_free_packet when it is no longer needed. For video, the packet contains
+ * exactly one frame. For audio, it contains an integer number of frames if each
+ * frame has a known fixed size (e.g. PCM or ADPCM data). If the audio frames
+ * have a variable size (e.g. MPEG audio), then it contains one frame.
+ *
+ * pkt->pts, pkt->dts and pkt->duration are always set to correct
+ * values in AVStream.time_base units (and guessed if the format cannot
+ * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
+ * has B-frames, so it is better to rely on pkt->dts if you do not
+ * decompress the payload.
+ *
+ * @return 0 if OK, < 0 on error or end of file
+ */
+int av_read_frame(AVFormatContext *s, AVPacket *pkt);
+
+/**
+ * Seek to the keyframe at timestamp.
+ * 'timestamp' in 'stream_index'.
+ *
+ * @param s media file handle
+ * @param stream_index If stream_index is (-1), a default
+ * stream is selected, and timestamp is automatically converted
+ * from AV_TIME_BASE units to the stream specific time_base.
+ * @param timestamp Timestamp in AVStream.time_base units
+ * or, if no stream is specified, in AV_TIME_BASE units.
+ * @param flags flags which select direction and seeking mode
+ * @return >= 0 on success
+ */
+int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp,
+ int flags);
+
+/**
+ * Seek to timestamp ts.
+ * Seeking will be done so that the point from which all active streams
+ * can be presented successfully will be closest to ts and within min/max_ts.
+ * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL.
+ *
+ * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and
+ * are the file position (this may not be supported by all demuxers).
+ * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames
+ * in the stream with stream_index (this may not be supported by all demuxers).
+ * Otherwise all timestamps are in units of the stream selected by stream_index
+ * or if stream_index is -1, in AV_TIME_BASE units.
+ * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as
+ * keyframes (this may not be supported by all demuxers).
+ * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored.
+ *
+ * @param s media file handle
+ * @param stream_index index of the stream which is used as time base reference
+ * @param min_ts smallest acceptable timestamp
+ * @param ts target timestamp
+ * @param max_ts largest acceptable timestamp
+ * @param flags flags
+ * @return >=0 on success, error code otherwise
+ *
+ * @note This is part of the new seek API which is still under construction.
+ * Thus do not use this yet. It may change at any time, do not expect
+ * ABI compatibility yet!
+ */
+int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags);
+
+/**
+ * Start playing a network-based stream (e.g. RTSP stream) at the
+ * current position.
+ */
+int av_read_play(AVFormatContext *s);
+
+/**
+ * Pause a network-based stream (e.g. RTSP stream).
+ *
+ * Use av_read_play() to resume it.
+ */
+int av_read_pause(AVFormatContext *s);
+
+/**
+ * Close an opened input AVFormatContext. Free it and all its contents
+ * and set *s to NULL.
+ */
+void avformat_close_input(AVFormatContext **s);
+/**
+ * @}
+ */
+
+#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward
+#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes
+#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes
+#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number
+
+/**
+ * @addtogroup lavf_encoding
+ * @{
+ */
+/**
+ * Allocate the stream private data and write the stream header to
+ * an output media file.
+ *
+ * @param s Media file handle, must be allocated with avformat_alloc_context().
+ * Its oformat field must be set to the desired output format;
+ * Its pb field must be set to an already opened AVIOContext.
+ * @param options An AVDictionary filled with AVFormatContext and muxer-private options.
+ * On return this parameter will be destroyed and replaced with a dict containing
+ * options that were not found. May be NULL.
+ *
+ * @return 0 on success, negative AVERROR on failure.
+ *
+ * @see av_opt_find, av_dict_set, avio_open, av_oformat_next.
+ */
+int avformat_write_header(AVFormatContext *s, AVDictionary **options);
+
+/**
+ * Write a packet to an output media file.
+ *
+ * This function passes the packet directly to the muxer, without any buffering
+ * or reordering. The caller is responsible for correctly interleaving the
+ * packets if the format requires it. Callers that want libavformat to handle
+ * the interleaving should call av_interleaved_write_frame() instead of this
+ * function.
+ *
+ * @param s media file handle
+ * @param pkt The packet containing the data to be written. Note that unlike
+ * av_interleaved_write_frame(), this function does not take
+ * ownership of the packet passed to it (though some muxers may make
+ * an internal reference to the input packet).
+ *
+ * This parameter can be NULL (at any time, not just at the end), in
+ * order to immediately flush data buffered within the muxer, for
+ * muxers that buffer up data internally before writing it to the
+ * output.
+ *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be
+ * set to the index of the corresponding stream in @ref
+ * AVFormatContext.streams "s->streams". It is very strongly
+ * recommended that timing information (@ref AVPacket.pts "pts", @ref
+ * AVPacket.dts "dts", @ref AVPacket.duration "duration") is set to
+ * correct values.
+ * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush
+ *
+ * @see av_interleaved_write_frame()
+ */
+int av_write_frame(AVFormatContext *s, AVPacket *pkt);
+
+/**
+ * Write a packet to an output media file ensuring correct interleaving.
+ *
+ * This function will buffer the packets internally as needed to make sure the
+ * packets in the output file are properly interleaved in the order of
+ * increasing dts. Callers doing their own interleaving should call
+ * av_write_frame() instead of this function.
+ *
+ * @param s media file handle
+ * @param pkt The packet containing the data to be written.
+ *
+ * If the packet is reference-counted, this function will take
+ * ownership of this reference and unreference it later when it sees
+ * fit.
+ * The caller must not access the data through this reference after
+ * this function returns. If the packet is not reference-counted,
+ * libavformat will make a copy.
+ *
+ * This parameter can be NULL (at any time, not just at the end), to
+ * flush the interleaving queues.
+ *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be
+ * set to the index of the corresponding stream in @ref
+ * AVFormatContext.streams "s->streams". It is very strongly
+ * recommended that timing information (@ref AVPacket.pts "pts", @ref
+ * AVPacket.dts "dts", @ref AVPacket.duration "duration") is set to
+ * correct values.
+ *
+ * @return 0 on success, a negative AVERROR on error. Libavformat will always
+ * take care of freeing the packet, even if this function fails.
+ *
+ * @see av_write_frame(), AVFormatContext.max_interleave_delta
+ */
+int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt);
+
+/**
+ * Write an uncoded frame to an output media file.
+ *
+ * The frame must be correctly interleaved according to the container
+ * specification; if not, then av_interleaved_write_frame() must be used.
+ *
+ * See av_interleaved_write_frame() for details.
+ */
+int av_write_uncoded_frame(AVFormatContext *s, int stream_index,
+ AVFrame *frame);
+
+/**
+ * Write an uncoded frame to an output media file.
+ *
+ * If the muxer supports it, this function allows writing an AVFrame
+ * structure directly, without encoding it into a packet.
+ * It is mostly useful for devices and similar special muxers that use raw
+ * video or PCM data and will not serialize it into a byte stream.
+ *
+ * To test whether it is possible to use it with a given muxer and stream,
+ * use av_write_uncoded_frame_query().
+ *
+ * The caller gives up ownership of the frame and must not access it
+ * afterwards.
+ *
+ * @return >=0 for success, a negative code on error
+ */
+int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index,
+ AVFrame *frame);
+
+/**
+ * Test whether a muxer supports uncoded frame.
+ *
+ * @return >=0 if an uncoded frame can be written to that muxer and stream,
+ * <0 if not
+ */
+int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index);
+
+/**
+ * Write the stream trailer to an output media file and free the
+ * file private data.
+ *
+ * May only be called after a successful call to avformat_write_header.
+ *
+ * @param s media file handle
+ * @return 0 if OK, AVERROR_xxx on error
+ */
+int av_write_trailer(AVFormatContext *s);
+
+/**
+ * Return the output format in the list of registered output formats
+ * which best matches the provided parameters, or return NULL if
+ * there is no match.
+ *
+ * @param short_name if non-NULL checks if short_name matches with the
+ * names of the registered formats
+ * @param filename if non-NULL checks if filename terminates with the
+ * extensions of the registered formats
+ * @param mime_type if non-NULL checks if mime_type matches with the
+ * MIME type of the registered formats
+ */
+AVOutputFormat *av_guess_format(const char *short_name,
+ const char *filename,
+ const char *mime_type);
+
+/**
+ * Guess the codec ID based upon muxer and filename.
+ */
+enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name,
+ const char *filename, const char *mime_type,
+ enum AVMediaType type);
+
+/**
+ * Get timing information for the data currently output.
+ * The exact meaning of "currently output" depends on the format.
+ * It is mostly relevant for devices that have an internal buffer and/or
+ * work in real time.
+ * @param s media file handle
+ * @param stream stream in the media file
+ * @param[out] dts DTS of the last packet output for the stream, in stream
+ * time_base units
+ * @param[out] wall absolute time when that packet was output,
+ *             in microseconds
+ * @return  0 if OK, AVERROR(ENOSYS) if the format does not support it
+ * Note: some formats or devices may not allow measuring dts and wall
+ * clock time atomically.
+ */
+int av_get_output_timestamp(struct AVFormatContext *s, int stream,
+ int64_t *dts, int64_t *wall);
+
+
+/**
+ * @}
+ */
+
+
+/**
+ * @defgroup lavf_misc Utility functions
+ * @ingroup libavf
+ * @{
+ *
+ * Miscellaneous utility functions related to both muxing and demuxing
+ * (or neither).
+ */
+
+/**
+ * Send a nice hexadecimal dump of a buffer to the specified file stream.
+ *
+ * @param f The file stream pointer where the dump should be sent to.
+ * @param buf buffer
+ * @param size buffer size
+ *
+ * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2
+ */
+void av_hex_dump(FILE *f, const uint8_t *buf, int size);
+
+/**
+ * Send a nice hexadecimal dump of a buffer to the log.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance.
+ * @param buf buffer
+ * @param size buffer size
+ *
+ * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2
+ */
+void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size);
+
+/**
+ * Send a nice dump of a packet to the specified file stream.
+ *
+ * @param f The file stream pointer where the dump should be sent to.
+ * @param pkt packet to dump
+ * @param dump_payload True if the payload must be displayed, too.
+ * @param st AVStream that the packet belongs to
+ */
+void av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st);
+
+
+/**
+ * Send a nice dump of a packet to the log.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message, lower values signifying
+ * higher importance.
+ * @param pkt packet to dump
+ * @param dump_payload True if the payload must be displayed, too.
+ * @param st AVStream that the packet belongs to
+ */
+void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload,
+ const AVStream *st);
+
+/**
+ * Get the AVCodecID for the given codec tag tag.
+ * If no codec id is found returns AV_CODEC_ID_NONE.
+ *
+ * @param tags list of supported codec_id-codec_tag pairs, as stored
+ * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
+ * @param tag codec tag to match to a codec ID
+ */
+enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag);
+
+/**
+ * Get the codec tag for the given codec id id.
+ * If no codec tag is found returns 0.
+ *
+ * @param tags list of supported codec_id-codec_tag pairs, as stored
+ * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
+ * @param id codec ID to match to a codec tag
+ */
+unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id);
+
+/**
+ * Get the codec tag for the given codec id.
+ *
+ * @param tags list of supported codec_id - codec_tag pairs, as stored
+ * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag
+ * @param id codec id that should be searched for in the list
+ * @param tag A pointer to the found tag
+ * @return 0 if id was not found in tags, > 0 if it was found
+ */
+int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id,
+ unsigned int *tag);
+
+int av_find_default_stream_index(AVFormatContext *s);
+
+/**
+ * Get the index for a specific timestamp.
+ *
+ * @param st stream that the timestamp belongs to
+ * @param timestamp timestamp to retrieve the index for
+ * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond
+ * to the timestamp which is <= the requested one, if backward
+ * is 0, then it will be >=
+ * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise
+ * @return < 0 if no such timestamp could be found
+ */
+int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags);
+
+/**
+ * Add an index entry into a sorted list. Update the entry if the list
+ * already contains it.
+ *
+ * @param timestamp timestamp in the time base of the given stream
+ */
+int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp,
+ int size, int distance, int flags);
+
+
+/**
+ * Split a URL string into components.
+ *
+ * The pointers to buffers for storing individual components may be null,
+ * in order to ignore that component. Buffers for components not found are
+ * set to empty strings. If the port is not found, it is set to a negative
+ * value.
+ *
+ * @param proto the buffer for the protocol
+ * @param proto_size the size of the proto buffer
+ * @param authorization the buffer for the authorization
+ * @param authorization_size the size of the authorization buffer
+ * @param hostname the buffer for the host name
+ * @param hostname_size the size of the hostname buffer
+ * @param port_ptr a pointer to store the port number in
+ * @param path the buffer for the path
+ * @param path_size the size of the path buffer
+ * @param url the URL to split
+ */
+void av_url_split(char *proto, int proto_size,
+ char *authorization, int authorization_size,
+ char *hostname, int hostname_size,
+ int *port_ptr,
+ char *path, int path_size,
+ const char *url);
+
+
+/**
+ * Print detailed information about the input or output format, such as
+ * duration, bitrate, streams, container, programs, metadata, side data,
+ * codec and time base.
+ *
+ * @param ic the context to analyze
+ * @param index index of the stream to dump information about
+ * @param url the URL to print, such as source or destination file
+ * @param is_output Select whether the specified context is an input(0) or output(1)
+ */
+void av_dump_format(AVFormatContext *ic,
+ int index,
+ const char *url,
+ int is_output);
+
+/**
+ * Return in 'buf' the path with '%d' replaced by a number.
+ *
+ * Also handles the '%0nd' format where 'n' is the total number
+ * of digits and '%%'.
+ *
+ * @param buf destination buffer
+ * @param buf_size destination buffer size
+ * @param path numbered sequence string
+ * @param number frame number
+ * @return 0 if OK, -1 on format error
+ */
+int av_get_frame_filename(char *buf, int buf_size,
+ const char *path, int number);
+
+/**
+ * Check whether filename actually is a numbered sequence generator.
+ *
+ * @param filename possible numbered sequence string
+ * @return 1 if a valid numbered sequence string, 0 otherwise
+ */
+int av_filename_number_test(const char *filename);
+
+/**
+ * Generate an SDP for an RTP session.
+ *
+ * Note, this overwrites the id values of AVStreams in the muxer contexts
+ * for getting unique dynamic payload types.
+ *
+ * @param ac array of AVFormatContexts describing the RTP streams. If the
+ * array is composed by only one context, such context can contain
+ * multiple AVStreams (one AVStream per RTP stream). Otherwise,
+ *           all the contexts in the array (an AVFormatContext per RTP stream)
+ * must contain only one AVStream.
+ * @param n_files number of AVFormatContexts contained in ac
+ * @param buf buffer where the SDP will be stored (must be allocated by
+ * the caller)
+ * @param size the size of the buffer
+ * @return 0 if OK, AVERROR_xxx on error
+ */
+int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size);
+
+/**
+ * Return a positive value if the given filename has one of the given
+ * extensions, 0 otherwise.
+ *
+ * @param filename file name to check against the given extensions
+ * @param extensions a comma-separated list of filename extensions
+ */
+int av_match_ext(const char *filename, const char *extensions);
+
+/**
+ * Test if the given container can store a codec.
+ *
+ * @param ofmt container to check for compatibility
+ * @param codec_id codec to potentially store in container
+ * @param std_compliance standards compliance level, one of FF_COMPLIANCE_*
+ *
+ * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot.
+ * A negative number if this information is not available.
+ */
+int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id,
+ int std_compliance);
+
+/**
+ * @defgroup riff_fourcc RIFF FourCCs
+ * @{
+ * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are
+ * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the
+ * following code:
+ * @code
+ * uint32_t tag = MKTAG('H', '2', '6', '4');
+ * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 };
+ * enum AVCodecID id = av_codec_get_id(table, tag);
+ * @endcode
+ */
+/**
+ * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID.
+ */
+const struct AVCodecTag *avformat_get_riff_video_tags(void);
+/**
+ * @return the table mapping RIFF FourCCs for audio to AVCodecID.
+ */
+const struct AVCodecTag *avformat_get_riff_audio_tags(void);
+/**
+ * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID.
+ */
+const struct AVCodecTag *avformat_get_mov_video_tags(void);
+/**
+ * @return the table mapping MOV FourCCs for audio to AVCodecID.
+ */
+const struct AVCodecTag *avformat_get_mov_audio_tags(void);
+
+/**
+ * @}
+ */
+
+/**
+ * Guess the sample aspect ratio of a frame, based on both the stream and the
+ * frame aspect ratio.
+ *
+ * Since the frame aspect ratio is set by the codec but the stream aspect ratio
+ * is set by the demuxer, these two may not be equal. This function tries to
+ * return the value that you should use if you would like to display the frame.
+ *
+ * Basic logic is to use the stream aspect ratio if it is set to something sane
+ * otherwise use the frame aspect ratio. This way a container setting, which is
+ * usually easy to modify can override the coded value in the frames.
+ *
+ * @param format the format context which the stream is part of
+ * @param stream the stream which the frame is part of
+ * @param frame the frame with the aspect ratio to be determined
+ * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea
+ */
+AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame);
+
+/**
+ * Guess the frame rate, based on both the container and codec information.
+ *
+ * @param ctx the format context which the stream is part of
+ * @param stream the stream which the frame is part of
+ * @param frame the frame for which the frame rate should be determined, may be NULL
+ * @return the guessed (valid) frame rate, 0/1 if no idea
+ */
+AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame);
+
+/**
+ * Check if the stream st contained in s is matched by the stream specifier
+ * spec.
+ *
+ * See the "stream specifiers" chapter in the documentation for the syntax
+ * of spec.
+ *
+ * @return >0 if st is matched by spec;
+ * 0 if st is not matched by spec;
+ * AVERROR code if spec is invalid
+ *
+ * @note A stream specifier can match several streams in the format.
+ */
+int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st,
+ const char *spec);
+
+int avformat_queue_attached_pictures(AVFormatContext *s);
+
+
+/**
+ * @}
+ */
+
+#endif /* AVFORMAT_AVFORMAT_H */
diff --git a/Externals/ffmpeg/dev/include/libavformat/avio.h b/Externals/ffmpeg/dev/include/libavformat/avio.h
new file mode 100644
index 0000000000..b9b4017fb3
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavformat/avio.h
@@ -0,0 +1,528 @@
+/*
+ * copyright (c) 2001 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#ifndef AVFORMAT_AVIO_H
+#define AVFORMAT_AVIO_H
+
+/**
+ * @file
+ * @ingroup lavf_io
+ * Buffered I/O operations
+ */
+
+#include <stdint.h>
+
+#include "libavutil/common.h"
+#include "libavutil/dict.h"
+#include "libavutil/log.h"
+
+#include "libavformat/version.h"
+
+
+#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */
+
+/**
+ * Callback for checking whether to abort blocking functions.
+ * AVERROR_EXIT is returned in this case by the interrupted
+ * function. During blocking operations, callback is called with
+ * opaque as parameter. If the callback returns 1, the
+ * blocking operation will be aborted.
+ *
+ * No members can be added to this struct without a major bump, if
+ * new elements have been added after this struct in AVFormatContext
+ * or AVIOContext.
+ */
+typedef struct AVIOInterruptCB {
+ int (*callback)(void*);
+ void *opaque;
+} AVIOInterruptCB;
+
+/**
+ * Bytestream IO Context.
+ * New fields can be added to the end with minor version bumps.
+ * Removal, reordering and changes to existing fields require a major
+ * version bump.
+ * sizeof(AVIOContext) must not be used outside libav*.
+ *
+ * @note None of the function pointers in AVIOContext should be called
+ * directly, they should only be set by the client application
+ * when implementing custom I/O. Normally these are set to the
+ * function pointers specified in avio_alloc_context()
+ */
+typedef struct AVIOContext {
+ /**
+ * A class for private options.
+ *
+ * If this AVIOContext is created by avio_open2(), av_class is set and
+ * passes the options down to protocols.
+ *
+ * If this AVIOContext is manually allocated, then av_class may be set by
+ * the caller.
+ *
+ * warning -- this field can be NULL, be sure to not pass this AVIOContext
+ * to any av_opt_* functions in that case.
+ */
+ const AVClass *av_class;
+ unsigned char *buffer; /**< Start of the buffer. */
+ int buffer_size; /**< Maximum buffer size */
+ unsigned char *buf_ptr; /**< Current position in the buffer */
+ unsigned char *buf_end; /**< End of the data, may be less than
+ buffer+buffer_size if the read function returned
+ less data than requested, e.g. for streams where
+ no more data has been received yet. */
+ void *opaque; /**< A private pointer, passed to the read/write/seek/...
+ functions. */
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size);
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size);
+ int64_t (*seek)(void *opaque, int64_t offset, int whence);
+ int64_t pos; /**< position in the file of the current buffer */
+ int must_flush; /**< true if the next seek should flush */
+ int eof_reached; /**< true if eof reached */
+ int write_flag; /**< true if open for writing */
+ int max_packet_size;
+ unsigned long checksum;
+ unsigned char *checksum_ptr;
+ unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size);
+ int error; /**< contains the error code or 0 if no error happened */
+ /**
+ * Pause or resume playback for network streaming protocols - e.g. MMS.
+ */
+ int (*read_pause)(void *opaque, int pause);
+ /**
+ * Seek to a given timestamp in stream with the specified stream_index.
+ * Needed for some network streaming protocols which don't support seeking
+ * to byte position.
+ */
+ int64_t (*read_seek)(void *opaque, int stream_index,
+ int64_t timestamp, int flags);
+ /**
+ * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable.
+ */
+ int seekable;
+
+ /**
+ * max filesize, used to limit allocations
+ * This field is internal to libavformat and access from outside is not allowed.
+ */
+ int64_t maxsize;
+
+ /**
+ * avio_read and avio_write should if possible be satisfied directly
+ * instead of going through a buffer, and avio_seek will always
+ * call the underlying seek function directly.
+ */
+ int direct;
+
+ /**
+ * Bytes read statistic
+ * This field is internal to libavformat and access from outside is not allowed.
+ */
+ int64_t bytes_read;
+
+ /**
+ * seek statistic
+ * This field is internal to libavformat and access from outside is not allowed.
+ */
+ int seek_count;
+
+ /**
+ * writeout statistic
+ * This field is internal to libavformat and access from outside is not allowed.
+ */
+ int writeout_count;
+
+ /**
+ * Original buffer size
+ * used internally after probing, to ensure seekback and to reset the buffer size
+ * This field is internal to libavformat and access from outside is not allowed.
+ */
+ int orig_buffer_size;
+} AVIOContext;
+
+/* unbuffered I/O */
+
+/**
+ * Return the name of the protocol that will handle the passed URL.
+ *
+ * NULL is returned if no protocol could be found for the given URL.
+ *
+ * @return Name of the protocol or NULL.
+ */
+const char *avio_find_protocol_name(const char *url);
+
+/**
+ * Return AVIO_FLAG_* access flags corresponding to the access permissions
+ * of the resource in url, or a negative value corresponding to an
+ * AVERROR code in case of failure. The returned access flags are
+ * masked by the value in flags.
+ *
+ * @note This function is intrinsically unsafe, in the sense that the
+ * checked resource may change its existence or permission status from
+ * one call to another. Thus you should not trust the returned value,
+ * unless you are sure that no other processes are accessing the
+ * checked resource.
+ */
+int avio_check(const char *url, int flags);
+
+/**
+ * Allocate and initialize an AVIOContext for buffered I/O. It must be later
+ * freed with av_free().
+ *
+ * @param buffer Memory block for input/output operations via AVIOContext.
+ * The buffer must be allocated with av_malloc() and friends.
+ * It may be freed and replaced with a new buffer by libavformat.
+ * AVIOContext.buffer holds the buffer currently in use,
+ * which must be later freed with av_free().
+ * @param buffer_size The buffer size is very important for performance.
+ * For protocols with fixed blocksize it should be set to this blocksize.
+ * For others a typical size is a cache page, e.g. 4kb.
+ * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise.
+ * @param opaque An opaque pointer to user-specific data.
+ * @param read_packet A function for refilling the buffer, may be NULL.
+ * @param write_packet A function for writing the buffer contents, may be NULL.
+ * The function may not change the input buffers content.
+ * @param seek A function for seeking to specified byte position, may be NULL.
+ *
+ * @return Allocated AVIOContext or NULL on failure.
+ */
+AVIOContext *avio_alloc_context(
+ unsigned char *buffer,
+ int buffer_size,
+ int write_flag,
+ void *opaque,
+ int (*read_packet)(void *opaque, uint8_t *buf, int buf_size),
+ int (*write_packet)(void *opaque, uint8_t *buf, int buf_size),
+ int64_t (*seek)(void *opaque, int64_t offset, int whence));
+
+void avio_w8(AVIOContext *s, int b);
+void avio_write(AVIOContext *s, const unsigned char *buf, int size);
+void avio_wl64(AVIOContext *s, uint64_t val);
+void avio_wb64(AVIOContext *s, uint64_t val);
+void avio_wl32(AVIOContext *s, unsigned int val);
+void avio_wb32(AVIOContext *s, unsigned int val);
+void avio_wl24(AVIOContext *s, unsigned int val);
+void avio_wb24(AVIOContext *s, unsigned int val);
+void avio_wl16(AVIOContext *s, unsigned int val);
+void avio_wb16(AVIOContext *s, unsigned int val);
+
+/**
+ * Write a NULL-terminated string.
+ * @return number of bytes written.
+ */
+int avio_put_str(AVIOContext *s, const char *str);
+
+/**
+ * Convert an UTF-8 string to UTF-16LE and write it.
+ * @return number of bytes written.
+ */
+int avio_put_str16le(AVIOContext *s, const char *str);
+
+/**
+ * Passing this as the "whence" parameter to a seek function causes it to
+ * return the filesize without seeking anywhere. Supporting this is optional.
+ * If it is not supported then the seek function will return <0.
+ */
+#define AVSEEK_SIZE 0x10000
+
+/**
+ * ORing this flag into the "whence" parameter of a seek function causes it to
+ * seek by any means (like reopening and linear reading) or other normally unreasonable
+ * means that can be extremely slow.
+ * This may be ignored by the seek code.
+ */
+#define AVSEEK_FORCE 0x20000
+
+/**
+ * fseek() equivalent for AVIOContext.
+ * @return new position or AVERROR.
+ */
+int64_t avio_seek(AVIOContext *s, int64_t offset, int whence);
+
+/**
+ * Skip given number of bytes forward
+ * @return new position or AVERROR.
+ */
+int64_t avio_skip(AVIOContext *s, int64_t offset);
+
+/**
+ * ftell() equivalent for AVIOContext.
+ * @return position or AVERROR.
+ */
+static av_always_inline int64_t avio_tell(AVIOContext *s)
+{
+ return avio_seek(s, 0, SEEK_CUR);
+}
+
+/**
+ * Get the filesize.
+ * @return filesize or AVERROR
+ */
+int64_t avio_size(AVIOContext *s);
+
+/**
+ * feof() equivalent for AVIOContext.
+ * @return non zero if and only if end of file
+ */
+int avio_feof(AVIOContext *s);
+#if FF_API_URL_FEOF
+/**
+ * @deprecated use avio_feof()
+ */
+attribute_deprecated
+int url_feof(AVIOContext *s);
+#endif
+
+/** @warning currently size is limited */
+int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3);
+
+/**
+ * Force flushing of buffered data.
+ *
+ * For write streams, force the buffered data to be immediately written to the output,
+ * without to wait to fill the internal buffer.
+ *
+ * For read streams, discard all currently buffered data, and advance the
+ * reported file position to that of the underlying stream. This does not
+ * read new data, and does not perform any seeks.
+ */
+void avio_flush(AVIOContext *s);
+
+/**
+ * Read size bytes from AVIOContext into buf.
+ * @return number of bytes read or AVERROR
+ */
+int avio_read(AVIOContext *s, unsigned char *buf, int size);
+
+/**
+ * @name Functions for reading from AVIOContext
+ * @{
+ *
+ * @note return 0 if EOF, so you cannot use it if EOF handling is
+ * necessary
+ */
+int avio_r8 (AVIOContext *s);
+unsigned int avio_rl16(AVIOContext *s);
+unsigned int avio_rl24(AVIOContext *s);
+unsigned int avio_rl32(AVIOContext *s);
+uint64_t avio_rl64(AVIOContext *s);
+unsigned int avio_rb16(AVIOContext *s);
+unsigned int avio_rb24(AVIOContext *s);
+unsigned int avio_rb32(AVIOContext *s);
+uint64_t avio_rb64(AVIOContext *s);
+/**
+ * @}
+ */
+
+/**
+ * Read a string from pb into buf. The reading will terminate when either
+ * a NULL character was encountered, maxlen bytes have been read, or nothing
+ * more can be read from pb. The result is guaranteed to be NULL-terminated, it
+ * will be truncated if buf is too small.
+ * Note that the string is not interpreted or validated in any way, it
+ * might get truncated in the middle of a sequence for multi-byte encodings.
+ *
+ * @return number of bytes read (is always <= maxlen).
+ * If reading ends on EOF or error, the return value will be one more than
+ * bytes actually read.
+ */
+int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen);
+
+/**
+ * Read a UTF-16 string from pb and convert it to UTF-8.
+ * The reading will terminate when either a null or invalid character was
+ * encountered or maxlen bytes have been read.
+ * @return number of bytes read (is always <= maxlen)
+ */
+int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen);
+int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen);
+
+
+/**
+ * @name URL open modes
+ * The flags argument to avio_open must be one of the following
+ * constants, optionally ORed with other flags.
+ * @{
+ */
+#define AVIO_FLAG_READ 1 /**< read-only */
+#define AVIO_FLAG_WRITE 2 /**< write-only */
+#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */
+/**
+ * @}
+ */
+
+/**
+ * Use non-blocking mode.
+ * If this flag is set, operations on the context will return
+ * AVERROR(EAGAIN) if they can not be performed immediately.
+ * If this flag is not set, operations on the context will never return
+ * AVERROR(EAGAIN).
+ * Note that this flag does not affect the opening/connecting of the
+ * context. Connecting a protocol will always block if necessary (e.g. on
+ * network protocols) but never hang (e.g. on busy devices).
+ * Warning: non-blocking protocols is work-in-progress; this flag may be
+ * silently ignored.
+ */
+#define AVIO_FLAG_NONBLOCK 8
+
+/**
+ * Use direct mode.
+ * avio_read and avio_write should if possible be satisfied directly
+ * instead of going through a buffer, and avio_seek will always
+ * call the underlying seek function directly.
+ */
+#define AVIO_FLAG_DIRECT 0x8000
+
+/**
+ * Create and initialize a AVIOContext for accessing the
+ * resource indicated by url.
+ * @note When the resource indicated by url has been opened in
+ * read+write mode, the AVIOContext can be used only for writing.
+ *
+ * @param s Used to return the pointer to the created AVIOContext.
+ * In case of failure the pointed to value is set to NULL.
+ * @param url resource to access
+ * @param flags flags which control how the resource indicated by url
+ * is to be opened
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code in case of failure
+ */
+int avio_open(AVIOContext **s, const char *url, int flags);
+
+/**
+ * Create and initialize a AVIOContext for accessing the
+ * resource indicated by url.
+ * @note When the resource indicated by url has been opened in
+ * read+write mode, the AVIOContext can be used only for writing.
+ *
+ * @param s Used to return the pointer to the created AVIOContext.
+ * In case of failure the pointed to value is set to NULL.
+ * @param url resource to access
+ * @param flags flags which control how the resource indicated by url
+ * is to be opened
+ * @param int_cb an interrupt callback to be used at the protocols level
+ * @param options A dictionary filled with protocol-private options. On return
+ * this parameter will be destroyed and replaced with a dict containing options
+ * that were not found. May be NULL.
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code in case of failure
+ */
+int avio_open2(AVIOContext **s, const char *url, int flags,
+ const AVIOInterruptCB *int_cb, AVDictionary **options);
+
+/**
+ * Close the resource accessed by the AVIOContext s and free it.
+ * This function can only be used if s was opened by avio_open().
+ *
+ * The internal buffer is automatically flushed before closing the
+ * resource.
+ *
+ * @return 0 on success, an AVERROR < 0 on error.
+ * @see avio_closep
+ */
+int avio_close(AVIOContext *s);
+
+/**
+ * Close the resource accessed by the AVIOContext *s, free it
+ * and set the pointer pointing to it to NULL.
+ * This function can only be used if s was opened by avio_open().
+ *
+ * The internal buffer is automatically flushed before closing the
+ * resource.
+ *
+ * @return 0 on success, an AVERROR < 0 on error.
+ * @see avio_close
+ */
+int avio_closep(AVIOContext **s);
+
+
+/**
+ * Open a write only memory stream.
+ *
+ * @param s new IO context
+ * @return zero if no error.
+ */
+int avio_open_dyn_buf(AVIOContext **s);
+
+/**
+ * Return the written size and a pointer to the buffer. The buffer
+ * must be freed with av_free().
+ * Padding of FF_INPUT_BUFFER_PADDING_SIZE is added to the buffer.
+ *
+ * @param s IO context
+ * @param pbuffer pointer to a byte buffer
+ * @return the length of the byte buffer
+ */
+int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer);
+
+/**
+ * Iterate through names of available protocols.
+ *
+ * @param opaque A private pointer representing current protocol.
+ * It must be a pointer to NULL on first iteration and will
+ * be updated by successive calls to avio_enum_protocols.
+ * @param output If set to 1, iterate over output protocols,
+ * otherwise over input protocols.
+ *
+ * @return A static string containing the name of current protocol or NULL
+ */
+const char *avio_enum_protocols(void **opaque, int output);
+
+/**
+ * Pause and resume playing - only meaningful if using a network streaming
+ * protocol (e.g. MMS).
+ *
+ * @param h IO context from which to call the read_pause function pointer
+ * @param pause 1 for pause, 0 for resume
+ */
+int avio_pause(AVIOContext *h, int pause);
+
+/**
+ * Seek to a given timestamp relative to some component stream.
+ * Only meaningful if using a network streaming protocol (e.g. MMS.).
+ *
+ * @param h IO context from which to call the seek function pointers
+ * @param stream_index The stream index that the timestamp is relative to.
+ * If stream_index is (-1) the timestamp should be in AV_TIME_BASE
+ * units from the beginning of the presentation.
+ * If a stream_index >= 0 is used and the protocol does not support
+ * seeking based on component streams, the call will fail.
+ * @param timestamp timestamp in AVStream.time_base units
+ * or if there is no stream specified then in AV_TIME_BASE units.
+ * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE
+ * and AVSEEK_FLAG_ANY. The protocol may silently ignore
+ * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will
+ * fail if used and not supported.
+ * @return >= 0 on success
+ * @see AVInputFormat::read_seek
+ */
+int64_t avio_seek_time(AVIOContext *h, int stream_index,
+ int64_t timestamp, int flags);
+
+/* Avoid a warning. The header can not be included because it breaks c++. */
+struct AVBPrint;
+
+/**
+ * Read contents of h into print buffer, up to max_size bytes, or up to EOF.
+ *
+ * @return 0 for success (max_size bytes read or EOF reached), negative error
+ * code otherwise
+ */
+int avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size);
+
+#endif /* AVFORMAT_AVIO_H */
diff --git a/Externals/ffmpeg/dev/include/libavformat/version.h b/Externals/ffmpeg/dev/include/libavformat/version.h
new file mode 100644
index 0000000000..5e449bb510
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavformat/version.h
@@ -0,0 +1,67 @@
+/*
+ * Version macros.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFORMAT_VERSION_H
+#define AVFORMAT_VERSION_H
+
+/**
+ * @file
+ * @ingroup libavf
+ * Libavformat version macros
+ */
+
+#include "libavutil/version.h"
+
+#define LIBAVFORMAT_VERSION_MAJOR 56
+#define LIBAVFORMAT_VERSION_MINOR 18
+#define LIBAVFORMAT_VERSION_MICRO 101
+
+#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \
+ LIBAVFORMAT_VERSION_MINOR, \
+ LIBAVFORMAT_VERSION_MICRO)
+#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \
+ LIBAVFORMAT_VERSION_MINOR, \
+ LIBAVFORMAT_VERSION_MICRO)
+#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT
+
+#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+#ifndef FF_API_LAVF_BITEXACT
+#define FF_API_LAVF_BITEXACT (LIBAVFORMAT_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_LAVF_FRAC
+#define FF_API_LAVF_FRAC (LIBAVFORMAT_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_LAVF_CODEC_TB
+#define FF_API_LAVF_CODEC_TB (LIBAVFORMAT_VERSION_MAJOR < 57)
+#endif
+#ifndef FF_API_URL_FEOF
+#define FF_API_URL_FEOF (LIBAVFORMAT_VERSION_MAJOR < 57)
+#endif
+
+#ifndef FF_API_R_FRAME_RATE
+#define FF_API_R_FRAME_RATE 1
+#endif
+#endif /* AVFORMAT_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/adler32.h b/Externals/ffmpeg/dev/include/libavutil/adler32.h
new file mode 100644
index 0000000000..0dc69ec0a8
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/adler32.h
@@ -0,0 +1,55 @@
+/*
+ * copyright (c) 2006 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_ADLER32_H
+#define AVUTIL_ADLER32_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @file
+ * Public header for libavutil Adler32 hasher
+ *
+ * @defgroup lavu_adler32 Adler32
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Calculate the Adler32 checksum of a buffer.
+ *
+ * Passing the return value to a subsequent av_adler32_update() call
+ * allows the checksum of multiple buffers to be calculated as though
+ * they were concatenated.
+ *
+ * @param adler initial checksum value
+ * @param buf pointer to input buffer
+ * @param len size of input buffer
+ * @return updated checksum
+ */
+unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf,
+ unsigned int len) av_pure;
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ADLER32_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/aes.h b/Externals/ffmpeg/dev/include/libavutil/aes.h
new file mode 100644
index 0000000000..09efbda107
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/aes.h
@@ -0,0 +1,65 @@
+/*
+ * copyright (c) 2007 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AES_H
+#define AVUTIL_AES_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_aes AES
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_aes_size;
+
+struct AVAES;
+
+/**
+ * Allocate an AVAES context.
+ */
+struct AVAES *av_aes_alloc(void);
+
+/**
+ * Initialize an AVAES context.
+ * @param key_bits 128, 192 or 256
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ * @param count number of 16 byte blocks
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AES_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/attributes.h b/Externals/ffmpeg/dev/include/libavutil/attributes.h
new file mode 100644
index 0000000000..7d3f4a91cf
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/attributes.h
@@ -0,0 +1,160 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Macro definitions for various function/variable attributes
+ */
+
+#ifndef AVUTIL_ATTRIBUTES_H
+#define AVUTIL_ATTRIBUTES_H
+
+#ifdef __GNUC__
+# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > x || __GNUC__ == x && __GNUC_MINOR__ >= y)
+#else
+# define AV_GCC_VERSION_AT_LEAST(x,y) 0
+#endif
+
+#ifndef av_always_inline
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_always_inline __attribute__((always_inline)) inline
+#elif defined(_MSC_VER)
+# define av_always_inline __forceinline
+#else
+# define av_always_inline inline
+#endif
+#endif
+
+#ifndef av_extern_inline
+#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__)
+# define av_extern_inline extern inline
+#else
+# define av_extern_inline inline
+#endif
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_noinline __attribute__((noinline))
+#elif defined(_MSC_VER)
+# define av_noinline __declspec(noinline)
+#else
+# define av_noinline
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_pure __attribute__((pure))
+#else
+# define av_pure
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,6)
+# define av_const __attribute__((const))
+#else
+# define av_const
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+# define av_cold __attribute__((cold))
+#else
+# define av_cold
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__)
+# define av_flatten __attribute__((flatten))
+#else
+# define av_flatten
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define attribute_deprecated __attribute__((deprecated))
+#elif defined(_MSC_VER)
+# define attribute_deprecated __declspec(deprecated)
+#else
+# define attribute_deprecated
+#endif
+
+/**
+ * Disable warnings about deprecated features
+ * This is useful for sections of code kept for backward compatibility and
+ * scheduled for removal.
+ */
+#ifndef AV_NOWARN_DEPRECATED
+#if AV_GCC_VERSION_AT_LEAST(4,6)
+# define AV_NOWARN_DEPRECATED(code) \
+ _Pragma("GCC diagnostic push") \
+ _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \
+ code \
+ _Pragma("GCC diagnostic pop")
+#elif defined(_MSC_VER)
+# define AV_NOWARN_DEPRECATED(code) \
+ __pragma(warning(push)) \
+ __pragma(warning(disable : 4996)) \
+ code; \
+ __pragma(warning(pop))
+#else
+# define AV_NOWARN_DEPRECATED(code) code
+#endif
+#endif
+
+
+#if defined(__GNUC__)
+# define av_unused __attribute__((unused))
+#else
+# define av_unused
+#endif
+
+/**
+ * Mark a variable as used and prevent the compiler from optimizing it
+ * away. This is useful for variables accessed only from inline
+ * assembler without the compiler being aware.
+ */
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+# define av_used __attribute__((used))
+#else
+# define av_used
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,3)
+# define av_alias __attribute__((may_alias))
+#else
+# define av_alias
+#endif
+
+#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__)
+# define av_uninit(x) x=x
+#else
+# define av_uninit(x) x
+#endif
+
+#ifdef __GNUC__
+# define av_builtin_constant_p __builtin_constant_p
+# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos)))
+#else
+# define av_builtin_constant_p(x) 0
+# define av_printf_format(fmtpos, attrpos)
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(2,5)
+# define av_noreturn __attribute__((noreturn))
+#else
+# define av_noreturn
+#endif
+
+#endif /* AVUTIL_ATTRIBUTES_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/audio_fifo.h b/Externals/ffmpeg/dev/include/libavutil/audio_fifo.h
new file mode 100644
index 0000000000..d21e6a1318
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/audio_fifo.h
@@ -0,0 +1,153 @@
+/*
+ * Audio FIFO
+ * Copyright (c) 2012 Justin Ruggles
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Audio FIFO Buffer
+ */
+
+#ifndef AVUTIL_AUDIO_FIFO_H
+#define AVUTIL_AUDIO_FIFO_H
+
+#include "avutil.h"
+#include "fifo.h"
+#include "samplefmt.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_audiofifo Audio FIFO Buffer
+ * @{
+ */
+
+/**
+ * Context for an Audio FIFO Buffer.
+ *
+ * - Operates at the sample level rather than the byte level.
+ * - Supports multiple channels with either planar or packed sample format.
+ * - Automatic reallocation when writing to a full buffer.
+ */
+typedef struct AVAudioFifo AVAudioFifo;
+
+/**
+ * Free an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to free
+ */
+void av_audio_fifo_free(AVAudioFifo *af);
+
+/**
+ * Allocate an AVAudioFifo.
+ *
+ * @param sample_fmt sample format
+ * @param channels number of channels
+ * @param nb_samples initial allocation size, in samples
+ * @return newly allocated AVAudioFifo, or NULL on error
+ */
+AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels,
+ int nb_samples);
+
+/**
+ * Reallocate an AVAudioFifo.
+ *
+ * @param af AVAudioFifo to reallocate
+ * @param nb_samples new allocation size, in samples
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Write data to an AVAudioFifo.
+ *
+ * The AVAudioFifo will be reallocated automatically if the available space
+ * is less than nb_samples.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to write to
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to write
+ * @return number of samples actually written, or negative AVERROR
+ * code on failure. If successful, the number of samples
+ * actually written will always be nb_samples.
+ */
+int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Read data from an AVAudioFifo.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param af AVAudioFifo to read from
+ * @param data audio data plane pointers
+ * @param nb_samples number of samples to read
+ * @return number of samples actually read, or negative AVERROR code
+ * on failure. The number of samples actually read will not
+ * be greater than nb_samples, and will only be less than
+ * nb_samples if av_audio_fifo_size is less than nb_samples.
+ */
+int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples);
+
+/**
+ * Drain data from an AVAudioFifo.
+ *
+ * Removes the data without reading it.
+ *
+ * @param af AVAudioFifo to drain
+ * @param nb_samples number of samples to drain
+ * @return 0 if OK, or negative AVERROR code on failure
+ */
+int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples);
+
+/**
+ * Reset the AVAudioFifo buffer.
+ *
+ * This empties all data in the buffer.
+ *
+ * @param af AVAudioFifo to reset
+ */
+void av_audio_fifo_reset(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for reading.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for reading
+ */
+int av_audio_fifo_size(AVAudioFifo *af);
+
+/**
+ * Get the current number of samples in the AVAudioFifo available for writing.
+ *
+ * @param af the AVAudioFifo to query
+ * @return number of samples available for writing
+ */
+int av_audio_fifo_space(AVAudioFifo *af);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_AUDIO_FIFO_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/audioconvert.h b/Externals/ffmpeg/dev/include/libavutil/audioconvert.h
new file mode 100644
index 0000000000..300a67cd3d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/audioconvert.h
@@ -0,0 +1,6 @@
+
+#include "version.h"
+
+#if FF_API_AUDIOCONVERT
+#include "channel_layout.h"
+#endif
diff --git a/Externals/ffmpeg/dev/include/libavutil/avassert.h b/Externals/ffmpeg/dev/include/libavutil/avassert.h
new file mode 100644
index 0000000000..41f5e0eea7
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/avassert.h
@@ -0,0 +1,66 @@
+/*
+ * copyright (c) 2010 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple assert() macros that are a bit more flexible than ISO C assert().
+ * @author Michael Niedermayer
+ */
+
+#ifndef AVUTIL_AVASSERT_H
+#define AVUTIL_AVASSERT_H
+
+#include <stdlib.h>
+#include "avutil.h"
+#include "log.h"
+
+/**
+ * assert() equivalent, that is always enabled.
+ */
+#define av_assert0(cond) do { \
+ if (!(cond)) { \
+ av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \
+ AV_STRINGIFY(cond), __FILE__, __LINE__); \
+ abort(); \
+ } \
+} while (0)
+
+
+/**
+ * assert() equivalent, that does not lie in speed critical code.
+ * These asserts() thus can be enabled without fearing speedloss.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0
+#define av_assert1(cond) av_assert0(cond)
+#else
+#define av_assert1(cond) ((void)0)
+#endif
+
+
+/**
+ * assert() equivalent, that does lie in speed critical code.
+ */
+#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1
+#define av_assert2(cond) av_assert0(cond)
+#else
+#define av_assert2(cond) ((void)0)
+#endif
+
+#endif /* AVUTIL_AVASSERT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/avconfig.h b/Externals/ffmpeg/dev/include/libavutil/avconfig.h
new file mode 100644
index 0000000000..36a8cd14da
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/avconfig.h
@@ -0,0 +1,7 @@
+/* Generated by ffconf */
+#ifndef AVUTIL_AVCONFIG_H
+#define AVUTIL_AVCONFIG_H
+#define AV_HAVE_BIGENDIAN 0
+#define AV_HAVE_FAST_UNALIGNED 1
+#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0
+#endif /* AVUTIL_AVCONFIG_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/avstring.h b/Externals/ffmpeg/dev/include/libavutil/avstring.h
new file mode 100644
index 0000000000..ffb7aa6bfa
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/avstring.h
@@ -0,0 +1,371 @@
+/*
+ * Copyright (c) 2007 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVSTRING_H
+#define AVUTIL_AVSTRING_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_string
+ * @{
+ */
+
+/**
+ * Return non-zero if pfx is a prefix of str. If it is, *ptr is set to
+ * the address of the first character in str after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_strstart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Return non-zero if pfx is a prefix of str independent of case. If
+ * it is, *ptr is set to the address of the first character in str
+ * after the prefix.
+ *
+ * @param str input string
+ * @param pfx prefix to test
+ * @param ptr updated if the prefix is matched inside str
+ * @return non-zero if the prefix matches, zero otherwise
+ */
+int av_stristart(const char *str, const char *pfx, const char **ptr);
+
+/**
+ * Locate the first case-independent occurrence in the string haystack
+ * of the string needle. A zero-length string needle is considered to
+ * match at the start of haystack.
+ *
+ * This function is a case-insensitive version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_stristr(const char *haystack, const char *needle);
+
+/**
+ * Locate the first occurrence of the string needle in the string haystack
+ * where not more than hay_length characters are searched. A zero-length
+ * string needle is considered to match at the start of haystack.
+ *
+ * This function is a length-limited version of the standard strstr().
+ *
+ * @param haystack string to search in
+ * @param needle string to search for
+ * @param hay_length length of string to search in
+ * @return pointer to the located match within haystack
+ * or a null pointer if no match
+ */
+char *av_strnstr(const char *haystack, const char *needle, size_t hay_length);
+
+/**
+ * Copy the string src to dst, but no more than size - 1 bytes, and
+ * null-terminate dst.
+ *
+ * This function is the same as BSD strlcpy().
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the length of src
+ *
+ * @warning since the return value is the length of src, src absolutely
+ * _must_ be a properly 0-terminated string, otherwise this will read beyond
+ * the end of the buffer and possibly crash.
+ */
+size_t av_strlcpy(char *dst, const char *src, size_t size);
+
+/**
+ * Append the string src to the string dst, but to a total length of
+ * no more than size - 1 bytes, and null-terminate dst.
+ *
+ * This function is similar to BSD strlcat(), but differs when
+ * size <= strlen(dst).
+ *
+ * @param dst destination buffer
+ * @param src source string
+ * @param size size of destination buffer
+ * @return the total length of src and dst
+ *
+ * @warning since the return value use the length of src and dst, these
+ * absolutely _must_ be a properly 0-terminated strings, otherwise this
+ * will read beyond the end of the buffer and possibly crash.
+ */
+size_t av_strlcat(char *dst, const char *src, size_t size);
+
+/**
+ * Append output to a string, according to a format. Never write out of
+ * the destination buffer, and always put a terminating 0 within
+ * the buffer.
+ * @param dst destination buffer (string to which the output is
+ * appended)
+ * @param size total size of the destination buffer
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used
+ * @return the length of the string that would have been generated
+ * if enough space had been available
+ */
+size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4);
+
+/**
+ * Get the count of continuous non zero chars starting from the beginning.
+ *
+ * @param len maximum number of characters to check in the string, that
+ * is the maximum value which is returned by the function
+ */
+static inline size_t av_strnlen(const char *s, size_t len)
+{
+ size_t i;
+ for (i = 0; i < len && s[i]; i++)
+ ;
+ return i;
+}
+
+/**
+ * Print arguments following specified format into a large enough auto
+ * allocated buffer. It is similar to GNU asprintf().
+ * @param fmt printf-compatible format string, specifying how the
+ * following parameters are used.
+ * @return the allocated string
+ * @note You have to free the string yourself with av_free().
+ */
+char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2);
+
+/**
+ * Convert a number to a av_malloced string.
+ */
+char *av_d2str(double d);
+
+/**
+ * Unescape the given string until a non escaped terminating char,
+ * and return the token corresponding to the unescaped string.
+ *
+ * The normal \ and ' escaping is supported. Leading and trailing
+ * whitespaces are removed, unless they are escaped with '\' or are
+ * enclosed between ''.
+ *
+ * @param buf the buffer to parse, buf will be updated to point to the
+ * terminating char
+ * @param term a 0-terminated list of terminating chars
+ * @return the malloced unescaped string, which must be av_freed by
+ * the user, NULL in case of allocation failure
+ */
+char *av_get_token(const char **buf, const char *term);
+
+/**
+ * Split the string into several tokens which can be accessed by
+ * successive calls to av_strtok().
+ *
+ * A token is defined as a sequence of characters not belonging to the
+ * set specified in delim.
+ *
+ * On the first call to av_strtok(), s should point to the string to
+ * parse, and the value of saveptr is ignored. In subsequent calls, s
+ * should be NULL, and saveptr should be unchanged since the previous
+ * call.
+ *
+ * This function is similar to strtok_r() defined in POSIX.1.
+ *
+ * @param s the string to parse, may be NULL
+ * @param delim 0-terminated list of token delimiters, must be non-NULL
+ * @param saveptr user-provided pointer which points to stored
+ * information necessary for av_strtok() to continue scanning the same
+ * string. saveptr is updated to point to the next character after the
+ * first delimiter found, or to NULL if the string was terminated
+ * @return the found token, or NULL when no token is found
+ */
+char *av_strtok(char *s, const char *delim, char **saveptr);
+
+/**
+ * Locale-independent conversion of ASCII isdigit.
+ */
+av_const int av_isdigit(int c);
+
+/**
+ * Locale-independent conversion of ASCII isgraph.
+ */
+av_const int av_isgraph(int c);
+
+/**
+ * Locale-independent conversion of ASCII isspace.
+ */
+av_const int av_isspace(int c);
+
+/**
+ * Locale-independent conversion of ASCII characters to uppercase.
+ */
+static inline av_const int av_toupper(int c)
+{
+ if (c >= 'a' && c <= 'z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII characters to lowercase.
+ */
+static inline av_const int av_tolower(int c)
+{
+ if (c >= 'A' && c <= 'Z')
+ c ^= 0x20;
+ return c;
+}
+
+/**
+ * Locale-independent conversion of ASCII isxdigit.
+ */
+av_const int av_isxdigit(int c);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strcasecmp(const char *a, const char *b);
+
+/**
+ * Locale-independent case-insensitive compare.
+ * @note This means only ASCII-range characters are case-insensitive
+ */
+int av_strncasecmp(const char *a, const char *b, size_t n);
+
+
+/**
+ * Thread safe basename.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return pointer to the basename substring.
+ */
+const char *av_basename(const char *path);
+
+/**
+ * Thread safe dirname.
+ * @param path the path, on DOS both \ and / are considered separators.
+ * @return the path with the separator replaced by the string terminator or ".".
+ * @note the function may change the input string.
+ */
+const char *av_dirname(char *path);
+
+/**
+ * Match instances of a name in a comma-separated list of names.
+ * @param name Name to look for.
+ * @param names List of names.
+ * @return 1 on match, 0 otherwise.
+ */
+int av_match_name(const char *name, const char *names);
+
+enum AVEscapeMode {
+ AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode.
+ AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping.
+ AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping.
+};
+
+/**
+ * Consider spaces special and escape them even in the middle of the
+ * string.
+ *
+ * This is equivalent to adding the whitespace characters to the special
+ * characters lists, except it is guaranteed to use the exact same list
+ * of whitespace characters as the rest of libavutil.
+ */
+#define AV_ESCAPE_FLAG_WHITESPACE 0x01
+
+/**
+ * Escape only specified special characters.
+ * Without this flag, escape also any characters that may be considered
+ * special by av_get_token(), such as the single quote.
+ */
+#define AV_ESCAPE_FLAG_STRICT 0x02
+
+/**
+ * Escape string in src, and put the escaped string in an allocated
+ * string in *dst, which must be freed with av_free().
+ *
+ * @param dst pointer where an allocated string is put
+ * @param src string to escape, must be non-NULL
+ * @param special_chars string containing the special characters which
+ * need to be escaped, can be NULL
+ * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros.
+ * Any unknown value for mode will be considered equivalent to
+ * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
+ * notice.
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros
+ * @return the length of the allocated string, or a negative error code in case of error
+ * @see av_bprint_escape()
+ */
+int av_escape(char **dst, const char *src, const char *special_chars,
+ enum AVEscapeMode mode, int flags);
+
+#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF
+#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF
+#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes
+#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML
+
+#define AV_UTF8_FLAG_ACCEPT_ALL \
+ AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES
+
+/**
+ * Read and decode a single UTF-8 code point (character) from the
+ * buffer in *buf, and update *buf to point to the next byte to
+ * decode.
+ *
+ * In case of an invalid byte sequence, the pointer will be updated to
+ * the next byte after the invalid sequence and the function will
+ * return an error code.
+ *
+ * Depending on the specified flags, the function will also fail in
+ * case the decoded code point does not belong to a valid range.
+ *
+ * @note For speed-relevant code a carefully implemented use of
+ * GET_UTF8() may be preferred.
+ *
+ * @param codep pointer used to return the parsed code in case of success.
+ * The value in *codep is set even in case the range check fails.
+ * @param bufp pointer to the address the first byte of the sequence
+ * to decode, updated by the function to point to the
+ * byte next after the decoded sequence
+ * @param buf_end pointer to the end of the buffer, points to the next
+ * byte past the last in the buffer. This is used to
+ * avoid buffer overreads (in case of an unfinished
+ * UTF-8 sequence towards the end of the buffer).
+ * @param flags a collection of AV_UTF8_FLAG_* flags
+ * @return >= 0 in case a sequence was successfully read, a negative
+ * value in case of invalid sequence
+ */
+int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end,
+ unsigned int flags);
+
+/**
+ * Check if a name is in a list.
+ * @returns 0 if not found, or the 1 based index where it has been found in the
+ * list.
+ */
+int av_match_list(const char *name, const char *list, char separator);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_AVSTRING_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/avutil.h b/Externals/ffmpeg/dev/include/libavutil/avutil.h
new file mode 100644
index 0000000000..e6ebb6c43c
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/avutil.h
@@ -0,0 +1,344 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_AVUTIL_H
+#define AVUTIL_AVUTIL_H
+
+/**
+ * @file
+ * external API header
+ */
+
+/**
+ * @mainpage
+ *
+ * @section ffmpeg_intro Introduction
+ *
+ * This document describes the usage of the different libraries
+ * provided by FFmpeg.
+ *
+ * @li @ref libavc "libavcodec" encoding/decoding library
+ * @li @ref lavfi "libavfilter" graph-based frame editing library
+ * @li @ref libavf "libavformat" I/O and muxing/demuxing library
+ * @li @ref lavd "libavdevice" special devices muxing/demuxing library
+ * @li @ref lavu "libavutil" common utility library
+ * @li @ref lswr "libswresample" audio resampling, format conversion and mixing
+ * @li @ref lpp "libpostproc" post processing library
+ * @li @ref libsws "libswscale" color conversion and scaling library
+ *
+ * @section ffmpeg_versioning Versioning and compatibility
+ *
+ * Each of the FFmpeg libraries contains a version.h header, which defines a
+ * major, minor and micro version number with the
+ * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version
+ * number is incremented with backward incompatible changes - e.g. removing
+ * parts of the public API, reordering public struct members, etc. The minor
+ * version number is incremented for backward compatible API changes or major
+ * new features - e.g. adding a new public function or a new decoder. The micro
+ * version number is incremented for smaller changes that a calling program
+ * might still want to check for - e.g. changing behavior in a previously
+ * unspecified situation.
+ *
+ * FFmpeg guarantees backward API and ABI compatibility for each library as long
+ * as its major version number is unchanged. This means that no public symbols
+ * will be removed or renamed. Types and names of the public struct members and
+ * values of public macros and enums will remain the same (unless they were
+ * explicitly declared as not part of the public API). Documented behavior will
+ * not change.
+ *
+ * In other words, any correct program that works with a given FFmpeg snapshot
+ * should work just as well without any changes with any later snapshot with the
+ * same major versions. This applies to both rebuilding the program against new
+ * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program
+ * links against.
+ *
+ * However, new public symbols may be added and new members may be appended to
+ * public structs whose size is not part of public ABI (most public structs in
+ * FFmpeg). New macros and enum values may be added. Behavior in undocumented
+ * situations may change slightly (and be documented). All those are accompanied
+ * by an entry in doc/APIchanges and incrementing either the minor or micro
+ * version number.
+ */
+
+/**
+ * @defgroup lavu Common utility functions
+ *
+ * @brief
+ * libavutil contains the code shared across all the other FFmpeg
+ * libraries
+ *
+ * @note In order to use the functions provided by avutil you must include
+ * the specific header.
+ *
+ * @{
+ *
+ * @defgroup lavu_crypto Crypto and Hashing
+ *
+ * @{
+ * @}
+ *
+ * @defgroup lavu_math Maths
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_string String Manipulation
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_mem Memory Management
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_data Data Structures
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_audio Audio related
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_error Error Codes
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_log Logging Facility
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup lavu_misc Other
+ *
+ * @{
+ *
+ * @defgroup lavu_internal Internal
+ *
+ * Not exported functions, for internal usage only
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup preproc_misc Preprocessor String Macros
+ *
+ * @{
+ *
+ * @}
+ *
+ * @defgroup version_utils Library Version Macros
+ *
+ * @{
+ *
+ * @}
+ */
+
+
+/**
+ * @addtogroup lavu_ver
+ * @{
+ */
+
+/**
+ * Return the LIBAVUTIL_VERSION_INT constant.
+ */
+unsigned avutil_version(void);
+
+/**
+ * Return the libavutil build-time configuration.
+ */
+const char *avutil_configuration(void);
+
+/**
+ * Return the libavutil license.
+ */
+const char *avutil_license(void);
+
+/**
+ * @}
+ */
+
+/**
+ * @addtogroup lavu_media Media Type
+ * @brief Media Type
+ */
+
+enum AVMediaType {
+ AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA
+ AVMEDIA_TYPE_VIDEO,
+ AVMEDIA_TYPE_AUDIO,
+ AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous
+ AVMEDIA_TYPE_SUBTITLE,
+ AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse
+ AVMEDIA_TYPE_NB
+};
+
+/**
+ * Return a string describing the media_type enum, NULL if media_type
+ * is unknown.
+ */
+const char *av_get_media_type_string(enum AVMediaType media_type);
+
+/**
+ * @defgroup lavu_const Constants
+ * @{
+ *
+ * @defgroup lavu_enc Encoding specific
+ *
+ * @note those definition should move to avcodec
+ * @{
+ */
+
+#define FF_LAMBDA_SHIFT 7
+#define FF_LAMBDA_SCALE (1<<FF_LAMBDA_SHIFT)
+
+/**
+ * @defgroup lavu_base64 Base64
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+
+/**
+ * Decode a base64-encoded string.
+ *
+ * @param out buffer for decoded data
+ * @param in null-terminated input string
+ * @param out_size size in bytes of the out buffer, must be at
+ * least 3/4 of the length of in
+ * @return number of bytes written, or a negative value in case of
+ * invalid input
+ */
+int av_base64_decode(uint8_t *out, const char *in, int out_size);
+
+/**
+ * Encode data to base64 and null-terminate.
+ *
+ * @param out buffer for encoded data
+ * @param out_size size in bytes of the out buffer (including the
+ * null terminator), must be at least AV_BASE64_SIZE(in_size)
+ * @param in input buffer containing the data to encode
+ * @param in_size size in bytes of the in buffer
+ * @return out or NULL in case of error
+ */
+char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size);
+
+/**
+ * Calculate the output size needed to base64-encode x bytes to a
+ * null-terminated string.
+ */
+#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1)
+
+ /**
+ * @}
+ */
+
+#endif /* AVUTIL_BASE64_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/blowfish.h b/Externals/ffmpeg/dev/include/libavutil/blowfish.h
new file mode 100644
index 0000000000..0b004532de
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/blowfish.h
@@ -0,0 +1,77 @@
+/*
+ * Blowfish algorithm
+ * Copyright (c) 2012 Samuel Pitoiset
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BLOWFISH_H
+#define AVUTIL_BLOWFISH_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_blowfish Blowfish
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+#define AV_BF_ROUNDS 16
+
+typedef struct AVBlowfish {
+ uint32_t p[AV_BF_ROUNDS + 2];
+ uint32_t s[4][256];
+} AVBlowfish;
+
+/**
+ * Initialize an AVBlowfish context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param key a key
+ * @param key_len length of the key
+ */
+void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param xl left four bytes halves of input to be encrypted
+ * @param xr right four bytes halves of input to be encrypted
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr,
+ int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVBlowfish context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BLOWFISH_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/bprint.h b/Externals/ffmpeg/dev/include/libavutil/bprint.h
new file mode 100644
index 0000000000..c09b1ac1e1
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/bprint.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2012 Nicolas George
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_BPRINT_H
+#define AVUTIL_BPRINT_H
+
+#include <stdarg.h>
+
+#include "attributes.h"
+#include "avstring.h"
+
+/**
+ * Define a structure with extra padding to a fixed size
+ * This helps ensuring binary compatibility with future versions.
+ */
+
+#define FF_PAD_STRUCTURE(name, size, ...) \
+struct ff_pad_helper_##name { __VA_ARGS__ }; \
+typedef struct name { \
+ __VA_ARGS__ \
+ char reserved_padding[size - sizeof(struct ff_pad_helper_##name)]; \
+} name;
+
+/**
+ * Buffer to print data progressively
+ *
+ * The string buffer grows as necessary and is always 0-terminated.
+ * The content of the string is never accessed, and thus is
+ * encoding-agnostic and can even hold binary data.
+ *
+ * Small buffers are kept in the structure itself, and thus require no
+ * memory allocation at all (unless the contents of the buffer is needed
+ * after the structure goes out of scope). This is almost as lightweight as
+ * declaring a local "char buf[512]".
+ *
+ * The length of the string can go beyond the allocated size: the buffer is
+ * then truncated, but the functions still keep account of the actual total
+ * length.
+ *
+ * In other words, buf->len can be greater than buf->size and records the
+ * total length of what would have been to the buffer if there had been
+ * enough memory.
+ *
+ * Append operations do not need to be tested for failure: if a memory
+ * allocation fails, data stop being appended to the buffer, but the length
+ * is still updated. This situation can be tested with
+ * av_bprint_is_complete().
+ *
+ * The size_max field determines several possible behaviours:
+ *
+ * size_max = -1 (= UINT_MAX) or any large value will let the buffer be
+ * reallocated as necessary, with an amortized linear cost.
+ *
+ * size_max = 0 prevents writing anything to the buffer: only the total
+ * length is computed. The write operations can then possibly be repeated in
+ * a buffer with exactly the necessary size
+ * (using size_init = size_max = len + 1).
+ *
+ * size_max = 1 is automatically replaced by the exact size available in the
+ * structure itself, thus ensuring no dynamic memory allocation. The
+ * internal buffer is large enough to hold a reasonable paragraph of text,
+ * such as the current paragraph.
+ */
+
+FF_PAD_STRUCTURE(AVBPrint, 1024,
+ char *str; /**< string so far */
+ unsigned len; /**< length so far */
+ unsigned size; /**< allocated memory */
+ unsigned size_max; /**< maximum allocated memory */
+ char reserved_internal_buffer[1];
+)
+
+/**
+ * Convenience macros for special values for av_bprint_init() size_max
+ * parameter.
+ */
+#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1)
+#define AV_BPRINT_SIZE_AUTOMATIC 1
+#define AV_BPRINT_SIZE_COUNT_ONLY 0
+
+/**
+ * Init a print buffer.
+ *
+ * @param buf buffer to init
+ * @param size_init initial size (including the final 0)
+ * @param size_max maximum size;
+ * 0 means do not write anything, just count the length;
+ * 1 is replaced by the maximum value for automatic storage;
+ * any large value means that the internal buffer will be
+ * reallocated as needed up to that limit; -1 is converted to
+ * UINT_MAX, the largest limit possible.
+ * Check also AV_BPRINT_SIZE_* macros.
+ */
+void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max);
+
+/**
+ * Init a print buffer using a pre-existing buffer.
+ *
+ * The buffer will not be reallocated.
+ *
+ * @param buf buffer structure to init
+ * @param buffer byte buffer to use for the string data
+ * @param size size of buffer
+ */
+void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size);
+
+/**
+ * Append a formatted string to a print buffer.
+ */
+void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3);
+
+/**
+ * Append a formatted string to a print buffer.
+ */
+void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg);
+
+/**
+ * Append char c n times to a print buffer.
+ */
+void av_bprint_chars(AVBPrint *buf, char c, unsigned n);
+
+/**
+ * Append data to a print buffer.
+ *
+ * @param buf bprint buffer to use
+ * @param data pointer to data
+ * @param size size of data
+ */
+void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size);
+
+struct tm;
+/**
+ * Append a formatted date and time to a print buffer.
+ *
+ * @param buf bprint buffer to use
+ * @param fmt date and time format string, see strftime()
+ * @param tm broken-down time structure to translate
+ *
+ * @note due to poor design of the standard strftime function, it may
+ * produce poor results if the format string expands to a very long text and
+ * the bprint buffer is near the limit stated by the size_max option.
+ */
+void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm);
+
+/**
+ * Allocate bytes in the buffer for external use.
+ *
+ * @param[in] buf buffer structure
+ * @param[in] size required size
+ * @param[out] mem pointer to the memory area
+ * @param[out] actual_size size of the memory area after allocation;
+ * can be larger or smaller than size
+ */
+void av_bprint_get_buffer(AVBPrint *buf, unsigned size,
+ unsigned char **mem, unsigned *actual_size);
+
+/**
+ * Reset the string to "" but keep internal allocated data.
+ */
+void av_bprint_clear(AVBPrint *buf);
+
+/**
+ * Test if the print buffer is complete (not truncated).
+ *
+ * It may have been truncated due to a memory allocation failure
+ * or the size_max limit (compare size and size_max if necessary).
+ */
+static inline int av_bprint_is_complete(const AVBPrint *buf)
+{
+ return buf->len < buf->size;
+}
+
+/**
+ * Finalize a print buffer.
+ *
+ * The print buffer can no longer be used afterwards,
+ * but the len and size fields are still valid.
+ *
+ * @param[out] ret_str if not NULL, used to return a permanent copy of the
+ * buffer contents, or NULL if memory allocation fails;
+ * if NULL, the buffer is discarded and freed
+ * @return 0 for success or error code (probably AVERROR(ENOMEM))
+ */
+int av_bprint_finalize(AVBPrint *buf, char **ret_str);
+
+/**
+ * Escape the content in src and append it to dstbuf.
+ *
+ * @param dstbuf already inited destination bprint buffer
+ * @param src string containing the text to escape
+ * @param special_chars string containing the special characters which
+ * need to be escaped, can be NULL
+ * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros.
+ * Any unknown value for mode will be considered equivalent to
+ * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without
+ * notice.
+ * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros
+ */
+void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars,
+ enum AVEscapeMode mode, int flags);
+
+#endif /* AVUTIL_BPRINT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/bswap.h b/Externals/ffmpeg/dev/include/libavutil/bswap.h
new file mode 100644
index 0000000000..91cb79538d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/bswap.h
@@ -0,0 +1,109 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * byte swapping routines
+ */
+
+#ifndef AVUTIL_BSWAP_H
+#define AVUTIL_BSWAP_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_AARCH64
+# include "aarch64/bswap.h"
+#elif ARCH_ARM
+# include "arm/bswap.h"
+#elif ARCH_AVR32
+# include "avr32/bswap.h"
+#elif ARCH_SH4
+# include "sh4/bswap.h"
+#elif ARCH_X86
+# include "x86/bswap.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff))
+#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16))
+#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32))
+
+#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x)
+
+#ifndef av_bswap16
+static av_always_inline av_const uint16_t av_bswap16(uint16_t x)
+{
+ x= (x>>8) | (x<<8);
+ return x;
+}
+#endif
+
+#ifndef av_bswap32
+static av_always_inline av_const uint32_t av_bswap32(uint32_t x)
+{
+ return AV_BSWAP32C(x);
+}
+#endif
+
+#ifndef av_bswap64
+static inline uint64_t av_const av_bswap64(uint64_t x)
+{
+ return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32);
+}
+#endif
+
+// be2ne ... big-endian to native-endian
+// le2ne ... little-endian to native-endian
+
+#if AV_HAVE_BIGENDIAN
+#define av_be2ne16(x) (x)
+#define av_be2ne32(x) (x)
+#define av_be2ne64(x) (x)
+#define av_le2ne16(x) av_bswap16(x)
+#define av_le2ne32(x) av_bswap32(x)
+#define av_le2ne64(x) av_bswap64(x)
+#define AV_BE2NEC(s, x) (x)
+#define AV_LE2NEC(s, x) AV_BSWAPC(s, x)
+#else
+#define av_be2ne16(x) av_bswap16(x)
+#define av_be2ne32(x) av_bswap32(x)
+#define av_be2ne64(x) av_bswap64(x)
+#define av_le2ne16(x) (x)
+#define av_le2ne32(x) (x)
+#define av_le2ne64(x) (x)
+#define AV_BE2NEC(s, x) AV_BSWAPC(s, x)
+#define AV_LE2NEC(s, x) (x)
+#endif
+
+#define AV_BE2NE16C(x) AV_BE2NEC(16, x)
+#define AV_BE2NE32C(x) AV_BE2NEC(32, x)
+#define AV_BE2NE64C(x) AV_BE2NEC(64, x)
+#define AV_LE2NE16C(x) AV_LE2NEC(16, x)
+#define AV_LE2NE32C(x) AV_LE2NEC(32, x)
+#define AV_LE2NE64C(x) AV_LE2NEC(64, x)
+
+#endif /* AVUTIL_BSWAP_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/buffer.h b/Externals/ffmpeg/dev/include/libavutil/buffer.h
new file mode 100644
index 0000000000..b4399fd39f
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/buffer.h
@@ -0,0 +1,274 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_buffer
+ * refcounted data buffer API
+ */
+
+#ifndef AVUTIL_BUFFER_H
+#define AVUTIL_BUFFER_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_buffer AVBuffer
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBuffer is an API for reference-counted data buffers.
+ *
+ * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer
+ * represents the data buffer itself; it is opaque and not meant to be accessed
+ * by the caller directly, but only through AVBufferRef. However, the caller may
+ * e.g. compare two AVBuffer pointers to check whether two different references
+ * are describing the same data buffer. AVBufferRef represents a single
+ * reference to an AVBuffer and it is the object that may be manipulated by the
+ * caller directly.
+ *
+ * There are two functions provided for creating a new AVBuffer with a single
+ * reference -- av_buffer_alloc() to just allocate a new buffer, and
+ * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing
+ * reference, additional references may be created with av_buffer_ref().
+ * Use av_buffer_unref() to free a reference (this will automatically free the
+ * data once all the references are freed).
+ *
+ * The convention throughout this API and the rest of FFmpeg is such that the
+ * buffer is considered writable if there exists only one reference to it (and
+ * it has not been marked as read-only). The av_buffer_is_writable() function is
+ * provided to check whether this is true and av_buffer_make_writable() will
+ * automatically create a new writable buffer when necessary.
+ * Of course nothing prevents the calling code from violating this convention,
+ * however that is safe only when all the existing references are under its
+ * control.
+ *
+ * @note Referencing and unreferencing the buffers is thread-safe and thus
+ * may be done from multiple threads simultaneously without any need for
+ * additional locking.
+ *
+ * @note Two different references to the same buffer can point to different
+ * parts of the buffer (i.e. their AVBufferRef.data will not be equal).
+ */
+
+/**
+ * A reference counted buffer type. It is opaque and is meant to be used through
+ * references (AVBufferRef).
+ */
+typedef struct AVBuffer AVBuffer;
+
+/**
+ * A reference to a data buffer.
+ *
+ * The size of this struct is not a part of the public ABI and it is not meant
+ * to be allocated directly.
+ */
+typedef struct AVBufferRef {
+ AVBuffer *buffer;
+
+ /**
+ * The data buffer. It is considered writable if and only if
+ * this is the only reference to the buffer, in which case
+ * av_buffer_is_writable() returns 1.
+ */
+ uint8_t *data;
+ /**
+ * Size of data in bytes.
+ */
+ int size;
+} AVBufferRef;
+
+/**
+ * Allocate an AVBuffer of the given size using av_malloc().
+ *
+ * @return an AVBufferRef of given size or NULL when out of memory
+ */
+AVBufferRef *av_buffer_alloc(int size);
+
+/**
+ * Same as av_buffer_alloc(), except the returned buffer will be initialized
+ * to zero.
+ */
+AVBufferRef *av_buffer_allocz(int size);
+
+/**
+ * Always treat the buffer as read-only, even when it has only one
+ * reference.
+ */
+#define AV_BUFFER_FLAG_READONLY (1 << 0)
+
+/**
+ * Create an AVBuffer from an existing array.
+ *
+ * If this function is successful, data is owned by the AVBuffer. The caller may
+ * only access data through the returned AVBufferRef and references derived from
+ * it.
+ * If this function fails, data is left untouched.
+ * @param data data array
+ * @param size size of data in bytes
+ * @param free a callback for freeing this buffer's data
+ * @param opaque parameter to be got for processing or passed to free
+ * @param flags a combination of AV_BUFFER_FLAG_*
+ *
+ * @return an AVBufferRef referring to data on success, NULL on failure.
+ */
+AVBufferRef *av_buffer_create(uint8_t *data, int size,
+ void (*free)(void *opaque, uint8_t *data),
+ void *opaque, int flags);
+
+/**
+ * Default free callback, which calls av_free() on the buffer data.
+ * This function is meant to be passed to av_buffer_create(), not called
+ * directly.
+ */
+void av_buffer_default_free(void *opaque, uint8_t *data);
+
+/**
+ * Create a new reference to an AVBuffer.
+ *
+ * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on
+ * failure.
+ */
+AVBufferRef *av_buffer_ref(AVBufferRef *buf);
+
+/**
+ * Free a given reference and automatically free the buffer if there are no more
+ * references to it.
+ *
+ * @param buf the reference to be freed. The pointer is set to NULL on return.
+ */
+void av_buffer_unref(AVBufferRef **buf);
+
+/**
+ * @return 1 if the caller may write to the data referred to by buf (which is
+ * true if and only if buf is the only reference to the underlying AVBuffer).
+ * Return 0 otherwise.
+ * A positive answer is valid until av_buffer_ref() is called on buf.
+ */
+int av_buffer_is_writable(const AVBufferRef *buf);
+
+/**
+ * @return the opaque parameter set by av_buffer_create.
+ */
+void *av_buffer_get_opaque(const AVBufferRef *buf);
+
+int av_buffer_get_ref_count(const AVBufferRef *buf);
+
+/**
+ * Create a writable reference from a given buffer reference, avoiding data copy
+ * if possible.
+ *
+ * @param buf buffer reference to make writable. On success, buf is either left
+ * untouched, or it is unreferenced and a new writable AVBufferRef is
+ * written in its place. On failure, buf is left untouched.
+ * @return 0 on success, a negative AVERROR on failure.
+ */
+int av_buffer_make_writable(AVBufferRef **buf);
+
+/**
+ * Reallocate a given buffer.
+ *
+ * @param buf a buffer reference to reallocate. On success, buf will be
+ * unreferenced and a new reference with the required size will be
+ * written in its place. On failure buf will be left untouched. *buf
+ * may be NULL, then a new buffer is allocated.
+ * @param size required new buffer size.
+ * @return 0 on success, a negative AVERROR on failure.
+ *
+ * @note the buffer is actually reallocated with av_realloc() only if it was
+ * initially allocated through av_buffer_realloc(NULL) and there is only one
+ * reference to it (i.e. the one passed to this function). In all other cases
+ * a new buffer is allocated and the data is copied.
+ */
+int av_buffer_realloc(AVBufferRef **buf, int size);
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup lavu_bufferpool AVBufferPool
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers.
+ *
+ * Frequently allocating and freeing large buffers may be slow. AVBufferPool is
+ * meant to solve this in cases when the caller needs a set of buffers of the
+ * same size (the most obvious use case being buffers for raw video or audio
+ * frames).
+ *
+ * At the beginning, the user must call av_buffer_pool_init() to create the
+ * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to
+ * get a reference to a new buffer, similar to av_buffer_alloc(). This new
+ * reference works in all aspects the same way as the one created by
+ * av_buffer_alloc(). However, when the last reference to this buffer is
+ * unreferenced, it is returned to the pool instead of being freed and will be
+ * reused for subsequent av_buffer_pool_get() calls.
+ *
+ * When the caller is done with the pool and no longer needs to allocate any new
+ * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable.
+ * Once all the buffers are released, it will automatically be freed.
+ *
+ * Allocating and releasing buffers with this API is thread-safe as long as
+ * either the default alloc callback is used, or the user-supplied one is
+ * thread-safe.
+ */
+
+/**
+ * The buffer pool. This structure is opaque and not meant to be accessed
+ * directly. It is allocated with av_buffer_pool_init() and freed with
+ * av_buffer_pool_uninit().
+ */
+typedef struct AVBufferPool AVBufferPool;
+
+/**
+ * Allocate and initialize a buffer pool.
+ *
+ * @param size size of each buffer in this pool
+ * @param alloc a function that will be used to allocate new buffers when the
+ * pool is empty. May be NULL, then the default allocator will be used
+ * (av_buffer_alloc()).
+ * @return newly created buffer pool on success, NULL on error.
+ */
+AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size));
+
+/**
+ * Mark the pool as being available for freeing. It will actually be freed only
+ * once all the allocated buffers associated with the pool are released. Thus it
+ * is safe to call this function while some of the allocated buffers are still
+ * in use.
+ *
+ * @param pool pointer to the pool to be freed. It will be set to NULL.
+ * @see av_buffer_pool_can_uninit()
+ */
+void av_buffer_pool_uninit(AVBufferPool **pool);
+
+/**
+ * Allocate a new AVBuffer, reusing an old buffer from the pool when available.
+ * This function may be called simultaneously from multiple threads.
+ *
+ * @return a reference to the new buffer on success, NULL on error.
+ */
+AVBufferRef *av_buffer_pool_get(AVBufferPool *pool);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_BUFFER_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/camellia.h b/Externals/ffmpeg/dev/include/libavutil/camellia.h
new file mode 100644
index 0000000000..e674c9b9a4
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/camellia.h
@@ -0,0 +1,70 @@
+/*
+ * An implementation of the CAMELLIA algorithm as mentioned in RFC3713
+ * Copyright (c) 2014 Supraja Meedinti
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CAMELLIA_H
+#define AVUTIL_CAMELLIA_H
+
+#include <stdint.h>
+
+
+/**
+ * @file
+ * @brief Public header for libavutil CAMELLIA algorithm
+ * @defgroup lavu_camellia CAMELLIA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_camellia_size;
+
+struct AVCAMELLIA;
+
+/**
+ * Allocate an AVCAMELLIA context
+ * To free the struct: av_free(ptr)
+ */
+struct AVCAMELLIA *av_camellia_alloc(void);
+
+/**
+ * Initialize an AVCAMELLIA context.
+ *
+ * @param ctx an AVCAMELLIA context
+ * @param key a key of 16, 24, 32 bytes used for encryption/decryption
+ * @param key_bits number of keybits: possible are 128, 192, 256
+ */
+int av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context
+ *
+ * @param ctx an AVCAMELLIA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 16 byte blocks
+ * @param iv initialization vector for CBC mode, NULL for ECB mode
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt);
+
+/**
+ * @}
+ */
+#endif /* AVUTIL_CAMELLIA_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/cast5.h b/Externals/ffmpeg/dev/include/libavutil/cast5.h
new file mode 100644
index 0000000000..e5cc8b1102
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/cast5.h
@@ -0,0 +1,79 @@
+/*
+ * An implementation of the CAST128 algorithm as mentioned in RFC2144
+ * Copyright (c) 2014 Supraja Meedinti
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CAST5_H
+#define AVUTIL_CAST5_H
+
+#include <stdint.h>
+
+
+/**
+ * @file
+ * @brief Public header for libavutil CAST5 algorithm
+ * @defgroup lavu_cast5 CAST5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_cast5_size;
+
+struct AVCAST5;
+
+/**
+ * Allocate an AVCAST5 context
+ * To free the struct: av_free(ptr)
+ */
+struct AVCAST5 *av_cast5_alloc(void);
+/**
+ * Initialize an AVCAST5 context.
+ *
+ * @param ctx an AVCAST5 context
+ * @param key a key of 5,6,...16 bytes used for encryption/decryption
+ * @param key_bits number of keybits: possible are 40,48,...,128
+ */
+int av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context, ECB mode only
+ *
+ * @param ctx an AVCAST5 context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, int decrypt);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context
+ *
+ * @param ctx an AVCAST5 context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, NULL for ECB mode
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt);
+/**
+ * @}
+ */
+#endif /* AVUTIL_CAST5_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/channel_layout.h b/Externals/ffmpeg/dev/include/libavutil/channel_layout.h
new file mode 100644
index 0000000000..dea4d6093d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/channel_layout.h
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2006 Michael Niedermayer
+ * Copyright (c) 2008 Peter Ross
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bits integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channels
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT 0x00000001
+#define AV_CH_FRONT_RIGHT 0x00000002
+#define AV_CH_FRONT_CENTER 0x00000004
+#define AV_CH_LOW_FREQUENCY 0x00000008
+#define AV_CH_BACK_LEFT 0x00000010
+#define AV_CH_BACK_RIGHT 0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define AV_CH_BACK_CENTER 0x00000100
+#define AV_CH_SIDE_LEFT 0x00000200
+#define AV_CH_SIDE_RIGHT 0x00000400
+#define AV_CH_TOP_CENTER 0x00000800
+#define AV_CH_TOP_FRONT_LEFT 0x00001000
+#define AV_CH_TOP_FRONT_CENTER 0x00002000
+#define AV_CH_TOP_FRONT_RIGHT 0x00004000
+#define AV_CH_TOP_BACK_LEFT 0x00008000
+#define AV_CH_TOP_BACK_CENTER 0x00010000
+#define AV_CH_TOP_BACK_RIGHT 0x00020000
+#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL
+#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL
+#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL
+#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL
+#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL
+
+/** Channel mask value used for AVCodecContext.request_channel_layout
+ to indicate that the user requests the channel order of the decoder output
+ to be the native codec channel order. */
+#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL
+
+/**
+ * @}
+ * @defgroup channel_mask_c Audio channel layouts
+ * @{
+ * */
+#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT)
+#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER)
+#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT)
+#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER)
+#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY)
+#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER)
+#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT)
+#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT)
+
+enum AVMatrixEncoding {
+ AV_MATRIX_ENCODING_NONE,
+ AV_MATRIX_ENCODING_DOLBY,
+ AV_MATRIX_ENCODING_DPLII,
+ AV_MATRIX_ENCODING_DPLIIX,
+ AV_MATRIX_ENCODING_DPLIIZ,
+ AV_MATRIX_ENCODING_DOLBYEX,
+ AV_MATRIX_ENCODING_DOLBYHEADPHONE,
+ AV_MATRIX_ENCODING_NB
+};
+
+/**
+ * Return a channel layout id that matches name, or 0 if no match is found.
+ *
+ * name can be one or several of the following notations,
+ * separated by '+' or '|':
+ * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0,
+ * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix);
+ * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC,
+ * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR);
+ * - a number of channels, in decimal, optionally followed by 'c', yielding
+ * the default channel layout for that number of channels (@see
+ * av_get_default_channel_layout);
+ * - a channel layout mask, in hexadecimal starting with "0x" (see the
+ * AV_CH_* macros).
+ *
+ * @warning Starting from the next major bump the trailing character
+ * 'c' to specify a number of channels will be required, while a
+ * channel layout mask could also be specified as a decimal number
+ * (if and only if not followed by "c").
+ *
+ * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7"
+ */
+uint64_t av_get_channel_layout(const char *name);
+
+/**
+ * Return a description of a channel layout.
+ * If nb_channels is <= 0, it is guessed from the channel_layout.
+ *
+ * @param buf put here the string containing the channel layout
+ * @param buf_size size in bytes of the buffer
+ */
+void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout);
+
+struct AVBPrint;
+/**
+ * Append a description of a channel layout to a bprint buffer.
+ */
+void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout);
+
+/**
+ * Return the number of channels in the channel layout.
+ */
+int av_get_channel_layout_nb_channels(uint64_t channel_layout);
+
+/**
+ * Return default channel layout for a given number of channels.
+ */
+int64_t av_get_default_channel_layout(int nb_channels);
+
+/**
+ * Get the index of a channel in channel_layout.
+ *
+ * @param channel a channel layout describing exactly one channel which must be
+ * present in channel_layout.
+ *
+ * @return index of channel in channel_layout on success, a negative AVERROR
+ * on error.
+ */
+int av_get_channel_layout_channel_index(uint64_t channel_layout,
+ uint64_t channel);
+
+/**
+ * Get the channel with the given index in channel_layout.
+ */
+uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index);
+
+/**
+ * Get the name of a given channel.
+ *
+ * @return channel name on success, NULL on error.
+ */
+const char *av_get_channel_name(uint64_t channel);
+
+/**
+ * Get the description of a given channel.
+ *
+ * @param channel a channel layout with a single channel
+ * @return channel description on success, NULL on error
+ */
+const char *av_get_channel_description(uint64_t channel);
+
+/**
+ * Get the value and name of a standard channel layout.
+ *
+ * @param[in] index index in an internal list, starting at 0
+ * @param[out] layout channel layout mask
+ * @param[out] name name of the layout
+ * @return 0 if the layout exists,
+ * <0 if index is beyond the limits
+ */
+int av_get_standard_channel_layout(unsigned index, uint64_t *layout,
+ const char **name);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* AVUTIL_CHANNEL_LAYOUT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/common.h b/Externals/ffmpeg/dev/include/libavutil/common.h
new file mode 100644
index 0000000000..c82a3a6240
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/common.h
@@ -0,0 +1,469 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)
+#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "version.h"
+#include "libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+/* assume a>0 and b>0 */
+#define FF_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
+ : ((a) + (1<<(b)) - 1) >> (b))
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+#define FFMAX(a,b) ((a) > (b) ? (a) : (b))
+#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c)
+#define FFMIN(a,b) ((a) > (b) ? (b) : (a))
+#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c)
+
+#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0)
+#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
+#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1))
+
+/* misc math functions */
+
+/**
+ * Reverse the order of the bits of an 8-bits unsigned integer.
+ */
+#if FF_API_AV_REVERSE
+extern attribute_deprecated const uint8_t av_reverse[256];
+#endif
+
+#ifdef HAVE_AV_CONFIG_H
+# include "config.h"
+# include "intmath.h"
+#endif
+
+/* Pull in unguarded fallback defines at the end of this file. */
+#include "common.h"
+
+#ifndef av_log2
+av_const int av_log2(unsigned v);
+#endif
+
+#ifndef av_log2_16bit
+av_const int av_log2_16bit(unsigned v);
+#endif
+
+/**
+ * Clip a signed integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_c(int a, int amin, int amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed 64bit integer value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-255 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint8_t av_clip_uint8_c(int a)
+{
+ if (a&(~0xFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -128,127 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int8_t av_clip_int8_c(int a)
+{
+ if ((a+0x80) & ~0xFF) return (a>>31) ^ 0x7F;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the 0-65535 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const uint16_t av_clip_uint16_c(int a)
+{
+ if (a&(~0xFFFF)) return (-a)>>31;
+ else return a;
+}
+
+/**
+ * Clip a signed integer value into the -32768,32767 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int16_t av_clip_int16_c(int a)
+{
+ if ((a+0x8000) & ~0xFFFF) return (a>>31) ^ 0x7FFF;
+ else return a;
+}
+
+/**
+ * Clip a signed 64-bit integer value into the -2147483648,2147483647 range.
+ * @param a value to clip
+ * @return clipped value
+ */
+static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a)
+{
+ if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF);
+ else return (int32_t)a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+    if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+    else                   return  a;
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param  a one value
+ * @param  b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+    return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param  a first value
+ * @param  b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+    return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if (a < amin) return amin;
+    else if (a > amax) return amax;
+    else return a;
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+ if (amin > amax) abort();
+#endif
+ if (a < amin) return amin;
+ else if (a > amax) return amax;
+ else return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x))
+ * @return computed ceiling of log2(x)
+ */
+static av_always_inline av_const int av_ceil_log2_c(int x)
+{
+ return av_log2((x - 1) << 1);
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount_c(uint32_t x)
+{
+ x -= (x >> 1) & 0x55555555;
+ x = (x & 0x33333333) + ((x >> 2) & 0x33333333);
+ x = (x + (x >> 4)) & 0x0F0F0F0F;
+ x += x >> 8;
+ return (x + (x >> 16)) & 0x3F;
+}
+
+/**
+ * Count number of bits set to one in x
+ * @param x value to count bits of
+ * @return the number of bits set to one in x
+ */
+static av_always_inline av_const int av_popcount64_c(uint64_t x)
+{
+ return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32));
+}
+
+#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24))
+#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24))
+
+/**
+ * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_BYTE Expression reading one byte from the input.
+ * Evaluated up to 7 times (4 for the currently
+ * assigned Unicode range). With a memory buffer
+ * input, this could be *ptr++.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ *
+ * @warning ERROR should not contain a loop control statement which
+ * could interact with the internal while loop, and should force an
+ * exit from the macro code (e.g. through a goto or a return) in order
+ * to prevent undefined results.
+ */
+#define GET_UTF8(val, GET_BYTE, ERROR)\
+ val= GET_BYTE;\
+ {\
+ uint32_t top = (val & 128) >> 1;\
+ if ((val & 0xc0) == 0x80 || val >= 0xFE)\
+ ERROR\
+ while (val & top) {\
+ int tmp= GET_BYTE - 128;\
+ if(tmp>>6)\
+ ERROR\
+ val= (val<<6) + tmp;\
+ top <<= 5;\
+ }\
+ val &= (top << 1) - 1;\
+ }
+
+/**
+ * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form.
+ *
+ * @param val Output value, must be an lvalue of type uint32_t.
+ * @param GET_16BIT Expression returning two bytes of UTF-16 data converted
+ * to native byte order. Evaluated one or two times.
+ * @param ERROR Expression to be evaluated on invalid input,
+ * typically a goto statement.
+ */
+#define GET_UTF16(val, GET_16BIT, ERROR)\
+ val = GET_16BIT;\
+ {\
+ unsigned int hi = val - 0xD800;\
+ if (hi < 0x800) {\
+ val = GET_16BIT - 0xDC00;\
+ if (val > 0x3FFU || hi > 0x3FFU)\
+ ERROR\
+ val += (hi<<10) + 0x10000;\
+ }\
+ }\
+
+/**
+ * @def PUT_UTF8(val, tmp, PUT_BYTE)
+ * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint8_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_BYTE.
+ * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination.
+ * It could be a function or a statement, and uses tmp as the input byte.
+ * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be
+ * executed up to 4 times for values in the valid UTF-8 range and up to
+ * 7 times in the general case, depending on the length of the converted
+ * Unicode character.
+ */
+#define PUT_UTF8(val, tmp, PUT_BYTE)\
+ {\
+ int bytes, shift;\
+ uint32_t in = val;\
+ if (in < 0x80) {\
+ tmp = in;\
+ PUT_BYTE\
+ } else {\
+ bytes = (av_log2(in) + 4) / 5;\
+ shift = (bytes - 1) * 6;\
+ tmp = (256 - (256 >> bytes)) | (in >> shift);\
+ PUT_BYTE\
+ while (shift >= 6) {\
+ shift -= 6;\
+ tmp = 0x80 | ((in >> shift) & 0x3f);\
+ PUT_BYTE\
+ }\
+ }\
+ }
+
+/**
+ * @def PUT_UTF16(val, tmp, PUT_16BIT)
+ * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes).
+ * @param val is an input-only argument and should be of type uint32_t. It holds
+ * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If
+ * val is given as a function it is executed only once.
+ * @param tmp is a temporary variable and should be of type uint16_t. It
+ * represents an intermediate value during conversion that is to be
+ * output by PUT_16BIT.
+ * @param PUT_16BIT writes the converted UTF-16 data to any proper destination
+ * in desired endianness. It could be a function or a statement, and uses tmp
+ * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;"
+ * PUT_BYTE will be executed 1 or 2 times depending on input character.
+ */
+#define PUT_UTF16(val, tmp, PUT_16BIT)\
+ {\
+ uint32_t in = val;\
+ if (in < 0x10000) {\
+ tmp = in;\
+ PUT_16BIT\
+ } else {\
+ tmp = 0xD800 | ((in - 0x10000) >> 10);\
+ PUT_16BIT\
+ tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\
+ PUT_16BIT\
+ }\
+ }\
+
+
+
+#include "mem.h"
+
+#ifdef HAVE_AV_CONFIG_H
+# include "internal.h"
+#endif /* HAVE_AV_CONFIG_H */
+
+#endif /* AVUTIL_COMMON_H */
+
+/*
+ * The following definitions are outside the multiple inclusion guard
+ * to ensure they are immediately available in intmath.h.
+ */
+
+#ifndef av_ceil_log2
+# define av_ceil_log2 av_ceil_log2_c
+#endif
+#ifndef av_clip
+# define av_clip av_clip_c
+#endif
+#ifndef av_clip64
+# define av_clip64 av_clip64_c
+#endif
+#ifndef av_clip_uint8
+# define av_clip_uint8 av_clip_uint8_c
+#endif
+#ifndef av_clip_int8
+# define av_clip_int8 av_clip_int8_c
+#endif
+#ifndef av_clip_uint16
+# define av_clip_uint16 av_clip_uint16_c
+#endif
+#ifndef av_clip_int16
+# define av_clip_int16 av_clip_int16_c
+#endif
+#ifndef av_clipl_int32
+# define av_clipl_int32 av_clipl_int32_c
+#endif
+#ifndef av_clip_uintp2
+# define av_clip_uintp2 av_clip_uintp2_c
+#endif
+#ifndef av_sat_add32
+# define av_sat_add32 av_sat_add32_c
+#endif
+#ifndef av_sat_dadd32
+# define av_sat_dadd32 av_sat_dadd32_c
+#endif
+#ifndef av_clipf
+# define av_clipf av_clipf_c
+#endif
+#ifndef av_clipd
+# define av_clipd av_clipd_c
+#endif
+#ifndef av_popcount
+# define av_popcount av_popcount_c
+#endif
+#ifndef av_popcount64
+# define av_popcount64 av_popcount64_c
+#endif
diff --git a/Externals/ffmpeg/dev/include/libavutil/cpu.h b/Externals/ffmpeg/dev/include/libavutil/cpu.h
new file mode 100644
index 0000000000..277e489788
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/cpu.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (c) 2000, 2001, 2002 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CPU_H
+#define AVUTIL_CPU_H
+
+#include "attributes.h"
+
+#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */
+
+ /* lower 16 bits - CPU features */
+#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX
+#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext
+#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW
+#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions
+#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions
+#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt
+#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions
+#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster
+ ///< than regular MMX/SSE (e.g. Core1)
+#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions
+#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower
+#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions
+#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions
+#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions
+#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions
+// #if LIBAVUTIL_VERSION_MAJOR <52
+#define AV_CPU_FLAG_CMOV 0x1001000 ///< supports cmov instruction
+// #else
+// #define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction
+// #endif
+#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used
+#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions
+#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1
+#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2
+
+#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard
+
+#define AV_CPU_FLAG_ARMV5TE (1 << 0)
+#define AV_CPU_FLAG_ARMV6 (1 << 1)
+#define AV_CPU_FLAG_ARMV6T2 (1 << 2)
+#define AV_CPU_FLAG_VFP (1 << 3)
+#define AV_CPU_FLAG_VFPV3 (1 << 4)
+#define AV_CPU_FLAG_NEON (1 << 5)
+#define AV_CPU_FLAG_ARMV8 (1 << 6)
+#define AV_CPU_FLAG_SETEND (1 <<16)
+
+/**
+ * Return the flags which specify extensions supported by the CPU.
+ * The returned value is affected by av_force_cpu_flags() if that was used
+ * before. So av_get_cpu_flags() can easily be used in a application to
+ * detect the enabled cpu flags.
+ */
+int av_get_cpu_flags(void);
+
+/**
+ * Disables cpu detection and forces the specified flags.
+ * -1 is a special case that disables forcing of specific flags.
+ */
+void av_force_cpu_flags(int flags);
+
+/**
+ * Set a mask on flags returned by av_get_cpu_flags().
+ * This function is mainly useful for testing.
+ * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible
+ *
+ * @warning this function is not thread safe.
+ */
+attribute_deprecated void av_set_cpu_flags_mask(int mask);
+
+/**
+ * Parse CPU flags from a string.
+ *
+ * The returned flags contain the specified flags as well as related unspecified flags.
+ *
+ * This function exists only for compatibility with libav.
+ * Please use av_parse_cpu_caps() when possible.
+ * @return a combination of AV_CPU_* flags, negative on error.
+ */
+attribute_deprecated
+int av_parse_cpu_flags(const char *s);
+
+/**
+ * Parse CPU caps from a string and update the given AV_CPU_* flags based on that.
+ *
+ * @return negative on error.
+ */
+int av_parse_cpu_caps(unsigned *flags, const char *s);
+
+/**
+ * @return the number of logical CPU cores present.
+ */
+int av_cpu_count(void);
+
+#endif /* AVUTIL_CPU_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/crc.h b/Externals/ffmpeg/dev/include/libavutil/crc.h
new file mode 100644
index 0000000000..e86bf1deba
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/crc.h
@@ -0,0 +1,86 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CRC_H
+#define AVUTIL_CRC_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include "attributes.h"
+
+/**
+ * @defgroup lavu_crc32 CRC32
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef uint32_t AVCRC;
+
+typedef enum {
+ AV_CRC_8_ATM,
+ AV_CRC_16_ANSI,
+ AV_CRC_16_CCITT,
+ AV_CRC_32_IEEE,
+ AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */
+ AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */
+ AV_CRC_24_IEEE = 12,
+ AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */
+}AVCRCId;
+
+/**
+ * Initialize a CRC table.
+ * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024
+ * @param le If 1, the lowest bit represents the coefficient for the highest
+ * exponent of the corresponding polynomial (both for poly and
+ * actual CRC).
+ * If 0, you must swap the CRC parameter and the result of av_crc
+ * if you need the standard representation (can be simplified in
+ * most cases to e.g. bswap16):
+ * av_bswap32(crc << (32-bits))
+ * @param bits number of bits for the CRC
+ * @param poly generator polynomial without the x**bits coefficient, in the
+ * representation as specified by le
+ * @param ctx_size size of ctx in bytes
+ * @return <0 on failure
+ */
+int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size);
+
+/**
+ * Get an initialized standard CRC table.
+ * @param crc_id ID of a standard CRC
+ * @return a pointer to the CRC table or NULL on failure
+ */
+const AVCRC *av_crc_get_table(AVCRCId crc_id);
+
+/**
+ * Calculate the CRC of a block.
+ * @param crc CRC of previous blocks if any or initial value for CRC
+ * @return CRC updated with the data from the given block
+ *
+ * @see av_crc_init() "le" parameter
+ */
+uint32_t av_crc(const AVCRC *ctx, uint32_t crc,
+ const uint8_t *buffer, size_t length) av_pure;
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_CRC_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/dict.h b/Externals/ffmpeg/dev/include/libavutil/dict.h
new file mode 100644
index 0000000000..f2df687c03
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/dict.h
@@ -0,0 +1,196 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Public dictionary API.
+ * @deprecated
+ * AVDictionary is provided for compatibility with libav. It is both in
+ * implementation as well as API inefficient. It does not scale and is
+ * extremely slow with large dictionaries.
+ * It is recommended that new code uses our tree container from tree.c/h
+ * where applicable, which uses AVL trees to achieve O(log n) performance.
+ */
+
+#ifndef AVUTIL_DICT_H
+#define AVUTIL_DICT_H
+
+#include <stdint.h>
+
+#include "version.h"
+
+/**
+ * @addtogroup lavu_dict AVDictionary
+ * @ingroup lavu_data
+ *
+ * @brief Simple key:value store
+ *
+ * @{
+ * Dictionaries are used for storing key:value pairs. To create
+ * an AVDictionary, simply pass an address of a NULL pointer to
+ * av_dict_set(). NULL can be used as an empty dictionary wherever
+ * a pointer to an AVDictionary is required.
+ * Use av_dict_get() to retrieve an entry or iterate over all
+ * entries and finally av_dict_free() to free the dictionary
+ * and all its contents.
+ *
+ @code
+ AVDictionary *d = NULL; // "create" an empty dictionary
+ AVDictionaryEntry *t = NULL;
+
+ av_dict_set(&d, "foo", "bar", 0); // add an entry
+
+ char *k = av_strdup("key"); // if your strings are already allocated,
+ char *v = av_strdup("value"); // you can avoid copying them like this
+ av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
+
+ while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) {
+ <....> // iterate over all entries in d
+ }
+ av_dict_free(&d);
+ @endcode
+ *
+ */
+
+#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). */
+#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key,
+ ignoring the suffix of the found key string. Only relevant in av_dict_get(). */
+#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been
+ allocated with av_malloc() or another memory allocation function. */
+#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries.
+#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no
+ delimiter is added, the strings are simply concatenated. */
+
+typedef struct AVDictionaryEntry {
+ char *key;
+ char *value;
+} AVDictionaryEntry;
+
+typedef struct AVDictionary AVDictionary;
+
+/**
+ * Get a dictionary entry with matching key.
+ *
+ * The returned entry key or value must not be changed, or it will
+ * cause undefined behavior.
+ *
+ * To iterate through all the dictionary entries, you can set the matching key
+ * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag.
+ *
+ * @param prev Set to the previous matching element to find the next.
+ * If set to NULL the first matching element is returned.
+ * @param key matching key
+ * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved
+ * @return found entry or NULL in case no matching entry was found in the dictionary
+ */
+AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key,
+ const AVDictionaryEntry *prev, int flags);
+
+/**
+ * Get number of entries in dictionary.
+ *
+ * @param m dictionary
+ * @return number of entries in dictionary
+ */
+int av_dict_count(const AVDictionary *m);
+
+/**
+ * Set the given entry in *pm, overwriting an existing entry.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set,
+ * these arguments will be freed on error.
+ *
+ * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL
+ * a dictionary struct is allocated and put in *pm.
+ * @param key entry key to add to *pm (will be av_strduped depending on flags)
+ * @param value entry value to add to *pm (will be av_strduped depending on flags).
+ * Passing a NULL value will cause an existing entry to be deleted.
+ * @return >= 0 on success otherwise an error code <0
+ */
+int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags);
+
+/**
+ * Convenience wrapper for av_dict_set that converts the value to a string
+ * and stores it.
+ *
+ * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error.
+ */
+int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags);
+
+/**
+ * Parse the key/value pairs list and add the parsed entries to a dictionary.
+ *
+ * In case of failure, all the successfully set entries are stored in
+ * *pm. You may need to manually free the created dictionary.
+ *
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @param flags flags to use when adding to dictionary.
+ * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL
+ * are ignored since the key/value tokens will always
+ * be duplicated.
+ * @return 0 on success, negative AVERROR code on failure
+ */
+int av_dict_parse_string(AVDictionary **pm, const char *str,
+ const char *key_val_sep, const char *pairs_sep,
+ int flags);
+
+/**
+ * Copy entries from one AVDictionary struct into another.
+ * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL,
+ * this function will allocate a struct for you and put it in *dst
+ * @param src pointer to source AVDictionary struct
+ * @param flags flags to use when setting entries in *dst
+ * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag
+ */
+void av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags);
+
+/**
+ * Free all the memory allocated for an AVDictionary struct
+ * and all keys and values.
+ */
+void av_dict_free(AVDictionary **m);
+
+/**
+ * Get dictionary entries as a string.
+ *
+ * Create a string containing dictionary's entries.
+ * Such string may be passed back to av_dict_parse_string().
+ * @note String is escaped with backslashes ('\').
+ *
+ * @param[in] m dictionary
+ * @param[out] buffer Pointer to buffer that will be allocated with string containing entries.
+ * Buffer must be freed by the caller when is no longer needed.
+ * @param[in] key_val_sep character used to separate key from value
+ * @param[in] pairs_sep character used to separate two pairs from each other
+ * @return >= 0 on success, negative on error
+ * @warning Separators cannot be '\\' or '\0'. They also cannot be the same.
+ */
+int av_dict_get_string(const AVDictionary *m, char **buffer,
+ const char key_val_sep, const char pairs_sep);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DICT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/display.h b/Externals/ffmpeg/dev/include/libavutil/display.h
new file mode 100644
index 0000000000..2cb930dde1
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/display.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (c) 2014 Vittorio Giovara
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DISPLAY_H
+#define AVUTIL_DISPLAY_H
+
+#include <stdint.h>
+
+/**
+ * The display transformation matrix specifies an affine transformation that
+ * should be applied to video frames for correct presentation. It is compatible
+ * with the matrices stored in the ISO/IEC 14496-12 container format.
+ *
+ * The data is a 3x3 matrix represented as a 9-element array:
+ *
+ * | a b u |
+ * (a, b, u, c, d, v, x, y, w) -> | c d v |
+ * | x y w |
+ *
+ * All numbers are stored in native endianness, as 16.16 fixed-point values,
+ * except for u, v and w, which are stored as 2.30 fixed-point values.
+ *
+ * The transformation maps a point (p, q) in the source (pre-transformation)
+ * frame to the point (p', q') in the destination (post-transformation) frame as
+ * follows:
+ * | a b u |
+ * (p, q, 1) . | c d v | = z * (p', q', 1)
+ * | x y w |
+ *
+ * The transformation can also be more explicitly written in components as
+ * follows:
+ * p' = (a * p + c * q + x) / z;
+ * q' = (b * p + d * q + y) / z;
+ * z = u * p + v * q + w
+ */
+
+/**
+ * Extract the rotation component of the transformation matrix.
+ *
+ * @param matrix the transformation matrix
+ * @return the angle (in degrees) by which the transformation rotates the frame.
+ * The angle will be in range [-180.0, 180.0], or NaN if the matrix is
+ * singular.
+ *
+ * @note floating point numbers are inherently inexact, so callers are
+ * recommended to round the return value to nearest integer before use.
+ */
+double av_display_rotation_get(const int32_t matrix[9]);
+
+/**
+ * Initialize a transformation matrix describing a pure rotation by the
+ * specified angle (in degrees).
+ *
+ * @param matrix an allocated transformation matrix (will be fully overwritten
+ * by this function)
+ * @param angle rotation angle in degrees.
+ */
+void av_display_rotation_set(int32_t matrix[9], double angle);
+
+/**
+ * Flip the input matrix horizontally and/or vertically.
+ *
+ * @param matrix an allocated transformation matrix
+ * @param hflip whether the matrix should be flipped horizontally
+ * @param vflip whether the matrix should be flipped vertically
+ */
+void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip);
+
+#endif /* AVUTIL_DISPLAY_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/downmix_info.h b/Externals/ffmpeg/dev/include/libavutil/downmix_info.h
new file mode 100644
index 0000000000..221cf5bf9b
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/downmix_info.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2014 Tim Walker
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_DOWNMIX_INFO_H
+#define AVUTIL_DOWNMIX_INFO_H
+
+#include "frame.h"
+
+/**
+ * @file
+ * audio downmix metadata
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup downmix_info Audio downmix metadata
+ * @{
+ */
+
+/**
+ * Possible downmix types.
+ */
+enum AVDownmixType {
+ AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */
+ AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). */
+ AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */
+ AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */
+ AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */
+};
+
+/**
+ * This structure describes optional metadata relevant to a downmix procedure.
+ *
+ * All fields are set by the decoder to the value indicated in the audio
+ * bitstream (if present), or to a "sane" default otherwise.
+ */
+typedef struct AVDownmixInfo {
+ /**
+ * Type of downmix preferred by the mastering engineer.
+ */
+ enum AVDownmixType preferred_downmix_type;
+
+ /**
+ * Absolute scale factor representing the nominal level of the center
+ * channel during a regular downmix.
+ */
+ double center_mix_level;
+
+ /**
+ * Absolute scale factor representing the nominal level of the center
+ * channel during an Lt/Rt compatible downmix.
+ */
+ double center_mix_level_ltrt;
+
+ /**
+ * Absolute scale factor representing the nominal level of the surround
+ * channels during a regular downmix.
+ */
+ double surround_mix_level;
+
+ /**
+ * Absolute scale factor representing the nominal level of the surround
+ * channels during an Lt/Rt compatible downmix.
+ */
+ double surround_mix_level_ltrt;
+
+ /**
+ * Absolute scale factor representing the level at which the LFE data is
+ * mixed into L/R channels during downmixing.
+ */
+ double lfe_mix_level;
+} AVDownmixInfo;
+
+/**
+ * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing.
+ *
+ * If the side data is absent, it is created and added to the frame.
+ *
+ * @param frame the frame for which the side data is to be obtained or created
+ *
+ * @return the AVDownmixInfo structure to be edited by the caller, or NULL if
+ * the structure cannot be allocated.
+ */
+AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame);
+
+/**
+ * @}
+ */
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_DOWNMIX_INFO_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/error.h b/Externals/ffmpeg/dev/include/libavutil/error.h
new file mode 100644
index 0000000000..71df4da353
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/error.h
@@ -0,0 +1,126 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * error code definitions
+ */
+
+#ifndef AVUTIL_ERROR_H
+#define AVUTIL_ERROR_H
+
+#include <errno.h>
+#include <stddef.h>
+
+/**
+ * @addtogroup lavu_error
+ *
+ * @{
+ */
+
+
+/* error handling */
+#if EDOM > 0
+#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions.
+#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value.
+#else
+/* Some platforms have E* and errno already negated. */
+#define AVERROR(e) (e)
+#define AVUNERROR(e) (e)
+#endif
+
+#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d))
+
+#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found
+#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2
+#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small
+#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found
+#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found
+#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found
+#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file
+#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted
+#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library
+#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found
+#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input
+#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found
+#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found
+#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome
+#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found
+
+#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found
+/**
+ * This is semantically identical to AVERROR_BUG
+ * it has been introduced in Libav after our AVERROR_BUG and with a modified value.
+ */
+#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ')
+#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library
+#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it.
+#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED)
+#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED)
+/* HTTP & RTSP errors */
+#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0')
+#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1')
+#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3')
+#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4')
+#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X')
+#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X')
+
+#define AV_ERROR_MAX_STRING_SIZE 64
+
+/**
+ * Put a description of the AVERROR code errnum in errbuf.
+ * In case of failure the global variable errno is set to indicate the
+ * error. Even in case of failure av_strerror() will print a generic
+ * error message indicating the errnum provided to errbuf.
+ *
+ * @param errnum error code to describe
+ * @param errbuf buffer to which description is written
+ * @param errbuf_size the size in bytes of errbuf
+ * @return 0 on success, a negative value if a description for errnum
+ * cannot be found
+ */
+int av_strerror(int errnum, char *errbuf, size_t errbuf_size);
+
+/**
+ * Fill the provided buffer with a string containing an error string
+ * corresponding to the AVERROR code errnum.
+ *
+ * @param errbuf a buffer
+ * @param errbuf_size size in bytes of errbuf
+ * @param errnum error code to describe
+ * @return the buffer in input, filled with the error description
+ * @see av_strerror()
+ */
+static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum)
+{
+    av_strerror(errnum, errbuf, errbuf_size); /* on failure av_strerror still writes a generic message */
+    return errbuf;                            /* return the caller's buffer so the call can be used inline */
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_err2str(errnum) \
+ av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum)
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_ERROR_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/eval.h b/Externals/ffmpeg/dev/include/libavutil/eval.h
new file mode 100644
index 0000000000..6159b0fe58
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/eval.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (c) 2002 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * simple arithmetic expression evaluator
+ */
+
+#ifndef AVUTIL_EVAL_H
+#define AVUTIL_EVAL_H
+
+#include "avutil.h"
+
+typedef struct AVExpr AVExpr;
+
+/**
+ * Parse and evaluate an expression.
+ * Note, this is significantly slower than av_expr_eval().
+ *
+ * @param res a pointer to a double where is put the result value of
+ * the expression, or NAN in case of error
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param const_values a zero terminated array of values for the identifiers from const_names
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @param log_ctx parent logging context
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse_and_eval(double *res, const char *s,
+ const char * const *const_names, const double *const_values,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ void *opaque, int log_offset, void *log_ctx);
+
+/**
+ * Parse an expression.
+ *
+ * @param expr a pointer where is put an AVExpr containing the parsed
+ * value in case of successful parsing, or NULL otherwise.
+ * The pointed to AVExpr must be freed with av_expr_free() by the user
+ * when it is not needed anymore.
+ * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)"
+ * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0}
+ * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers
+ * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument
+ * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers
+ * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments
+ * @param log_ctx parent logging context
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_expr_parse(AVExpr **expr, const char *s,
+ const char * const *const_names,
+ const char * const *func1_names, double (* const *funcs1)(void *, double),
+ const char * const *func2_names, double (* const *funcs2)(void *, double, double),
+ int log_offset, void *log_ctx);
+
+/**
+ * Evaluate a previously parsed expression.
+ *
+ * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names
+ * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2
+ * @return the value of the expression
+ */
+double av_expr_eval(AVExpr *e, const double *const_values, void *opaque);
+
+/**
+ * Free a parsed expression previously created with av_expr_parse().
+ */
+void av_expr_free(AVExpr *e);
+
+/**
+ * Parse the string in numstr and return its value as a double. If
+ * the string is empty, contains only whitespaces, or does not contain
+ * an initial substring that has the expected syntax for a
+ * floating-point number, no conversion is performed. In this case,
+ * returns a value of zero and the value returned in tail is the value
+ * of numstr.
+ *
+ * @param numstr a string representing a number, may contain one of
+ * the International System number postfixes, for example 'K', 'M',
+ * 'G'. If 'i' is appended after the postfix, powers of 2 are used
+ * instead of powers of 10. The 'B' postfix multiplies the value for
+ * 8, and can be appended after another postfix or used alone. This
+ * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix.
+ * @param tail if non-NULL puts here the pointer to the char next
+ * after the last parsed character
+ */
+double av_strtod(const char *numstr, char **tail);
+
+#endif /* AVUTIL_EVAL_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/ffversion.h b/Externals/ffmpeg/dev/include/libavutil/ffversion.h
new file mode 100644
index 0000000000..15f077f414
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/ffversion.h
@@ -0,0 +1,4 @@
+#ifndef AVUTIL_FFVERSION_H
+#define AVUTIL_FFVERSION_H
+#define FFMPEG_VERSION "N-69060-gcd960c8"
+#endif /* AVUTIL_FFVERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/fifo.h b/Externals/ffmpeg/dev/include/libavutil/fifo.h
new file mode 100644
index 0000000000..f3bdcbceb4
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/fifo.h
@@ -0,0 +1,158 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * a very simple circular buffer FIFO implementation
+ */
+
+#ifndef AVUTIL_FIFO_H
+#define AVUTIL_FIFO_H
+
+#include <stdint.h>
+#include "avutil.h"
+#include "attributes.h"
+
+typedef struct AVFifoBuffer {
+ uint8_t *buffer;
+ uint8_t *rptr, *wptr, *end;
+ uint32_t rndx, wndx;
+} AVFifoBuffer;
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param size of FIFO
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc(unsigned int size);
+
+/**
+ * Initialize an AVFifoBuffer.
+ * @param nmemb number of elements
+ * @param size size of the single element
+ * @return AVFifoBuffer or NULL in case of memory allocation failure
+ */
+AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size);
+
+/**
+ * Free an AVFifoBuffer.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_free(AVFifoBuffer *f);
+
+/**
+ * Free an AVFifoBuffer and reset pointer to NULL.
+ * @param f AVFifoBuffer to free
+ */
+void av_fifo_freep(AVFifoBuffer **f);
+
+/**
+ * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied.
+ * @param f AVFifoBuffer to reset
+ */
+void av_fifo_reset(AVFifoBuffer *f);
+
+/**
+ * Return the amount of data in bytes in the AVFifoBuffer, that is the
+ * amount of data you can read from it.
+ * @param f AVFifoBuffer to read from
+ * @return size
+ */
+int av_fifo_size(const AVFifoBuffer *f);
+
+/**
+ * Return the amount of space in bytes in the AVFifoBuffer, that is the
+ * amount of data you can write into it.
+ * @param f AVFifoBuffer to write into
+ * @return size
+ */
+int av_fifo_space(const AVFifoBuffer *f);
+
+/**
+ * Feed data from an AVFifoBuffer to a user-supplied callback.
+ * @param f AVFifoBuffer to read from
+ * @param buf_size number of bytes to read
+ * @param func generic read function
+ * @param dest data destination
+ */
+int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int));
+
+/**
+ * Feed data from a user-supplied callback to an AVFifoBuffer.
+ * @param f AVFifoBuffer to write to
+ * @param src data source; non-const since it may be used as a
+ * modifiable context by the function defined in func
+ * @param size number of bytes to write
+ * @param func generic write function; the first parameter is src,
+ * the second is dest_buf, the third is dest_buf_size.
+ * func must return the number of bytes written to dest_buf, or <= 0 to
+ * indicate no more data available to write.
+ * If func is NULL, src is interpreted as a simple byte array for source data.
+ * @return the number of bytes written to the FIFO
+ */
+int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int));
+
+/**
+ * Resize an AVFifoBuffer.
+ * In case of reallocation failure, the old FIFO is kept unchanged.
+ *
+ * @param f AVFifoBuffer to resize
+ * @param size new AVFifoBuffer size in bytes
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size);
+
+/**
+ * Enlarge an AVFifoBuffer.
+ * In case of reallocation failure, the old FIFO is kept unchanged.
+ * The new fifo size may be larger than the requested size.
+ *
+ * @param f AVFifoBuffer to resize
+ * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size()
+ * @return <0 for failure, >=0 otherwise
+ */
+int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space);
+
+/**
+ * Read and discard the specified amount of data from an AVFifoBuffer.
+ * @param f AVFifoBuffer to read from
+ * @param size amount of data to read in bytes
+ */
+void av_fifo_drain(AVFifoBuffer *f, int size);
+
+/**
+ * Return a pointer to the data stored in a FIFO buffer at a certain offset.
+ * The FIFO buffer is not modified.
+ *
+ * @param f AVFifoBuffer to peek at, f must be non-NULL
+ * @param offs an offset in bytes, its absolute value must be less
+ * than the used buffer size or the returned pointer will
+ * point outside to the buffer data.
+ * The used buffer size can be checked with av_fifo_size().
+ */
+static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs)
+{
+    uint8_t *ptr = f->rptr + offs;             /* candidate address, offs bytes from the read pointer */
+    if (ptr >= f->end)
+        ptr = f->buffer + (ptr - f->end);      /* wrap forward past the end of the circular buffer */
+    else if (ptr < f->buffer)
+        ptr = f->end - (f->buffer - ptr);      /* wrap backward (offs may be negative) */
+    return ptr;
+}
+
+#endif /* AVUTIL_FIFO_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/file.h b/Externals/ffmpeg/dev/include/libavutil/file.h
new file mode 100644
index 0000000000..1cae2951be
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/file.h
@@ -0,0 +1,67 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_FILE_H
+#define AVUTIL_FILE_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+
+/**
+ * @file
+ * Misc file utilities.
+ */
+
+/**
+ * Read the file with name filename, and put its content in a newly
+ * allocated buffer or map it with mmap() when available.
+ * In case of success set *bufptr to the read or mmapped buffer, and
+ * *size to the size in bytes of the buffer in *bufptr.
+ * The returned buffer must be released with av_file_unmap().
+ *
+ * @param log_offset loglevel offset used for logging
+ * @param log_ctx context used for logging
+ * @return a non negative number in case of success, a negative value
+ * corresponding to an AVERROR error code in case of failure
+ */
+int av_file_map(const char *filename, uint8_t **bufptr, size_t *size,
+ int log_offset, void *log_ctx);
+
+/**
+ * Unmap or free the buffer bufptr created by av_file_map().
+ *
+ * @param size size in bytes of bufptr, must be the same as returned
+ * by av_file_map()
+ */
+void av_file_unmap(uint8_t *bufptr, size_t size);
+
+/**
+ * Wrapper to work around the lack of mkstemp() on mingw.
+ * Also, tries to create file in /tmp first, if possible.
+ * *prefix can be a character constant; *filename will be allocated internally.
+ * @return file descriptor of opened file (or negative value corresponding to an
+ * AVERROR code on error)
+ * and opened file name in **filename.
+ * @note On very old libcs it is necessary to set a secure umask before
+ * calling this, av_tempfile() can't call umask itself as it is used in
+ * libraries and could interfere with the calling application.
+ */
+int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx);
+
+#endif /* AVUTIL_FILE_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/frame.h b/Externals/ffmpeg/dev/include/libavutil/frame.h
new file mode 100644
index 0000000000..d335bee831
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/frame.h
@@ -0,0 +1,771 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu_frame
+ * reference-counted frame API
+ */
+
+#ifndef AVUTIL_FRAME_H
+#define AVUTIL_FRAME_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "buffer.h"
+#include "dict.h"
+#include "rational.h"
+#include "samplefmt.h"
+#include "pixfmt.h"
+#include "version.h"
+
+
+/**
+ * @defgroup lavu_frame AVFrame
+ * @ingroup lavu_data
+ *
+ * @{
+ * AVFrame is an abstraction for reference-counted raw multimedia data.
+ */
+
+enum AVFrameSideDataType {
+ /**
+ * The data is the AVPanScan struct defined in libavcodec.
+ */
+ AV_FRAME_DATA_PANSCAN,
+ /**
+ * ATSC A53 Part 4 Closed Captions.
+ * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data.
+ * The number of bytes of CC data is AVFrameSideData.size.
+ */
+ AV_FRAME_DATA_A53_CC,
+ /**
+ * Stereoscopic 3d metadata.
+ * The data is the AVStereo3D struct defined in libavutil/stereo3d.h.
+ */
+ AV_FRAME_DATA_STEREO3D,
+ /**
+ * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h.
+ */
+ AV_FRAME_DATA_MATRIXENCODING,
+ /**
+ * Metadata relevant to a downmix procedure.
+ * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h.
+ */
+ AV_FRAME_DATA_DOWNMIX_INFO,
+ /**
+ * ReplayGain information in the form of the AVReplayGain struct.
+ */
+ AV_FRAME_DATA_REPLAYGAIN,
+ /**
+ * This side data contains a 3x3 transformation matrix describing an affine
+ * transformation that needs to be applied to the frame for correct
+ * presentation.
+ *
+ * See libavutil/display.h for a detailed description of the data.
+ */
+ AV_FRAME_DATA_DISPLAYMATRIX,
+ /**
+ * Active Format Description data consisting of a single byte as specified
+ * in ETSI TS 101 154 using AVActiveFormatDescription enum.
+ */
+ AV_FRAME_DATA_AFD,
+ /**
+ * Motion vectors exported by some codecs (on demand through the export_mvs
+ * flag set in the libavcodec AVCodecContext flags2 option).
+ * The data is the AVMotionVector struct defined in
+ * libavutil/motion_vector.h.
+ */
+ AV_FRAME_DATA_MOTION_VECTORS,
+ /**
+ * Recommends skipping the specified number of samples. This is exported
+ * only if the "skip_manual" AVOption is set in libavcodec.
+ * This has the same format as AV_PKT_DATA_SKIP_SAMPLES.
+ * @code
+ * u32le number of samples to skip from start of this packet
+ * u32le number of samples to skip from end of this packet
+ * u8 reason for start skip
+ * u8 reason for end skip (0=padding silence, 1=convergence)
+ * @endcode
+ */
+ AV_FRAME_DATA_SKIP_SAMPLES,
+};
+
+enum AVActiveFormatDescription {
+ AV_AFD_SAME = 8,
+ AV_AFD_4_3 = 9,
+ AV_AFD_16_9 = 10,
+ AV_AFD_14_9 = 11,
+ AV_AFD_4_3_SP_14_9 = 13,
+ AV_AFD_16_9_SP_14_9 = 14,
+ AV_AFD_SP_4_3 = 15,
+};
+
+typedef struct AVFrameSideData {
+ enum AVFrameSideDataType type;
+ uint8_t *data;
+ int size;
+ AVDictionary *metadata;
+} AVFrameSideData;
+
+/**
+ * This structure describes decoded (raw) audio or video data.
+ *
+ * AVFrame must be allocated using av_frame_alloc(). Note that this only
+ * allocates the AVFrame itself, the buffers for the data must be managed
+ * through other means (see below).
+ * AVFrame must be freed with av_frame_free().
+ *
+ * AVFrame is typically allocated once and then reused multiple times to hold
+ * different data (e.g. a single AVFrame to hold frames received from a
+ * decoder). In such a case, av_frame_unref() will free any references held by
+ * the frame and reset it to its original clean state before it
+ * is reused again.
+ *
+ * The data described by an AVFrame is usually reference counted through the
+ * AVBuffer API. The underlying buffer references are stored in AVFrame.buf /
+ * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at
+ * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case,
+ * every single data plane must be contained in one of the buffers in
+ * AVFrame.buf or AVFrame.extended_buf.
+ * There may be a single buffer for all the data, or one separate buffer for
+ * each plane, or anything in between.
+ *
+ * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added
+ * to the end with a minor bump.
+ * Similarly fields that are marked as to be only accessed by
+ * av_opt_ptr() can be reordered. This allows 2 forks to add fields
+ * without breaking compatibility with each other.
+ */
+typedef struct AVFrame {
+#define AV_NUM_DATA_POINTERS 8
+ /**
+ * pointer to the picture/channel planes.
+ * This might be different from the first allocated byte
+ *
+ * Some decoders access areas outside 0,0 - width,height, please
+ * see avcodec_align_dimensions2(). Some filters and swscale can read
+ * up to 16 bytes beyond the planes, if these filters are to be used,
+ * then 16 extra bytes must be allocated.
+ */
+ uint8_t *data[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For video, size in bytes of each picture line.
+ * For audio, size in bytes of each plane.
+ *
+ * For audio, only linesize[0] may be set. For planar audio, each channel
+ * plane must be the same size.
+ *
+ * For video the linesizes should be multiples of the CPUs alignment
+ * preference, this is 16 or 32 for modern desktop CPUs.
+ * Some code requires such alignment other code can be slower without
+ * correct alignment, for yet other it makes no difference.
+ *
+ * @note The linesize may be larger than the size of usable data -- there
+ * may be extra padding present for performance reasons.
+ */
+ int linesize[AV_NUM_DATA_POINTERS];
+
+ /**
+ * pointers to the data planes/channels.
+ *
+ * For video, this should simply point to data[].
+ *
+ * For planar audio, each channel has a separate data pointer, and
+ * linesize[0] contains the size of each channel buffer.
+ * For packed audio, there is just one data pointer, and linesize[0]
+ * contains the total size of the buffer for all channels.
+ *
+ * Note: Both data and extended_data should always be set in a valid frame,
+ * but for planar audio with more channels that can fit in data,
+ * extended_data must be used in order to access all channels.
+ */
+ uint8_t **extended_data;
+
+ /**
+ * width and height of the video frame
+ */
+ int width, height;
+
+ /**
+ * number of audio samples (per channel) described by this frame
+ */
+ int nb_samples;
+
+ /**
+ * format of the frame, -1 if unknown or unset.
+ * Values correspond to enum AVPixelFormat for video frames,
+ * enum AVSampleFormat for audio frames.
+ */
+ int format;
+
+ /**
+ * 1 -> keyframe, 0-> not
+ */
+ int key_frame;
+
+ /**
+ * Picture type of the frame.
+ */
+ enum AVPictureType pict_type;
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ uint8_t *base[AV_NUM_DATA_POINTERS];
+#endif
+
+ /**
+ * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified.
+ */
+ AVRational sample_aspect_ratio;
+
+ /**
+ * Presentation timestamp in time_base units (time when frame should be shown to user).
+ */
+ int64_t pts;
+
+ /**
+ * PTS copied from the AVPacket that was decoded to produce this frame.
+ */
+ int64_t pkt_pts;
+
+ /**
+ * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used)
+ * This is also the Presentation time of this AVFrame calculated from
+ * only AVPacket.dts values without pts values.
+ */
+ int64_t pkt_dts;
+
+ /**
+ * picture number in bitstream order
+ */
+ int coded_picture_number;
+ /**
+ * picture number in display order
+ */
+ int display_picture_number;
+
+ /**
+ * quality (between 1 (good) and FF_LAMBDA_MAX (bad))
+ */
+ int quality;
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ int reference;
+
+ /**
+ * QP table
+ */
+ attribute_deprecated
+ int8_t *qscale_table;
+ /**
+ * QP store stride
+ */
+ attribute_deprecated
+ int qstride;
+
+ attribute_deprecated
+ int qscale_type;
+
+ /**
+ * mbskip_table[mb]>=1 if MB didn't change
+ * stride= mb_width = (width+15)>>4
+ */
+ attribute_deprecated
+ uint8_t *mbskip_table;
+
+ /**
+ * motion vector table
+ * @code
+ * example:
+ * int mv_sample_log2= 4 - motion_subsample_log2;
+ * int mb_width= (width+15)>>4;
+ * int mv_stride= (mb_width << mv_sample_log2) + 1;
+ * motion_val[direction][x + y*mv_stride][0->mv_x, 1->mv_y];
+ * @endcode
+ */
+ int16_t (*motion_val[2])[2];
+
+ /**
+ * macroblock type table
+ * mb_type_base + mb_width + 2
+ */
+ attribute_deprecated
+ uint32_t *mb_type;
+
+ /**
+ * DCT coefficients
+ */
+ attribute_deprecated
+ short *dct_coeff;
+
+ /**
+ * motion reference frame index
+ * the order in which these are stored can depend on the codec.
+ */
+ attribute_deprecated
+ int8_t *ref_index[2];
+#endif
+
+ /**
+ * for some private data of the user
+ */
+ void *opaque;
+
+ /**
+ * error
+ */
+ uint64_t error[AV_NUM_DATA_POINTERS];
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ int type;
+#endif
+
+ /**
+ * When decoding, this signals how much the picture must be delayed.
+ * extra_delay = repeat_pict / (2*fps)
+ */
+ int repeat_pict;
+
+ /**
+ * The content of the picture is interlaced.
+ */
+ int interlaced_frame;
+
+ /**
+ * If the content is interlaced, is top field displayed first.
+ */
+ int top_field_first;
+
+ /**
+ * Tell user application that palette has changed from previous frame.
+ */
+ int palette_has_changed;
+
+#if FF_API_AVFRAME_LAVC
+ attribute_deprecated
+ int buffer_hints;
+
+ /**
+ * Pan scan.
+ */
+ attribute_deprecated
+ struct AVPanScan *pan_scan;
+#endif
+
+ /**
+ * reordered opaque 64bit (generally an integer or a double precision float
+ * PTS but can be anything).
+ * The user sets AVCodecContext.reordered_opaque to represent the input at
+ * that time,
+ * the decoder reorders values as needed and sets AVFrame.reordered_opaque
+ * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque
+ * @deprecated in favor of pkt_pts
+ */
+ int64_t reordered_opaque;
+
+#if FF_API_AVFRAME_LAVC
+ /**
+ * @deprecated this field is unused
+ */
+ attribute_deprecated void *hwaccel_picture_private;
+
+ attribute_deprecated
+ struct AVCodecContext *owner;
+ attribute_deprecated
+ void *thread_opaque;
+
+ /**
+ * log2 of the size of the block which a single vector in motion_val represents:
+ * (4->16x16, 3->8x8, 2-> 4x4, 1-> 2x2)
+ */
+ uint8_t motion_subsample_log2;
+#endif
+
+ /**
+ * Sample rate of the audio data.
+ */
+ int sample_rate;
+
+ /**
+ * Channel layout of the audio data.
+ */
+ uint64_t channel_layout;
+
+ /**
+ * AVBuffer references backing the data for this frame. If all elements of
+ * this array are NULL, then this frame is not reference counted.
+ *
+ * There may be at most one AVBuffer per data plane, so for video this array
+ * always contains all the references. For planar audio with more than
+ * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in
+ * this array. Then the extra AVBufferRef pointers are stored in the
+ * extended_buf array.
+ */
+ AVBufferRef *buf[AV_NUM_DATA_POINTERS];
+
+ /**
+ * For planar audio which requires more than AV_NUM_DATA_POINTERS
+ * AVBufferRef pointers, this array will hold all the references which
+ * cannot fit into AVFrame.buf.
+ *
+ * Note that this is different from AVFrame.extended_data, which always
+ * contains all the pointers. This array only contains the extra pointers,
+ * which cannot fit into AVFrame.buf.
+ *
+ * This array is always allocated using av_malloc() by whoever constructs
+ * the frame. It is freed in av_frame_unref().
+ */
+ AVBufferRef **extended_buf;
+ /**
+ * Number of elements in extended_buf.
+ */
+ int nb_extended_buf;
+
+ AVFrameSideData **side_data;
+ int nb_side_data;
+
+/**
+ * @defgroup lavu_frame_flags AV_FRAME_FLAGS
+ * Flags describing additional frame properties.
+ *
+ * @{
+ */
+
+/**
+ * The frame data may be corrupted, e.g. due to decoding errors.
+ */
+#define AV_FRAME_FLAG_CORRUPT (1 << 0)
+/**
+ * @}
+ */
+
+ /**
+ * Frame flags, a combination of @ref lavu_frame_flags
+ */
+ int flags;
+
+ /**
+ * MPEG vs JPEG YUV range.
+ * It must be accessed using av_frame_get_color_range() and
+ * av_frame_set_color_range().
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorRange color_range;
+
+ enum AVColorPrimaries color_primaries;
+
+ enum AVColorTransferCharacteristic color_trc;
+
+ /**
+ * YUV colorspace type.
+ * It must be accessed using av_frame_get_colorspace() and
+ * av_frame_set_colorspace().
+ * - encoding: Set by user
+ * - decoding: Set by libavcodec
+ */
+ enum AVColorSpace colorspace;
+
+ enum AVChromaLocation chroma_location;
+
+ /**
+ * frame timestamp estimated using various heuristics, in stream time base
+ * Code outside libavcodec should access this field using:
+ * av_frame_get_best_effort_timestamp(frame)
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int64_t best_effort_timestamp;
+
+ /**
+ * reordered pos from the last AVPacket that has been input into the decoder
+ * Code outside libavcodec should access this field using:
+ * av_frame_get_pkt_pos(frame)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_pos;
+
+ /**
+ * duration of the corresponding packet, expressed in
+ * AVStream->time_base units, 0 if unknown.
+ * Code outside libavcodec should access this field using:
+ * av_frame_get_pkt_duration(frame)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int64_t pkt_duration;
+
+ /**
+ * metadata.
+ * Code outside libavcodec should access this field using:
+ * av_frame_get_metadata(frame)
+ * - encoding: Set by user.
+ * - decoding: Set by libavcodec.
+ */
+ AVDictionary *metadata;
+
+ /**
+ * decode error flags of the frame, set to a combination of
+ * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there
+ * were errors during the decoding.
+ * Code outside libavcodec should access this field using:
+ * av_frame_get_decode_error_flags(frame)
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int decode_error_flags;
+#define FF_DECODE_ERROR_INVALID_BITSTREAM 1
+#define FF_DECODE_ERROR_MISSING_REFERENCE 2
+
+ /**
+ * number of audio channels, only used for audio.
+ * Code outside libavcodec should access this field using:
+ * av_frame_get_channels(frame)
+ * - encoding: unused
+ * - decoding: Read by user.
+ */
+ int channels;
+
+ /**
+ * size of the corresponding packet containing the compressed
+ * frame. It must be accessed using av_frame_get_pkt_size() and
+ * av_frame_set_pkt_size().
+ * It is set to a negative value if unknown.
+ * - encoding: unused
+ * - decoding: set by libavcodec, read by user.
+ */
+ int pkt_size;
+
+ /**
+ * Not to be accessed directly from outside libavutil
+ */
+ AVBufferRef *qp_table_buf;
+} AVFrame;
+
+/**
+ * Accessors for some AVFrame fields.
+ * The position of these field in the structure is not part of the ABI,
+ * they should not be accessed directly outside libavcodec.
+ */
+int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame);
+void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val);
+int64_t av_frame_get_pkt_duration (const AVFrame *frame);
+void av_frame_set_pkt_duration (AVFrame *frame, int64_t val);
+int64_t av_frame_get_pkt_pos (const AVFrame *frame);
+void av_frame_set_pkt_pos (AVFrame *frame, int64_t val);
+int64_t av_frame_get_channel_layout (const AVFrame *frame);
+void av_frame_set_channel_layout (AVFrame *frame, int64_t val);
+int av_frame_get_channels (const AVFrame *frame);
+void av_frame_set_channels (AVFrame *frame, int val);
+int av_frame_get_sample_rate (const AVFrame *frame);
+void av_frame_set_sample_rate (AVFrame *frame, int val);
+AVDictionary *av_frame_get_metadata (const AVFrame *frame);
+void av_frame_set_metadata (AVFrame *frame, AVDictionary *val);
+int av_frame_get_decode_error_flags (const AVFrame *frame);
+void av_frame_set_decode_error_flags (AVFrame *frame, int val);
+int av_frame_get_pkt_size(const AVFrame *frame);
+void av_frame_set_pkt_size(AVFrame *frame, int val);
+AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame);
+int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type);
+int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type);
+enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame);
+void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val);
+enum AVColorRange av_frame_get_color_range(const AVFrame *frame);
+void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val);
+
+/**
+ * Get the name of a colorspace.
+ * @return a static string identifying the colorspace; can be NULL.
+ */
+const char *av_get_colorspace_name(enum AVColorSpace val);
+
+/**
+ * Allocate an AVFrame and set its fields to default values. The resulting
+ * struct must be freed using av_frame_free().
+ *
+ * @return An AVFrame filled with default values or NULL on failure.
+ *
+ * @note this only allocates the AVFrame itself, not the data buffers. Those
+ * must be allocated through other means, e.g. with av_frame_get_buffer() or
+ * manually.
+ */
+AVFrame *av_frame_alloc(void);
+
+/**
+ * Free the frame and any dynamically allocated objects in it,
+ * e.g. extended_data. If the frame is reference counted, it will be
+ * unreferenced first.
+ *
+ * @param frame frame to be freed. The pointer will be set to NULL.
+ */
+void av_frame_free(AVFrame **frame);
+
+/**
+ * Set up a new reference to the data described by the source frame.
+ *
+ * Copy frame properties from src to dst and create a new reference for each
+ * AVBufferRef from src.
+ *
+ * If src is not reference counted, new buffers are allocated and the data is
+ * copied.
+ *
+ * @return 0 on success, a negative AVERROR on error
+ */
+int av_frame_ref(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Create a new frame that references the same data as src.
+ *
+ * This is a shortcut for av_frame_alloc()+av_frame_ref().
+ *
+ * @return newly created AVFrame on success, NULL on error.
+ */
+AVFrame *av_frame_clone(const AVFrame *src);
+
+/**
+ * Unreference all the buffers referenced by frame and reset the frame fields.
+ */
+void av_frame_unref(AVFrame *frame);
+
+/**
+ * Move everything contained in src to dst and reset src.
+ */
+void av_frame_move_ref(AVFrame *dst, AVFrame *src);
+
+/**
+ * Allocate new buffer(s) for audio or video data.
+ *
+ * The following fields must be set on frame before calling this function:
+ * - format (pixel format for video, sample format for audio)
+ * - width and height for video
+ * - nb_samples and channel_layout for audio
+ *
+ * This function will fill AVFrame.data and AVFrame.buf arrays and, if
+ * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf.
+ * For planar formats, one buffer will be allocated for each plane.
+ *
+ * @param frame frame in which to store the new buffers.
+ * @param align required buffer size alignment
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ */
+int av_frame_get_buffer(AVFrame *frame, int align);
+
+/**
+ * Check if the frame data is writable.
+ *
+ * @return A positive value if the frame data is writable (which is true if and
+ * only if each of the underlying buffers has only one reference, namely the one
+ * stored in this frame). Return 0 otherwise.
+ *
+ * If 1 is returned the answer is valid until av_buffer_ref() is called on any
+ * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly).
+ *
+ * @see av_frame_make_writable(), av_buffer_is_writable()
+ */
+int av_frame_is_writable(AVFrame *frame);
+
+/**
+ * Ensure that the frame data is writable, avoiding data copy if possible.
+ *
+ * Do nothing if the frame is writable, allocate new buffers and copy the data
+ * if it is not.
+ *
+ * @return 0 on success, a negative AVERROR on error.
+ *
+ * @see av_frame_is_writable(), av_buffer_is_writable(),
+ * av_buffer_make_writable()
+ */
+int av_frame_make_writable(AVFrame *frame);
+
+/**
+ * Copy the frame data from src to dst.
+ *
+ * This function does not allocate anything, dst must be already initialized and
+ * allocated with the same parameters as src.
+ *
+ * This function only copies the frame data (i.e. the contents of the data /
+ * extended data arrays), not any other properties.
+ *
+ * @return >= 0 on success, a negative AVERROR on error.
+ */
+int av_frame_copy(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Copy only "metadata" fields from src to dst.
+ *
+ * Metadata for the purpose of this function are those fields that do not affect
+ * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample
+ * aspect ratio (for video), but not width/height or channel layout.
+ * Side data is also copied.
+ */
+int av_frame_copy_props(AVFrame *dst, const AVFrame *src);
+
+/**
+ * Get the buffer reference a given data plane is stored in.
+ *
+ * @param plane index of the data plane of interest in frame->extended_data.
+ *
+ * @return the buffer reference that contains the plane or NULL if the input
+ * frame is not valid.
+ */
+AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane);
+
+/**
+ * Add a new side data to a frame.
+ *
+ * @param frame a frame to which the side data should be added
+ * @param type type of the added side data
+ * @param size size of the side data
+ *
+ * @return newly added side data on success, NULL on error
+ */
+AVFrameSideData *av_frame_new_side_data(AVFrame *frame,
+ enum AVFrameSideDataType type,
+ int size);
+
+/**
+ * @return a pointer to the side data of a given type on success, NULL if there
+ * is no side data with such type in this frame.
+ */
+AVFrameSideData *av_frame_get_side_data(const AVFrame *frame,
+ enum AVFrameSideDataType type);
+
+/**
+ * If side data of the supplied type exists in the frame, free it and remove it
+ * from the frame.
+ */
+void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type);
+
+/**
+ * @return a string identifying the side data type
+ */
+const char *av_frame_side_data_name(enum AVFrameSideDataType type);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_FRAME_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/hash.h b/Externals/ffmpeg/dev/include/libavutil/hash.h
new file mode 100644
index 0000000000..d4bcbf8cc8
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/hash.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2013 Reimar Döffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HASH_H
+#define AVUTIL_HASH_H
+
+#include <stdint.h>
+
+struct AVHashContext;
+
+/**
+ * Allocate a hash context for the algorithm specified by name.
+ *
+ * @return >= 0 for success, a negative error code for failure
+ * @note The context is not initialized, you must call av_hash_init().
+ */
+int av_hash_alloc(struct AVHashContext **ctx, const char *name);
+
+/**
+ * Get the names of available hash algorithms.
+ *
+ * This function can be used to enumerate the algorithms.
+ *
+ * @param i index of the hash algorithm, starting from 0
+ * @return a pointer to a static string or NULL if i is out of range
+ */
+const char *av_hash_names(int i);
+
+/**
+ * Get the name of the algorithm corresponding to the given hash context.
+ */
+const char *av_hash_get_name(const struct AVHashContext *ctx);
+
+/**
+ * Maximum value that av_hash_get_size will currently return.
+ *
+ * You can use this if you absolutely want or need to use static allocation
+ * and are fine with not supporting hashes newly added to libavutil without
+ * recompilation.
+ * Note that you still need to check against av_hash_get_size, adding new hashes
+ * with larger sizes will not be considered an ABI change and should not cause
+ * your code to overflow a buffer.
+ */
+#define AV_HASH_MAX_SIZE 64
+
+/**
+ * Get the size of the resulting hash value in bytes.
+ *
+ * The pointer passed to av_hash_final must have space for at least this many bytes.
+ */
+int av_hash_get_size(const struct AVHashContext *ctx);
+
+/**
+ * Initialize or reset a hash context.
+ */
+void av_hash_init(struct AVHashContext *ctx);
+
+/**
+ * Update a hash context with additional data.
+ */
+void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len);
+
+/**
+ * Finalize a hash context and compute the actual hash value.
+ */
+void av_hash_final(struct AVHashContext *ctx, uint8_t *dst);
+
+/**
+ * Finalize a hash context and compute the actual hash value.
+ * If size is smaller than the hash size, the hash is truncated;
+ * if size is larger, the buffer is padded with 0.
+ */
+void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size);
+
+/**
+ * Finalize a hash context and compute the actual hash value as a hex string.
+ * The string is always 0-terminated.
+ * If size is smaller than 2 * hash_size + 1, the hex string is truncated.
+ */
+void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size);
+
+/**
+ * Finalize a hash context and compute the actual hash value as a base64 string.
+ * The string is always 0-terminated.
+ * If size is smaller than AV_BASE64_SIZE(hash_size), the base64 string is
+ * truncated.
+ */
+void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size);
+
+/**
+ * Free hash context.
+ */
+void av_hash_freep(struct AVHashContext **ctx);
+
+#endif /* AVUTIL_HASH_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/hmac.h b/Externals/ffmpeg/dev/include/libavutil/hmac.h
new file mode 100644
index 0000000000..d36d4de19e
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/hmac.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2012 Martin Storsjo
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HMAC_H
+#define AVUTIL_HMAC_H
+
+#include <stdint.h>
+
+/**
+ * @defgroup lavu_hmac HMAC
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+enum AVHMACType {
+ AV_HMAC_MD5,
+ AV_HMAC_SHA1,
+ AV_HMAC_SHA224 = 10,
+ AV_HMAC_SHA256,
+ AV_HMAC_SHA384,
+ AV_HMAC_SHA512,
+};
+
+typedef struct AVHMAC AVHMAC;
+
+/**
+ * Allocate an AVHMAC context.
+ * @param type The hash function used for the HMAC.
+ */
+AVHMAC *av_hmac_alloc(enum AVHMACType type);
+
+/**
+ * Free an AVHMAC context.
+ * @param ctx The context to free, may be NULL
+ */
+void av_hmac_free(AVHMAC *ctx);
+
+/**
+ * Initialize an AVHMAC context with an authentication key.
+ * @param ctx The HMAC context
+ * @param key The authentication key
+ * @param keylen The length of the key, in bytes
+ */
+void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen);
+
+/**
+ * Hash data with the HMAC.
+ * @param ctx The HMAC context
+ * @param data The data to hash
+ * @param len The length of the data, in bytes
+ */
+void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len);
+
+/**
+ * Finish hashing and output the HMAC digest.
+ * @param ctx The HMAC context
+ * @param out The output buffer to write the digest into
+ * @param outlen The length of the out buffer, in bytes
+ * @return The number of bytes written to out, or a negative error code.
+ */
+int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen);
+
+/**
+ * Hash an array of data with a key.
+ * @param ctx The HMAC context
+ * @param data The data to hash
+ * @param len The length of the data, in bytes
+ * @param key The authentication key
+ * @param keylen The length of the key, in bytes
+ * @param out The output buffer to write the digest into
+ * @param outlen The length of the out buffer, in bytes
+ * @return The number of bytes written to out, or a negative error code.
+ */
+int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len,
+ const uint8_t *key, unsigned int keylen,
+ uint8_t *out, unsigned int outlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_HMAC_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/imgutils.h b/Externals/ffmpeg/dev/include/libavutil/imgutils.h
new file mode 100644
index 0000000000..2ec246aa48
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/imgutils.h
@@ -0,0 +1,213 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_IMGUTILS_H
+#define AVUTIL_IMGUTILS_H
+
+/**
+ * @file
+ * misc image utilities
+ *
+ * @addtogroup lavu_picture
+ * @{
+ */
+
+#include "avutil.h"
+#include "pixdesc.h"
+#include "rational.h"
+
+/**
+ * Compute the max pixel step for each plane of an image with a
+ * format described by pixdesc.
+ *
+ * The pixel step is the distance in bytes between the first byte of
+ * the group of bytes which describe a pixel component and the first
+ * byte of the successive group in the same plane for the same
+ * component.
+ *
+ * @param max_pixsteps an array which is filled with the max pixel step
+ * for each plane. Since a plane may contain different pixel
+ * components, the computed max_pixsteps[plane] is relative to the
+ * component in the plane with the max pixel step.
+ * @param max_pixstep_comps an array which is filled with the component
+ * for each plane which has the max pixel step. May be NULL.
+ */
+void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4],
+ const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Compute the size of an image line with format pix_fmt and width
+ * width for the plane plane.
+ *
+ * @return the computed size in bytes
+ */
+int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane);
+
+/**
+ * Fill plane linesizes for an image with pixel format pix_fmt and
+ * width width.
+ *
+ * @param linesizes array to be filled with the linesize for each plane
+ * @return >= 0 in case of success, a negative error code otherwise
+ */
+int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width);
+
+/**
+ * Fill plane data pointers for an image with pixel format pix_fmt and
+ * height height.
+ *
+ * @param data pointers array to be filled with the pointer for each image plane
+ * @param ptr the pointer to a buffer which will contain the image
+ * @param linesizes the array containing the linesize for each
+ * plane, should be filled by av_image_fill_linesizes()
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height,
+ uint8_t *ptr, const int linesizes[4]);
+
+/**
+ * Allocate an image with size w and h and pixel format pix_fmt, and
+ * fill pointers and linesizes accordingly.
+ * The allocated image buffer has to be freed by using
+ * av_freep(&pointers[0]).
+ *
+ * @param align the value to use for buffer size alignment
+ * @return the size in bytes required for the image buffer, a negative
+ * error code in case of failure
+ */
+int av_image_alloc(uint8_t *pointers[4], int linesizes[4],
+ int w, int h, enum AVPixelFormat pix_fmt, int align);
+
+/**
+ * Copy image plane from src to dst.
+ * That is, copy "height" number of lines of "bytewidth" bytes each.
+ * The first byte of each successive line is separated by *_linesize
+ * bytes.
+ *
+ * bytewidth must be contained by both absolute values of dst_linesize
+ * and src_linesize, otherwise the function behavior is undefined.
+ *
+ * @param dst_linesize linesize for the image plane in dst
+ * @param src_linesize linesize for the image plane in src
+ */
+void av_image_copy_plane(uint8_t *dst, int dst_linesize,
+ const uint8_t *src, int src_linesize,
+ int bytewidth, int height);
+
+/**
+ * Copy image in src_data to dst_data.
+ *
+ * @param dst_linesizes linesizes for the image in dst_data
+ * @param src_linesizes linesizes for the image in src_data
+ */
+void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4],
+ const uint8_t *src_data[4], const int src_linesizes[4],
+ enum AVPixelFormat pix_fmt, int width, int height);
+
+/**
+ * Setup the data pointers and linesizes based on the specified image
+ * parameters and the provided array.
+ *
+ * The fields of the given image are filled in by using the src
+ * address which points to the image data buffer. Depending on the
+ * specified pixel format, one or multiple image data pointers and
+ * line sizes will be set. If a planar format is specified, several
+ * pointers will be set pointing to the different picture planes and
+ * the line sizes of the different planes will be stored in the
+ * lines_sizes array. Call with !src to get the required
+ * size for the src buffer.
+ *
+ * To allocate the buffer and fill in the dst_data and dst_linesize in
+ * one call, use av_image_alloc().
+ *
+ * @param dst_data data pointers to be filled in
+ * @param dst_linesizes linesizes for the image in dst_data to be filled in
+ * @param src buffer which will contain or contains the actual image data, can be NULL
+ * @param pix_fmt the pixel format of the image
+ * @param width the width of the image in pixels
+ * @param height the height of the image in pixels
+ * @param align the value used in src for linesize alignment
+ * @return the size in bytes required for src, a negative error code
+ * in case of failure
+ */
+int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4],
+ const uint8_t *src,
+ enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Return the size in bytes of the amount of data required to store an
+ * image with the given parameters.
+ *
+ * @param[in] align the assumed linesize alignment
+ */
+int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Copy image data from an image into a buffer.
+ *
+ * av_image_get_buffer_size() can be used to compute the required size
+ * for the buffer to fill.
+ *
+ * @param dst a buffer into which picture data will be copied
+ * @param dst_size the size in bytes of dst
+ * @param src_data pointers containing the source image data
+ * @param src_linesizes linesizes for the image in src_data
+ * @param pix_fmt the pixel format of the source image
+ * @param width the width of the source image in pixels
+ * @param height the height of the source image in pixels
+ * @param align the assumed linesize alignment for dst
+ * @return the number of bytes written to dst, or a negative value
+ * (error code) on error
+ */
+int av_image_copy_to_buffer(uint8_t *dst, int dst_size,
+ const uint8_t * const src_data[4], const int src_linesize[4],
+ enum AVPixelFormat pix_fmt, int width, int height, int align);
+
+/**
+ * Check if the given dimension of an image is valid, meaning that all
+ * bytes of the image can be addressed with a signed int.
+ *
+ * @param w the width of the picture
+ * @param h the height of the picture
+ * @param log_offset the offset to sum to the log level for logging with log_ctx
+ * @param log_ctx the parent logging context, it may be NULL
+ * @return >= 0 if valid, a negative error code otherwise
+ */
+int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx);
+
+/**
+ * Check if the given sample aspect ratio of an image is valid.
+ *
+ * It is considered invalid if the denominator is 0 or if applying the ratio
+ * to the image size would make the smaller dimension less than 1. If the
+ * sar numerator is 0, it is considered unknown and will return as valid.
+ *
+ * @param w width of the image
+ * @param h height of the image
+ * @param sar sample aspect ratio of the image
+ * @return 0 if valid, a negative AVERROR code otherwise
+ */
+int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar);
+
+/**
+ * @}
+ */
+
+
+#endif /* AVUTIL_IMGUTILS_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/intfloat.h b/Externals/ffmpeg/dev/include/libavutil/intfloat.h
new file mode 100644
index 0000000000..fe3d7ec4a5
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/intfloat.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2011 Mans Rullgard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTFLOAT_H
+#define AVUTIL_INTFLOAT_H
+
+#include <stdint.h>
+#include "attributes.h"
+
+union av_intfloat32 {
+ uint32_t i;
+ float f;
+};
+
+union av_intfloat64 {
+ uint64_t i;
+ double f;
+};
+
+/**
+ * Reinterpret a 32-bit integer as a float.
+ */
+static av_always_inline float av_int2float(uint32_t i)
+{
+ union av_intfloat32 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a float as a 32-bit integer.
+ */
+static av_always_inline uint32_t av_float2int(float f)
+{
+ union av_intfloat32 v;
+ v.f = f;
+ return v.i;
+}
+
+/**
+ * Reinterpret a 64-bit integer as a double.
+ */
+static av_always_inline double av_int2double(uint64_t i)
+{
+ union av_intfloat64 v;
+ v.i = i;
+ return v.f;
+}
+
+/**
+ * Reinterpret a double as a 64-bit integer.
+ */
+static av_always_inline uint64_t av_double2int(double f)
+{
+ union av_intfloat64 v;
+ v.f = f;
+ return v.i;
+}
+
+#endif /* AVUTIL_INTFLOAT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/intreadwrite.h b/Externals/ffmpeg/dev/include/libavutil/intreadwrite.h
new file mode 100644
index 0000000000..51fbe30a23
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/intreadwrite.h
@@ -0,0 +1,629 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_INTREADWRITE_H
+#define AVUTIL_INTREADWRITE_H
+
+#include <stdint.h>
+#include "libavutil/avconfig.h"
+#include "attributes.h"
+#include "bswap.h"
+
+typedef union {
+ uint64_t u64;
+ uint32_t u32[2];
+ uint16_t u16[4];
+ uint8_t u8 [8];
+ double f64;
+ float f32[2];
+} av_alias av_alias64;
+
+typedef union {
+ uint32_t u32;
+ uint16_t u16[2];
+ uint8_t u8 [4];
+ float f32;
+} av_alias av_alias32;
+
+typedef union {
+ uint16_t u16;
+ uint8_t u8 [2];
+} av_alias av_alias16;
+
+/*
+ * Arch-specific headers can provide any combination of
+ * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros.
+ * Preprocessor symbols must be defined, even if these are implemented
+ * as inline functions.
+ *
+ * R/W means read/write, B/L/N means big/little/native endianness.
+ * The following macros require aligned access, compared to their
+ * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A.
+ * Incorrect usage may range from abysmal performance to crash
+ * depending on the platform.
+ *
+ * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U.
+ */
+
+#ifdef HAVE_AV_CONFIG_H
+
+#include "config.h"
+
+#if ARCH_ARM
+# include "arm/intreadwrite.h"
+#elif ARCH_AVR32
+# include "avr32/intreadwrite.h"
+#elif ARCH_MIPS
+# include "mips/intreadwrite.h"
+#elif ARCH_PPC
+# include "ppc/intreadwrite.h"
+#elif ARCH_TOMI
+# include "tomi/intreadwrite.h"
+#elif ARCH_X86
+# include "x86/intreadwrite.h"
+#endif
+
+#endif /* HAVE_AV_CONFIG_H */
+
+/*
+ * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers.
+ */
+
+#if AV_HAVE_BIGENDIAN
+
+# if defined(AV_RN16) && !defined(AV_RB16)
+# define AV_RB16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RB16)
+# define AV_RN16(p) AV_RB16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WB16)
+# define AV_WB16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WB16)
+# define AV_WN16(p, v) AV_WB16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RB24)
+# define AV_RB24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RB24)
+# define AV_RN24(p) AV_RB24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WB24)
+# define AV_WB24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WB24)
+# define AV_WN24(p, v) AV_WB24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RB32)
+# define AV_RB32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RB32)
+# define AV_RN32(p) AV_RB32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WB32)
+# define AV_WB32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WB32)
+# define AV_WN32(p, v) AV_WB32(p, v)
+# endif
+
+# if defined(AV_RN48) && !defined(AV_RB48)
+# define AV_RB48(p) AV_RN48(p)
+# elif !defined(AV_RN48) && defined(AV_RB48)
+# define AV_RN48(p) AV_RB48(p)
+# endif
+
+# if defined(AV_WN48) && !defined(AV_WB48)
+# define AV_WB48(p, v) AV_WN48(p, v)
+# elif !defined(AV_WN48) && defined(AV_WB48)
+# define AV_WN48(p, v) AV_WB48(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RB64)
+# define AV_RB64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RB64)
+# define AV_RN64(p) AV_RB64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WB64)
+# define AV_WB64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WB64)
+# define AV_WN64(p, v) AV_WB64(p, v)
+# endif
+
+#else /* AV_HAVE_BIGENDIAN */
+
+# if defined(AV_RN16) && !defined(AV_RL16)
+# define AV_RL16(p) AV_RN16(p)
+# elif !defined(AV_RN16) && defined(AV_RL16)
+# define AV_RN16(p) AV_RL16(p)
+# endif
+
+# if defined(AV_WN16) && !defined(AV_WL16)
+# define AV_WL16(p, v) AV_WN16(p, v)
+# elif !defined(AV_WN16) && defined(AV_WL16)
+# define AV_WN16(p, v) AV_WL16(p, v)
+# endif
+
+# if defined(AV_RN24) && !defined(AV_RL24)
+# define AV_RL24(p) AV_RN24(p)
+# elif !defined(AV_RN24) && defined(AV_RL24)
+# define AV_RN24(p) AV_RL24(p)
+# endif
+
+# if defined(AV_WN24) && !defined(AV_WL24)
+# define AV_WL24(p, v) AV_WN24(p, v)
+# elif !defined(AV_WN24) && defined(AV_WL24)
+# define AV_WN24(p, v) AV_WL24(p, v)
+# endif
+
+# if defined(AV_RN32) && !defined(AV_RL32)
+# define AV_RL32(p) AV_RN32(p)
+# elif !defined(AV_RN32) && defined(AV_RL32)
+# define AV_RN32(p) AV_RL32(p)
+# endif
+
+# if defined(AV_WN32) && !defined(AV_WL32)
+# define AV_WL32(p, v) AV_WN32(p, v)
+# elif !defined(AV_WN32) && defined(AV_WL32)
+# define AV_WN32(p, v) AV_WL32(p, v)
+# endif
+
+# if defined(AV_RN48) && !defined(AV_RL48)
+# define AV_RL48(p) AV_RN48(p)
+# elif !defined(AV_RN48) && defined(AV_RL48)
+# define AV_RN48(p) AV_RL48(p)
+# endif
+
+# if defined(AV_WN48) && !defined(AV_WL48)
+# define AV_WL48(p, v) AV_WN48(p, v)
+# elif !defined(AV_WN48) && defined(AV_WL48)
+# define AV_WN48(p, v) AV_WL48(p, v)
+# endif
+
+# if defined(AV_RN64) && !defined(AV_RL64)
+# define AV_RL64(p) AV_RN64(p)
+# elif !defined(AV_RN64) && defined(AV_RL64)
+# define AV_RN64(p) AV_RL64(p)
+# endif
+
+# if defined(AV_WN64) && !defined(AV_WL64)
+# define AV_WL64(p, v) AV_WN64(p, v)
+# elif !defined(AV_WN64) && defined(AV_WL64)
+# define AV_WN64(p, v) AV_WL64(p, v)
+# endif
+
+#endif /* !AV_HAVE_BIGENDIAN */
+
+/*
+ * Define AV_[RW]N helper macros to simplify definitions not provided
+ * by per-arch headers.
+ */
+
+#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__)
+
+union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias;
+union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias;
+union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias;
+
+# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l)
+# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v))
+
+#elif defined(__DECC)
+
+# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p)))
+# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v))
+
+#elif AV_HAVE_FAST_UNALIGNED
+
+# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s)
+# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#else
+
+#ifndef AV_RB16
+# define AV_RB16(x) \
+ ((((const uint8_t*)(x))[0] << 8) | \
+ ((const uint8_t*)(x))[1])
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, darg) do { \
+ unsigned d = (darg); \
+ ((uint8_t*)(p))[1] = (d); \
+ ((uint8_t*)(p))[0] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(x) \
+ ((((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, darg) do { \
+ unsigned d = (darg); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ } while(0)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(x) \
+ (((uint32_t)((const uint8_t*)(x))[0] << 24) | \
+ (((const uint8_t*)(x))[1] << 16) | \
+ (((const uint8_t*)(x))[2] << 8) | \
+ ((const uint8_t*)(x))[3])
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, darg) do { \
+ unsigned d = (darg); \
+ ((uint8_t*)(p))[3] = (d); \
+ ((uint8_t*)(p))[2] = (d)>>8; \
+ ((uint8_t*)(p))[1] = (d)>>16; \
+ ((uint8_t*)(p))[0] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(x) \
+ (((uint32_t)((const uint8_t*)(x))[3] << 24) | \
+ (((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, darg) do { \
+ unsigned d = (darg); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ } while(0)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[7])
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, darg) do { \
+ uint64_t d = (darg); \
+ ((uint8_t*)(p))[7] = (d); \
+ ((uint8_t*)(p))[6] = (d)>>8; \
+ ((uint8_t*)(p))[5] = (d)>>16; \
+ ((uint8_t*)(p))[4] = (d)>>24; \
+ ((uint8_t*)(p))[3] = (d)>>32; \
+ ((uint8_t*)(p))[2] = (d)>>40; \
+ ((uint8_t*)(p))[1] = (d)>>48; \
+ ((uint8_t*)(p))[0] = (d)>>56; \
+ } while(0)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(x) \
+ (((uint64_t)((const uint8_t*)(x))[7] << 56) | \
+ ((uint64_t)((const uint8_t*)(x))[6] << 48) | \
+ ((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, darg) do { \
+ uint64_t d = (darg); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ ((uint8_t*)(p))[6] = (d)>>48; \
+ ((uint8_t*)(p))[7] = (d)>>56; \
+ } while(0)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RN(s, p) AV_RB##s(p)
+# define AV_WN(s, p, v) AV_WB##s(p, v)
+#else
+# define AV_RN(s, p) AV_RL##s(p)
+# define AV_WN(s, p, v) AV_WL##s(p, v)
+#endif
+
+#endif /* HAVE_FAST_UNALIGNED */
+
+#ifndef AV_RN16
+# define AV_RN16(p) AV_RN(16, p)
+#endif
+
+#ifndef AV_RN32
+# define AV_RN32(p) AV_RN(32, p)
+#endif
+
+#ifndef AV_RN64
+# define AV_RN64(p) AV_RN(64, p)
+#endif
+
+#ifndef AV_WN16
+# define AV_WN16(p, v) AV_WN(16, p, v)
+#endif
+
+#ifndef AV_WN32
+# define AV_WN32(p, v) AV_WN(32, p, v)
+#endif
+
+#ifndef AV_WN64
+# define AV_WN64(p, v) AV_WN(64, p, v)
+#endif
+
+#if AV_HAVE_BIGENDIAN
+# define AV_RB(s, p) AV_RN##s(p)
+# define AV_WB(s, p, v) AV_WN##s(p, v)
+# define AV_RL(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v))
+#else
+# define AV_RB(s, p) av_bswap##s(AV_RN##s(p))
+# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v))
+# define AV_RL(s, p) AV_RN##s(p)
+# define AV_WL(s, p, v) AV_WN##s(p, v)
+#endif
+
+#define AV_RB8(x) (((const uint8_t*)(x))[0])
+#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0)
+
+#define AV_RL8(x) AV_RB8(x)
+#define AV_WL8(p, d) AV_WB8(p, d)
+
+#ifndef AV_RB16
+# define AV_RB16(p) AV_RB(16, p)
+#endif
+#ifndef AV_WB16
+# define AV_WB16(p, v) AV_WB(16, p, v)
+#endif
+
+#ifndef AV_RL16
+# define AV_RL16(p) AV_RL(16, p)
+#endif
+#ifndef AV_WL16
+# define AV_WL16(p, v) AV_WL(16, p, v)
+#endif
+
+#ifndef AV_RB32
+# define AV_RB32(p) AV_RB(32, p)
+#endif
+#ifndef AV_WB32
+# define AV_WB32(p, v) AV_WB(32, p, v)
+#endif
+
+#ifndef AV_RL32
+# define AV_RL32(p) AV_RL(32, p)
+#endif
+#ifndef AV_WL32
+# define AV_WL32(p, v) AV_WL(32, p, v)
+#endif
+
+#ifndef AV_RB64
+# define AV_RB64(p) AV_RB(64, p)
+#endif
+#ifndef AV_WB64
+# define AV_WB64(p, v) AV_WB(64, p, v)
+#endif
+
+#ifndef AV_RL64
+# define AV_RL64(p) AV_RL(64, p)
+#endif
+#ifndef AV_WL64
+# define AV_WL64(p, v) AV_WL(64, p, v)
+#endif
+
+#ifndef AV_RB24
+# define AV_RB24(x) \
+ ((((const uint8_t*)(x))[0] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[2])
+#endif
+#ifndef AV_WB24
+# define AV_WB24(p, d) do { \
+ ((uint8_t*)(p))[2] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[0] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RL24
+# define AV_RL24(x) \
+ ((((const uint8_t*)(x))[2] << 16) | \
+ (((const uint8_t*)(x))[1] << 8) | \
+ ((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL24
+# define AV_WL24(p, d) do { \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ } while(0)
+#endif
+
+#ifndef AV_RB48
+# define AV_RB48(x) \
+ (((uint64_t)((const uint8_t*)(x))[0] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[5])
+#endif
+#ifndef AV_WB48
+# define AV_WB48(p, darg) do { \
+ uint64_t d = (darg); \
+ ((uint8_t*)(p))[5] = (d); \
+ ((uint8_t*)(p))[4] = (d)>>8; \
+ ((uint8_t*)(p))[3] = (d)>>16; \
+ ((uint8_t*)(p))[2] = (d)>>24; \
+ ((uint8_t*)(p))[1] = (d)>>32; \
+ ((uint8_t*)(p))[0] = (d)>>40; \
+ } while(0)
+#endif
+
+#ifndef AV_RL48
+# define AV_RL48(x) \
+ (((uint64_t)((const uint8_t*)(x))[5] << 40) | \
+ ((uint64_t)((const uint8_t*)(x))[4] << 32) | \
+ ((uint64_t)((const uint8_t*)(x))[3] << 24) | \
+ ((uint64_t)((const uint8_t*)(x))[2] << 16) | \
+ ((uint64_t)((const uint8_t*)(x))[1] << 8) | \
+ (uint64_t)((const uint8_t*)(x))[0])
+#endif
+#ifndef AV_WL48
+# define AV_WL48(p, darg) do { \
+ uint64_t d = (darg); \
+ ((uint8_t*)(p))[0] = (d); \
+ ((uint8_t*)(p))[1] = (d)>>8; \
+ ((uint8_t*)(p))[2] = (d)>>16; \
+ ((uint8_t*)(p))[3] = (d)>>24; \
+ ((uint8_t*)(p))[4] = (d)>>32; \
+ ((uint8_t*)(p))[5] = (d)>>40; \
+ } while(0)
+#endif
+
+/*
+ * The AV_[RW]NA macros access naturally aligned data
+ * in a type-safe way.
+ */
+
+#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s)
+#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v))
+
+#ifndef AV_RN16A
+# define AV_RN16A(p) AV_RNA(16, p)
+#endif
+
+#ifndef AV_RN32A
+# define AV_RN32A(p) AV_RNA(32, p)
+#endif
+
+#ifndef AV_RN64A
+# define AV_RN64A(p) AV_RNA(64, p)
+#endif
+
+#ifndef AV_WN16A
+# define AV_WN16A(p, v) AV_WNA(16, p, v)
+#endif
+
+#ifndef AV_WN32A
+# define AV_WN32A(p, v) AV_WNA(32, p, v)
+#endif
+
+#ifndef AV_WN64A
+# define AV_WN64A(p, v) AV_WNA(64, p, v)
+#endif
+
+/*
+ * The AV_COPYxxU macros are suitable for copying data to/from unaligned
+ * memory locations.
+ */
+
+#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s));
+
+#ifndef AV_COPY16U
+# define AV_COPY16U(d, s) AV_COPYU(16, d, s)
+#endif
+
+#ifndef AV_COPY32U
+# define AV_COPY32U(d, s) AV_COPYU(32, d, s)
+#endif
+
+#ifndef AV_COPY64U
+# define AV_COPY64U(d, s) AV_COPYU(64, d, s)
+#endif
+
+#ifndef AV_COPY128U
+# define AV_COPY128U(d, s) \
+ do { \
+ AV_COPY64U(d, s); \
+ AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \
+ } while(0)
+#endif
+
+/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be
+ * naturally aligned. They may be implemented using MMX,
+ * so emms_c() must be called before using any float code
+ * afterwards.
+ */
+
+#define AV_COPY(n, d, s) \
+ (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n)
+
+#ifndef AV_COPY16
+# define AV_COPY16(d, s) AV_COPY(16, d, s)
+#endif
+
+#ifndef AV_COPY32
+# define AV_COPY32(d, s) AV_COPY(32, d, s)
+#endif
+
+#ifndef AV_COPY64
+# define AV_COPY64(d, s) AV_COPY(64, d, s)
+#endif
+
+#ifndef AV_COPY128
+# define AV_COPY128(d, s) \
+ do { \
+ AV_COPY64(d, s); \
+ AV_COPY64((char*)(d)+8, (char*)(s)+8); \
+ } while(0)
+#endif
+
+#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b))
+
+#ifndef AV_SWAP64
+# define AV_SWAP64(a, b) AV_SWAP(64, a, b)
+#endif
+
+#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0)
+
+#ifndef AV_ZERO16
+# define AV_ZERO16(d) AV_ZERO(16, d)
+#endif
+
+#ifndef AV_ZERO32
+# define AV_ZERO32(d) AV_ZERO(32, d)
+#endif
+
+#ifndef AV_ZERO64
+# define AV_ZERO64(d) AV_ZERO(64, d)
+#endif
+
+#ifndef AV_ZERO128
+# define AV_ZERO128(d) \
+ do { \
+ AV_ZERO64(d); \
+ AV_ZERO64((char*)(d)+8); \
+ } while(0)
+#endif
+
+#endif /* AVUTIL_INTREADWRITE_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/lfg.h b/Externals/ffmpeg/dev/include/libavutil/lfg.h
new file mode 100644
index 0000000000..ec90562cf2
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/lfg.h
@@ -0,0 +1,62 @@
+/*
+ * Lagged Fibonacci PRNG
+ * Copyright (c) 2008 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LFG_H
+#define AVUTIL_LFG_H
+
+typedef struct AVLFG {
+ unsigned int state[64];
+ int index;
+} AVLFG;
+
+void av_lfg_init(AVLFG *c, unsigned int seed);
+
+/**
+ * Get the next random unsigned 32-bit number using an ALFG.
+ *
+ * Please also consider a simple LCG like state= state*1664525+1013904223,
+ * it may be good enough and faster for your specific use case.
+ */
+static inline unsigned int av_lfg_get(AVLFG *c){
+ c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63];
+ return c->state[c->index++ & 63];
+}
+
+/**
+ * Get the next random unsigned 32-bit number using a MLFG.
+ *
+ * Please also consider av_lfg_get() above, it is faster.
+ */
+static inline unsigned int av_mlfg_get(AVLFG *c){
+ unsigned int a= c->state[(c->index-55) & 63];
+ unsigned int b= c->state[(c->index-24) & 63];
+ return c->state[c->index++ & 63] = 2*a*b+a+b;
+}
+
+/**
+ * Get the next two numbers generated by a Box-Muller Gaussian
+ * generator using the random numbers issued by lfg.
+ *
+ * @param out array where the two generated numbers are placed
+ */
+void av_bmg_get(AVLFG *lfg, double out[2]);
+
+#endif /* AVUTIL_LFG_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/log.h b/Externals/ffmpeg/dev/include/libavutil/log.h
new file mode 100644
index 0000000000..e1ff09bc65
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/log.h
@@ -0,0 +1,350 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LOG_H
+#define AVUTIL_LOG_H
+
+#include <stdarg.h>
+#include "avutil.h"
+#include "attributes.h"
+
+typedef enum {
+ AV_CLASS_CATEGORY_NA = 0,
+ AV_CLASS_CATEGORY_INPUT,
+ AV_CLASS_CATEGORY_OUTPUT,
+ AV_CLASS_CATEGORY_MUXER,
+ AV_CLASS_CATEGORY_DEMUXER,
+ AV_CLASS_CATEGORY_ENCODER,
+ AV_CLASS_CATEGORY_DECODER,
+ AV_CLASS_CATEGORY_FILTER,
+ AV_CLASS_CATEGORY_BITSTREAM_FILTER,
+ AV_CLASS_CATEGORY_SWSCALER,
+ AV_CLASS_CATEGORY_SWRESAMPLER,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40,
+ AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
+ AV_CLASS_CATEGORY_DEVICE_OUTPUT,
+ AV_CLASS_CATEGORY_DEVICE_INPUT,
+ AV_CLASS_CATEGORY_NB, ///< not part of ABI/API
+}AVClassCategory;
+
+#define AV_IS_INPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT))
+
+#define AV_IS_OUTPUT_DEVICE(category) \
+ (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \
+ ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT))
+
+struct AVOptionRanges;
+
+/**
+ * Describe the class of an AVClass context structure. That is an
+ * arbitrary struct of which the first field is a pointer to an
+ * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.).
+ */
+typedef struct AVClass {
+ /**
+ * The name of the class; usually it is the same name as the
+ * context structure type to which the AVClass is associated.
+ */
+ const char* class_name;
+
+ /**
+ * A pointer to a function which returns the name of a context
+ * instance ctx associated with the class.
+ */
+ const char* (*item_name)(void* ctx);
+
+ /**
+ * a pointer to the first option specified in the class if any or NULL
+ *
+ * @see av_set_default_options()
+ */
+ const struct AVOption *option;
+
+ /**
+ * LIBAVUTIL_VERSION with which this structure was created.
+ * This is used to allow fields to be added without requiring major
+ * version bumps everywhere.
+ */
+
+ int version;
+
+ /**
+ * Offset in the structure where log_level_offset is stored.
+ * 0 means there is no such variable
+ */
+ int log_level_offset_offset;
+
+ /**
+ * Offset in the structure where a pointer to the parent context for
+ * logging is stored. For example a decoder could pass its AVCodecContext
+ * to eval as such a parent context, which an av_log() implementation
+ * could then leverage to display the parent context.
+ * The offset can be NULL.
+ */
+ int parent_log_context_offset;
+
+ /**
+ * Return next AVOptions-enabled child or NULL
+ */
+ void* (*child_next)(void *obj, void *prev);
+
+ /**
+ * Return an AVClass corresponding to the next potential
+ * AVOptions-enabled child.
+ *
+ * The difference between child_next and this is that
+ * child_next iterates over _already existing_ objects, while
+ * child_class_next iterates over _all possible_ children.
+ */
+ const struct AVClass* (*child_class_next)(const struct AVClass *prev);
+
+ /**
+ * Category used for visualization (like color)
+ * This is only set if the category is equal for all objects using this class.
+ * available since version (51 << 16 | 56 << 8 | 100)
+ */
+ AVClassCategory category;
+
+ /**
+ * Callback to return the category.
+ * available since version (51 << 16 | 59 << 8 | 100)
+ */
+ AVClassCategory (*get_category)(void* ctx);
+
+ /**
+ * Callback to return the supported/allowed ranges.
+ * available since version (52.12)
+ */
+ int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags);
+} AVClass;
+
+/**
+ * @addtogroup lavu_log
+ *
+ * @{
+ *
+ * @defgroup lavu_log_constants Logging Constants
+ *
+ * @{
+ */
+
+/**
+ * Print no output.
+ */
+#define AV_LOG_QUIET -8
+
+/**
+ * Something went really wrong and we will crash now.
+ */
+#define AV_LOG_PANIC 0
+
+/**
+ * Something went wrong and recovery is not possible.
+ * For example, no header was found for a format which depends
+ * on headers or an illegal combination of parameters is used.
+ */
+#define AV_LOG_FATAL 8
+
+/**
+ * Something went wrong and cannot losslessly be recovered.
+ * However, not all future data is affected.
+ */
+#define AV_LOG_ERROR 16
+
+/**
+ * Something somehow does not look correct. This may or may not
+ * lead to problems. An example would be the use of '-vstrict -2'.
+ */
+#define AV_LOG_WARNING 24
+
+/**
+ * Standard information.
+ */
+#define AV_LOG_INFO 32
+
+/**
+ * Detailed information.
+ */
+#define AV_LOG_VERBOSE 40
+
+/**
+ * Stuff which is only useful for libav* developers.
+ */
+#define AV_LOG_DEBUG 48
+
+#define AV_LOG_MAX_OFFSET (AV_LOG_DEBUG - AV_LOG_QUIET)
+
+/**
+ * @}
+ */
+
+/**
+ * Sets additional colors for extended debugging sessions.
+ * @code
+ av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n");
+ @endcode
+ * Requires 256color terminal support. Uses outside debugging is not
+ * recommended.
+ */
+#define AV_LOG_C(x) (x << 8)
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ */
+void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4);
+
+
+/**
+ * Send the specified message to the log if the level is less than or equal
+ * to the current av_log_level. By default, all logging messages are sent to
+ * stderr. This behavior can be altered by setting a different logging callback
+ * function.
+ * @see av_log_set_callback
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_vlog(void *avcl, int level, const char *fmt, va_list vl);
+
+/**
+ * Get the current log level
+ *
+ * @see lavu_log_constants
+ *
+ * @return Current log level
+ */
+int av_log_get_level(void);
+
+/**
+ * Set the log level
+ *
+ * @see lavu_log_constants
+ *
+ * @param level Logging level
+ */
+void av_log_set_level(int level);
+
+/**
+ * Set the logging callback
+ *
+ * @note The callback must be thread safe, even if the application does not use
+ * threads itself as some codecs are multithreaded.
+ *
+ * @see av_log_default_callback
+ *
+ * @param callback A logging function with a compatible signature.
+ */
+void av_log_set_callback(void (*callback)(void*, int, const char*, va_list));
+
+/**
+ * Default logging callback
+ *
+ * It prints the message to stderr, optionally colorizing it.
+ *
+ * @param avcl A pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct.
+ * @param level The importance level of the message expressed using a @ref
+ * lavu_log_constants "Logging Constant".
+ * @param fmt The format string (printf-compatible) that specifies how
+ * subsequent arguments are converted to output.
+ * @param vl The arguments referenced by the format string.
+ */
+void av_log_default_callback(void *avcl, int level, const char *fmt,
+ va_list vl);
+
+/**
+ * Return the context name
+ *
+ * @param ctx The AVClass context
+ *
+ * @return The AVClass class_name
+ */
+const char* av_default_item_name(void* ctx);
+AVClassCategory av_default_get_category(void *ptr);
+
+/**
+ * Format a line of log the same way as the default callback.
+ * @param line buffer to receive the formated line
+ * @param line_size size of the buffer
+ * @param print_prefix used to store whether the prefix must be printed;
+ * must point to a persistent integer initially set to 1
+ */
+void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl,
+ char *line, int line_size, int *print_prefix);
+
+/**
+ * av_dlog macros
+ * Useful to print debug messages that shouldn't get compiled in normally.
+ */
+
+#ifdef DEBUG
+# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__)
+#else
+# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0)
+#endif
+
+/**
+ * Skip repeated messages, this requires the user app to use av_log() instead of
+ * (f)printf as the 2 would otherwise interfere and lead to
+ * "Last message repeated x times" messages below (f)printf messages with some
+ * bad luck.
+ * Also to receive the last, "last repeated" line if any, the user app must
+ * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end
+ */
+#define AV_LOG_SKIP_REPEATED 1
+
+/**
+ * Include the log severity in messages originating from codecs.
+ *
+ * Results in messages such as:
+ * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts
+ */
+#define AV_LOG_PRINT_LEVEL 2
+
+void av_log_set_flags(int arg);
+int av_log_get_flags(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LOG_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/lzo.h b/Externals/ffmpeg/dev/include/libavutil/lzo.h
new file mode 100644
index 0000000000..c03403992d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/lzo.h
@@ -0,0 +1,66 @@
+/*
+ * LZO 1x decompression
+ * copyright (c) 2006 Reimar Doeffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_LZO_H
+#define AVUTIL_LZO_H
+
+/**
+ * @defgroup lavu_lzo LZO
+ * @ingroup lavu_crypto
+ *
+ * @{
+ */
+
+#include <stdint.h>
+
+/** @name Error flags returned by av_lzo1x_decode
+ * @{ */
+/// end of the input buffer reached before decoding finished
+#define AV_LZO_INPUT_DEPLETED 1
+/// decoded data did not fit into output buffer
+#define AV_LZO_OUTPUT_FULL 2
+/// a reference to previously decoded data was wrong
+#define AV_LZO_INVALID_BACKPTR 4
+/// a non-specific error in the compressed bitstream
+#define AV_LZO_ERROR 8
+/** @} */
+
+#define AV_LZO_INPUT_PADDING 8
+#define AV_LZO_OUTPUT_PADDING 12
+
+/**
+ * @brief Decodes LZO 1x compressed data.
+ * @param out output buffer
+ * @param outlen size of output buffer, number of bytes left are returned here
+ * @param in input buffer
+ * @param inlen size of input buffer, number of bytes left are returned here
+ * @return 0 on success, otherwise a combination of the error flags above
+ *
+ * Make sure all buffers are appropriately padded, in must provide
+ * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes.
+ */
+int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_LZO_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/macros.h b/Externals/ffmpeg/dev/include/libavutil/macros.h
new file mode 100644
index 0000000000..446532377a
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/macros.h
@@ -0,0 +1,48 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Utility Preprocessor macros
+ */
+
+#ifndef AVUTIL_MACROS_H
+#define AVUTIL_MACROS_H
+
+/**
+ * @addtogroup preproc_misc Preprocessor String Macros
+ *
+ * String manipulation macros
+ *
+ * @{
+ */
+
+#define AV_STRINGIFY(s) AV_TOSTRING(s)
+#define AV_TOSTRING(s) #s
+
+#define AV_GLUE(a, b) a ## b
+#define AV_JOIN(a, b) AV_GLUE(a, b)
+
+/**
+ * @}
+ */
+
+#define AV_PRAGMA(s) _Pragma(#s)
+
+#endif /* AVUTIL_MACROS_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/mathematics.h b/Externals/ffmpeg/dev/include/libavutil/mathematics.h
new file mode 100644
index 0000000000..ac94488729
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/mathematics.h
@@ -0,0 +1,164 @@
+/*
+ * copyright (c) 2005-2012 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MATHEMATICS_H
+#define AVUTIL_MATHEMATICS_H
+
+#include <stdint.h>
+#include <math.h>
+#include "attributes.h"
+#include "rational.h"
+#include "intfloat.h"
+
+#ifndef M_E
+#define M_E 2.7182818284590452354 /* e */
+#endif
+#ifndef M_LN2
+#define M_LN2 0.69314718055994530942 /* log_e 2 */
+#endif
+#ifndef M_LN10
+#define M_LN10 2.30258509299404568402 /* log_e 10 */
+#endif
+#ifndef M_LOG2_10
+#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */
+#endif
+#ifndef M_PHI
+#define M_PHI 1.61803398874989484820 /* phi / golden ratio */
+#endif
+#ifndef M_PI
+#define M_PI 3.14159265358979323846 /* pi */
+#endif
+#ifndef M_PI_2
+#define M_PI_2 1.57079632679489661923 /* pi/2 */
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */
+#endif
+#ifndef M_SQRT2
+#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */
+#endif
+#ifndef NAN
+#define NAN av_int2float(0x7fc00000)
+#endif
+#ifndef INFINITY
+#define INFINITY av_int2float(0x7f800000)
+#endif
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+
+enum AVRounding {
+ AV_ROUND_ZERO = 0, ///< Round toward zero.
+ AV_ROUND_INF = 1, ///< Round away from zero.
+ AV_ROUND_DOWN = 2, ///< Round toward -infinity.
+ AV_ROUND_UP = 3, ///< Round toward +infinity.
+ AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero.
+ AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE
+};
+
+/**
+ * Return the greatest common divisor of a and b.
+ * If both a and b are 0 or either or both are <0 then behavior is
+ * undefined.
+ */
+int64_t av_const av_gcd(int64_t a, int64_t b);
+
+/**
+ * Rescale a 64-bit integer with rounding to nearest.
+ * A simple a*b/c isn't possible as it can overflow.
+ */
+int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const;
+
+/**
+ * Rescale a 64-bit integer with specified rounding.
+ * A simple a*b/c isn't possible as it can overflow.
+ *
+ * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is
+ * INT64_MIN or INT64_MAX then a is passed through unchanged.
+ */
+int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers.
+ */
+int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const;
+
+/**
+ * Rescale a 64-bit integer by 2 rational numbers with specified rounding.
+ *
+ * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is
+ * INT64_MIN or INT64_MAX then a is passed through unchanged.
+ */
+int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq,
+ enum AVRounding) av_const;
+
+/**
+ * Compare 2 timestamps each in its own timebases.
+ * The result of the function is undefined if one of the timestamps
+ * is outside the int64_t range when represented in the others timebase.
+ * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position
+ */
+int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b);
+
+/**
+ * Compare 2 integers modulo mod.
+ * That is we compare integers a and b for which only the least
+ * significant log2(mod) bits are known.
+ *
+ * @param mod must be a power of 2
+ * @return a negative value if a is smaller than b
+ * a positive value if a is greater than b
+ * 0 if a equals b
+ */
+int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod);
+
+/**
+ * Rescale a timestamp while preserving known durations.
+ *
+ * @param in_ts Input timestamp
+ * @param in_tb Input timebase
+ * @param fs_tb Duration and *last timebase
+ * @param duration duration till the next call
+ * @param out_tb Output timebase
+ */
+int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb);
+
+/**
+ * Add a value to a timestamp.
+ *
+ * This function guarantees that when the same value is repeatly added that
+ * no accumulation of rounding errors occurs.
+ *
+ * @param ts Input timestamp
+ * @param ts_tb Input timestamp timebase
+ * @param inc value to add to ts
+ * @param inc_tb inc timebase
+ */
+int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc);
+
+
+ /**
+ * @}
+ */
+
+#endif /* AVUTIL_MATHEMATICS_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/md5.h b/Externals/ffmpeg/dev/include/libavutil/md5.h
new file mode 100644
index 0000000000..79702c88c2
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/md5.h
@@ -0,0 +1,81 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MD5_H
+#define AVUTIL_MD5_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_md5 MD5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_md5_size;
+
+struct AVMD5;
+
+/**
+ * Allocate an AVMD5 context.
+ */
+struct AVMD5 *av_md5_alloc(void);
+
+/**
+ * Initialize MD5 hashing.
+ *
+ * @param ctx pointer to the function context (of size av_md5_size)
+ */
+void av_md5_init(struct AVMD5 *ctx);
+
+/**
+ * Update hash value.
+ *
+ * @param ctx hash function context
+ * @param src input data to update hash with
+ * @param len input data length
+ */
+void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param ctx hash function context
+ * @param dst buffer where output digest value is stored
+ */
+void av_md5_final(struct AVMD5 *ctx, uint8_t *dst);
+
+/**
+ * Hash an array of data.
+ *
+ * @param dst The output buffer to write the digest into
+ * @param src The data to hash
+ * @param len The length of the data, in bytes
+ */
+void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MD5_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/mem.h b/Externals/ffmpeg/dev/include/libavutil/mem.h
new file mode 100644
index 0000000000..2a1e36d69f
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/mem.h
@@ -0,0 +1,389 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * memory handling functions
+ */
+
+#ifndef AVUTIL_MEM_H
+#define AVUTIL_MEM_H
+
+#include <limits.h>
+#include <stdint.h>
+
+#include "attributes.h"
+#include "error.h"
+#include "avutil.h"
+
+/**
+ * @addtogroup lavu_mem
+ * @{
+ */
+
+
+#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v
+#elif defined(__TI_COMPILER_VERSION__)
+ #define DECLARE_ALIGNED(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ t __attribute__((aligned(n))) v
+ #define DECLARE_ASM_CONST(n,t,v) \
+ AV_PRAGMA(DATA_ALIGN(v,n)) \
+ static const t __attribute__((aligned(n))) v
+#elif defined(__GNUC__)
+ #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v
+ #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v
+#elif defined(_MSC_VER)
+ #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v
+ #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v
+#else
+ #define DECLARE_ALIGNED(n,t,v) t v
+ #define DECLARE_ASM_CONST(n,t,v) static const t v
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(3,1)
+ #define av_malloc_attrib __attribute__((__malloc__))
+#else
+ #define av_malloc_attrib
+#endif
+
+#if AV_GCC_VERSION_AT_LEAST(4,3)
+ #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__)))
+#else
+ #define av_alloc_size(...)
+#endif
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU).
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ */
+void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of size * nmemb bytes with av_malloc().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_malloc()
+ */
+av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)
+{
+ if (!size || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_malloc(nmemb * size);
+}
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param size Size in bytes of the memory block to be allocated or
+ * reallocated.
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ * @see av_fast_realloc()
+ */
+void *av_realloc(void *ptr, size_t size) av_alloc_size(2);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * This function does the same thing as av_realloc, except:
+ * - It takes two arguments and checks the result of the multiplication for
+ * integer overflow.
+ * - It frees the input block in case of failure, thus avoiding the memory
+ * leak with the classic "buf = realloc(buf); if (!buf) return -1;".
+ */
+void *av_realloc_f(void *ptr, size_t nelem, size_t elsize);
+
+/**
+ * Allocate or reallocate a block of memory.
+ * If *ptr is NULL and size > 0, allocate a new block. If
+ * size is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param size Size in bytes for the memory block to be allocated or
+ * reallocated
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_reallocp(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+int av_reallocp(void *ptr, size_t size);
+
+/**
+ * Allocate or reallocate an array.
+ * If ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a memory block already allocated with
+ * av_realloc() or NULL.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to a newly-reallocated block or NULL if the block
+ * cannot be reallocated or the function is used to free the memory block.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Allocate or reallocate an array through a pointer to a pointer.
+ * If *ptr is NULL and nmemb > 0, allocate a new block. If
+ * nmemb is zero, free the memory block pointed to by ptr.
+ * @param ptr Pointer to a pointer to a memory block already allocated
+ * with av_realloc(), or pointer to a pointer to NULL.
+ * The pointer is updated on success, or freed on failure.
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Zero on success, an AVERROR error code on failure.
+ * @warning Pointers originating from the av_malloc() family of functions must
+ * not be passed to av_realloc(). The former can be implemented using
+ * memalign() (or other functions), and there is no guarantee that
+ * pointers from such functions can be passed to realloc() at all.
+ * The situation is undefined according to POSIX and may crash with
+ * some libc implementations.
+ */
+av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc().
+ * @param ptr Pointer to the memory block which should be freed.
+ * @note ptr = NULL is explicitly allowed.
+ * @note It is recommended that you use av_freep() instead.
+ * @see av_freep()
+ */
+void av_free(void *ptr);
+
+/**
+ * Allocate a block of size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * @param size Size in bytes for the memory block to be allocated.
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a block of nmemb * size bytes with alignment suitable for all
+ * memory accesses (including vectors if available on the CPU) and
+ * zero all the bytes of the block.
+ * The allocation will fail if nmemb * size is greater than or equal
+ * to INT_MAX.
+ * @param nmemb
+ * @param size
+ * @return Pointer to the allocated block, NULL if it cannot be allocated.
+ */
+void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib;
+
+/**
+ * Allocate a block of size * nmemb bytes with av_mallocz().
+ * @param nmemb Number of elements
+ * @param size Size of the single element
+ * @return Pointer to the allocated block, NULL if the block cannot
+ * be allocated.
+ * @see av_mallocz()
+ * @see av_malloc_array()
+ */
+av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size)
+{
+ if (!size || nmemb >= INT_MAX / size)
+ return NULL;
+ return av_mallocz(nmemb * size);
+}
+
+/**
+ * Duplicate the string s.
+ * @param s string to be duplicated
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strdup(const char *s) av_malloc_attrib;
+
+/**
+ * Duplicate a substring of the string s.
+ * @param s string to be duplicated
+ * @param len the maximum length of the resulting string (not counting the
+ * terminating byte).
+ * @return Pointer to a newly-allocated string containing a
+ * copy of s or NULL if the string cannot be allocated.
+ */
+char *av_strndup(const char *s, size_t len) av_malloc_attrib;
+
+/**
+ * Duplicate the buffer p.
+ * @param p buffer to be duplicated
+ * @return Pointer to a newly allocated buffer containing a
+ * copy of p or NULL if the buffer cannot be allocated.
+ */
+void *av_memdup(const void *p, size_t size);
+
+/**
+ * Free a memory block which has been allocated with av_malloc(z)() or
+ * av_realloc() and set the pointer pointing to it to NULL.
+ * @param ptr Pointer to the pointer to the memory block which should
+ * be freed.
+ * @note passing a pointer to a NULL pointer is safe and leads to no action.
+ * @see av_free()
+ */
+void av_freep(void *ptr);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * The array to grow is supposed to be an array of pointers to
+ * structures, and the element to add must be a pointer to an already
+ * allocated structure.
+ *
+ * The array is reallocated when its size reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by nb_ptr
+ * is incremented.
+ * In case of failure, the array is freed, *tab_ptr is set to NULL and
+ * *nb_ptr is set to 0.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr pointer to the number of elements in the array
+ * @param elem element to add
+ * @see av_dynarray_add_nofree(), av_dynarray2_add()
+ */
+void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element to a dynamic array.
+ *
+ * Function has the same functionality as av_dynarray_add(),
+ * but it doesn't free memory on fails. It returns error code
+ * instead and leave current buffer untouched.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr pointer to the number of elements in the array
+ * @param elem element to add
+ * @return >=0 on success, negative otherwise.
+ * @see av_dynarray_add(), av_dynarray2_add()
+ */
+int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem);
+
+/**
+ * Add an element of size elem_size to a dynamic array.
+ *
+ * The array is reallocated when its number of elements reaches powers of 2.
+ * Therefore, the amortized cost of adding an element is constant.
+ *
+ * In case of success, the pointer to the array is updated in order to
+ * point to the new grown array, and the number pointed to by nb_ptr
+ * is incremented.
+ * In case of failure, the array is freed, *tab_ptr is set to NULL and
+ * *nb_ptr is set to 0.
+ *
+ * @param tab_ptr pointer to the array to grow
+ * @param nb_ptr pointer to the number of elements in the array
+ * @param elem_size size in bytes of the elements in the array
+ * @param elem_data pointer to the data of the element to add. If NULL, the space of
+ * the new added element is not filled.
+ * @return pointer to the data of the element to copy in the new allocated space.
+ *                  If NULL, the new allocated space is left uninitialized.
+ * @see av_dynarray_add(), av_dynarray_add_nofree()
+ */
+void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size,
+ const uint8_t *elem_data);
+
+/**
+ * Multiply two size_t values checking for overflow.
+ * @return 0 if success, AVERROR(EINVAL) if overflow.
+ */
+static inline int av_size_mult(size_t a, size_t b, size_t *r)
+{
+ size_t t = a * b;
+ /* Hack inspired from glibc: only try the division if nelem and elsize
+ * are both greater than sqrt(SIZE_MAX). */
+ if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b)
+ return AVERROR(EINVAL);
+ *r = t;
+ return 0;
+}
+
+/**
+ * Set the maximum size that may be allocated in one block.
+ */
+void av_max_alloc(size_t max);
+
+/**
+ * deliberately overlapping memcpy implementation
+ * @param dst destination buffer
+ * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0
+ * @param cnt number of bytes to copy, must be >= 0
+ *
+ * cnt > back is valid, this will copy the bytes we just copied,
+ * thus creating a repeating pattern with a period length of back.
+ */
+void av_memcpy_backptr(uint8_t *dst, int back, int cnt);
+
+/**
+ * Reallocate the given block if it is not large enough, otherwise do nothing.
+ *
+ * @see av_realloc
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc the current buffer contents might not be
+ * preserved and on error the old buffer is freed, thus no special
+ * handling to avoid memleaks is necessary.
+ *
+ * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer
+ * @param size size of the buffer *ptr points to
+ * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and
+ * *size 0 if an error occurred.
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_MEM_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/motion_vector.h b/Externals/ffmpeg/dev/include/libavutil/motion_vector.h
new file mode 100644
index 0000000000..30cfb994b7
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/motion_vector.h
@@ -0,0 +1,50 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MOTION_VECTOR_H
+#define AVUTIL_MOTION_VECTOR_H
+
+#include <stdint.h>
+
+typedef struct AVMotionVector {
+ /**
+ * Where the current macroblock comes from; negative value when it comes
+ * from the past, positive value when it comes from the future.
+ * XXX: set exact relative ref frame reference instead of a +/- 1 "direction".
+ */
+ int32_t source;
+ /**
+ * Width and height of the block.
+ */
+ uint8_t w, h;
+ /**
+ * Absolute source position. Can be outside the frame area.
+ */
+ int16_t src_x, src_y;
+ /**
+ * Absolute destination position. Can be outside the frame area.
+ */
+ int16_t dst_x, dst_y;
+ /**
+ * Extra flag information.
+ * Currently unused.
+ */
+ uint64_t flags;
+} AVMotionVector;
+
+#endif /* AVUTIL_MOTION_VECTOR_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/murmur3.h b/Externals/ffmpeg/dev/include/libavutil/murmur3.h
new file mode 100644
index 0000000000..f29ed973e9
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/murmur3.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2013 Reimar Döffinger
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_MURMUR3_H
+#define AVUTIL_MURMUR3_H
+
+#include <stdint.h>
+
+struct AVMurMur3 *av_murmur3_alloc(void);
+void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed);
+void av_murmur3_init(struct AVMurMur3 *c);
+void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len);
+void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]);
+
+#endif /* AVUTIL_MURMUR3_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/old_pix_fmts.h b/Externals/ffmpeg/dev/include/libavutil/old_pix_fmts.h
new file mode 100644
index 0000000000..cd1ed7c19f
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/old_pix_fmts.h
@@ -0,0 +1,177 @@
+/*
+ * copyright (c) 2006-2012 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OLD_PIX_FMTS_H
+#define AVUTIL_OLD_PIX_FMTS_H
+
+/*
+ * This header exists to prevent new pixel formats from being accidentally added
+ * to the deprecated list.
+ * Do not include it directly. It will be removed on next major bump
+ *
+ * Do not add new items to this list. Use the AVPixelFormat enum instead.
+ */
+ PIX_FMT_NONE = AV_PIX_FMT_NONE,
+ PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ PIX_FMT_GRAY8, ///< Y , 8bpp
+ PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+ PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ PIX_FMT_XVMC_MPEG2_IDCT,
+#endif /* FF_API_XVMC */
+ PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+ PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+ PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ PIX_FMT_GRAY8A, ///< 8bit gray, 8bit alpha
+ PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+ //the following 10 formats have the disadvantage of needing 1 format for each bit depth, thus
+ //If you want to support multiple bit depths, then using PIX_FMT_YUV420P16* with the bpp stored separately
+ //is better
+ PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+
+#ifdef AV_PIX_FMT_ABI_GIT_MASTER
+ PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+ PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big endian
+ PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little endian
+ PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big endian
+ PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little endian
+ PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big endian
+ PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little endian
+
+#ifndef AV_PIX_FMT_ABI_GIT_MASTER
+ PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+ PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
+ PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
+ PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
+ PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
+ PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+
+ PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big endian
+ PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little endian
+ PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big endian
+ PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little endian
+
+ PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+#endif /* AVUTIL_OLD_PIX_FMTS_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/opt.h b/Externals/ffmpeg/dev/include/libavutil/opt.h
new file mode 100644
index 0000000000..5fc8a9b58a
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/opt.h
@@ -0,0 +1,901 @@
+/*
+ * AVOptions
+ * copyright (c) 2005 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_OPT_H
+#define AVUTIL_OPT_H
+
+/**
+ * @file
+ * AVOptions
+ */
+
+#include "rational.h"
+#include "avutil.h"
+#include "dict.h"
+#include "log.h"
+#include "pixfmt.h"
+#include "samplefmt.h"
+#include "version.h"
+
+/**
+ * @defgroup avoptions AVOptions
+ * @ingroup lavu_data
+ * @{
+ * AVOptions provide a generic system to declare options on arbitrary structs
+ * ("objects"). An option can have a help text, a type and a range of possible
+ * values. Options may then be enumerated, read and written to.
+ *
+ * @section avoptions_implement Implementing AVOptions
+ * This section describes how to add AVOptions capabilities to a struct.
+ *
+ * All AVOptions-related information is stored in an AVClass. Therefore
+ * the first member of the struct should be a pointer to an AVClass describing it.
+ * The option field of the AVClass must be set to a NULL-terminated static array
+ * of AVOptions. Each AVOption must have a non-empty name, a type, a default
+ * value and for number-type AVOptions also a range of allowed values. It must
+ * also declare an offset in bytes from the start of the struct, where the field
+ * associated with this AVOption is located. Other fields in the AVOption struct
+ * should also be set when applicable, but are not required.
+ *
+ * The following example illustrates an AVOptions-enabled struct:
+ * @code
+ * typedef struct test_struct {
+ * AVClass *class;
+ * int int_opt;
+ * char *str_opt;
+ * uint8_t *bin_opt;
+ * int bin_len;
+ * } test_struct;
+ *
+ * static const AVOption test_options[] = {
+ * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt),
+ * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX },
+ * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt),
+ * AV_OPT_TYPE_STRING },
+ * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt),
+ * AV_OPT_TYPE_BINARY },
+ * { NULL },
+ * };
+ *
+ * static const AVClass test_class = {
+ * .class_name = "test class",
+ * .item_name = av_default_item_name,
+ * .option = test_options,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ * @endcode
+ *
+ * Next, when allocating your struct, you must ensure that the AVClass pointer
+ * is set to the correct value. Then, av_opt_set_defaults() can be called to
+ * initialize defaults. After that the struct is ready to be used with the
+ * AVOptions API.
+ *
+ * When cleaning up, you may use the av_opt_free() function to automatically
+ * free all the allocated string and binary options.
+ *
+ * Continuing with the above example:
+ *
+ * @code
+ * test_struct *alloc_test_struct(void)
+ * {
+ * test_struct *ret = av_malloc(sizeof(*ret));
+ * ret->class = &test_class;
+ * av_opt_set_defaults(ret);
+ * return ret;
+ * }
+ * void free_test_struct(test_struct **foo)
+ * {
+ * av_opt_free(*foo);
+ * av_freep(foo);
+ * }
+ * @endcode
+ *
+ * @subsection avoptions_implement_nesting Nesting
+ * It may happen that an AVOptions-enabled struct contains another
+ * AVOptions-enabled struct as a member (e.g. AVCodecContext in
+ * libavcodec exports generic options, while its priv_data field exports
+ * codec-specific options). In such a case, it is possible to set up the
+ * parent struct to export a child's options. To do that, simply
+ * implement AVClass.child_next() and AVClass.child_class_next() in the
+ * parent struct's AVClass.
+ * Assuming that the test_struct from above now also contains a
+ * child_struct field:
+ *
+ * @code
+ * typedef struct child_struct {
+ * AVClass *class;
+ * int flags_opt;
+ * } child_struct;
+ * static const AVOption child_opts[] = {
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX },
+ * { NULL },
+ * };
+ * static const AVClass child_class = {
+ * .class_name = "child class",
+ * .item_name = av_default_item_name,
+ * .option = child_opts,
+ * .version = LIBAVUTIL_VERSION_INT,
+ * };
+ *
+ * void *child_next(void *obj, void *prev)
+ * {
+ * test_struct *t = obj;
+ * if (!prev && t->child_struct)
+ * return t->child_struct;
+ * return NULL;
+ * }
+ * const AVClass *child_class_next(const AVClass *prev)
+ * {
+ * return prev ? NULL : &child_class;
+ * }
+ * @endcode
+ * Putting child_next() and child_class_next() as defined above into
+ * test_class will now make child_struct's options accessible through
+ * test_struct (again, proper setup as described above needs to be done on
+ * child_struct right after it is created).
+ *
+ * From the above example it might not be clear why both child_next()
+ * and child_class_next() are needed. The distinction is that child_next()
+ * iterates over actually existing objects, while child_class_next()
+ * iterates over all possible child classes. E.g. if an AVCodecContext
+ * was initialized to use a codec which has private options, then its
+ * child_next() will return AVCodecContext.priv_data and finish
+ * iterating. OTOH child_class_next() on AVCodecContext.av_class will
+ * iterate over all available codecs with private options.
+ *
+ * @subsection avoptions_implement_named_constants Named constants
+ * It is possible to create named constants for options. Simply set the unit
+ * field of the option the constants should apply to a string and
+ * create the constants themselves as options of type AV_OPT_TYPE_CONST
+ * with their unit field set to the same string.
+ * Their default_val field should contain the value of the named
+ * constant.
+ * For example, to add some named constants for the test_flags option
+ * above, put the following into the child_opts array:
+ * @code
+ * { "test_flags", "This is a test option of flags type.",
+ * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" },
+ * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" },
+ * @endcode
+ *
+ * @section avoptions_use Using AVOptions
+ * This section deals with accessing options in an AVOptions-enabled struct.
+ * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or
+ * AVFormatContext in libavformat.
+ *
+ * @subsection avoptions_use_examine Examining AVOptions
+ * The basic functions for examining options are av_opt_next(), which iterates
+ * over all options defined for one object, and av_opt_find(), which searches
+ * for an option with the given name.
+ *
+ * The situation is more complicated with nesting. An AVOptions-enabled struct
+ * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag
+ * to av_opt_find() will make the function search children recursively.
+ *
+ * For enumerating there are basically two cases. The first is when you want to
+ * get all options that may potentially exist on the struct and its children
+ * (e.g. when constructing documentation). In that case you should call
+ * av_opt_child_class_next() recursively on the parent struct's AVClass. The
+ * second case is when you have an already initialized struct with all its
+ * children and you want to get all options that can be actually written or read
+ * from it. In that case you should call av_opt_child_next() recursively (and
+ * av_opt_next() on each result).
+ *
+ * @subsection avoptions_use_get_set Reading and writing AVOptions
+ * When setting options, you often have a string read directly from the
+ * user. In such a case, simply passing it to av_opt_set() is enough. For
+ * non-string type options, av_opt_set() will parse the string according to the
+ * option type.
+ *
+ * Similarly av_opt_get() will read any option type and convert it to a string
+ * which will be returned. Do not forget that the string is allocated, so you
+ * have to free it with av_free().
+ *
+ * In some cases it may be more convenient to put all options into an
+ * AVDictionary and call av_opt_set_dict() on it. A specific case of this
+ * are the format/codec open functions in lavf/lavc which take a dictionary
+ * filled with options as a parameter. This allows setting some options
+ * that cannot be set otherwise, since e.g. the input file format is not known
+ * before the file is actually opened.
+ */
+
+enum AVOptionType{
+ AV_OPT_TYPE_FLAGS,
+ AV_OPT_TYPE_INT,
+ AV_OPT_TYPE_INT64,
+ AV_OPT_TYPE_DOUBLE,
+ AV_OPT_TYPE_FLOAT,
+ AV_OPT_TYPE_STRING,
+ AV_OPT_TYPE_RATIONAL,
+ AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ AV_OPT_TYPE_DICT,
+ AV_OPT_TYPE_CONST = 128,
+ AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers
+ AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'),
+ AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'),
+ AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational
+ AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '),
+ AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'),
+ AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'),
+#if FF_API_OLD_AVOPTIONS
+ FF_OPT_TYPE_FLAGS = 0,
+ FF_OPT_TYPE_INT,
+ FF_OPT_TYPE_INT64,
+ FF_OPT_TYPE_DOUBLE,
+ FF_OPT_TYPE_FLOAT,
+ FF_OPT_TYPE_STRING,
+ FF_OPT_TYPE_RATIONAL,
+ FF_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length
+ FF_OPT_TYPE_CONST=128,
+#endif
+};
+
+/**
+ * AVOption
+ */
+typedef struct AVOption {
+ const char *name;
+
+ /**
+ * short English help text
+ * @todo What about other languages?
+ */
+ const char *help;
+
+ /**
+ * The offset relative to the context structure where the option
+ * value is stored. It should be 0 for named constants.
+ */
+ int offset;
+ enum AVOptionType type;
+
+ /**
+ * the default value for scalar options
+ */
+ union {
+ int64_t i64;
+ double dbl;
+ const char *str;
+ /* TODO those are unused now */
+ AVRational q;
+ } default_val;
+ double min; ///< minimum valid value for the option
+ double max; ///< maximum valid value for the option
+
+ int flags;
+#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding
+#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding
+#if FF_API_OPT_TYPE_METADATA
+#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ...
+#endif
+#define AV_OPT_FLAG_AUDIO_PARAM 8
+#define AV_OPT_FLAG_VIDEO_PARAM 16
+#define AV_OPT_FLAG_SUBTITLE_PARAM 32
+/**
+ * The option is intended for exporting values to the caller.
+ */
+#define AV_OPT_FLAG_EXPORT 64
+/**
+ * The option may not be set through the AVOptions API, only read.
+ * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set.
+ */
+#define AV_OPT_FLAG_READONLY 128
+#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering
+//FIXME think about enc-audio, ... style flags
+
+ /**
+ * The logical unit to which the option belongs. Non-constant
+ * options and corresponding named constants share the same
+ * unit. May be NULL.
+ */
+ const char *unit;
+} AVOption;
+
+/**
+ * A single allowed range of values, or a single allowed value.
+ */
+typedef struct AVOptionRange {
+ const char *str;
+ /**
+ * Value range.
+ * For string ranges this represents the min/max length.
+ * For dimensions this represents the min/max pixel count or width/height in multi-component case.
+ */
+ double value_min, value_max;
+ /**
+ * Value's component range.
+ * For string this represents the unicode range for chars, 0-127 limits to ASCII.
+ */
+ double component_min, component_max;
+ /**
+ * Range flag.
+ * If set to 1 the struct encodes a range, if set to 0 a single value.
+ */
+ int is_range;
+} AVOptionRange;
+
+/**
+ * List of AVOptionRange structs.
+ */
+typedef struct AVOptionRanges {
+ /**
+ * Array of option ranges.
+ *
+ * Most of option types use just one component.
+ * Following describes multi-component option types:
+ *
+ * AV_OPT_TYPE_IMAGE_SIZE:
+ * component index 0: range of pixel count (width * height).
+ * component index 1: range of width.
+ * component index 2: range of height.
+ *
+ * @note To obtain multi-component version of this structure, user must
+ * provide AV_OPT_MULTI_COMPONENT_RANGE to av_opt_query_ranges or
+ * av_opt_query_ranges_default function.
+ *
+ * Multi-component range can be read as in following example:
+ *
+ * @code
+ * int range_index, component_index;
+ * AVOptionRanges *ranges;
+ * AVOptionRange *range[3]; //may require more than 3 in the future.
+ * av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE);
+ * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) {
+ * for (component_index = 0; component_index < ranges->nb_components; component_index++)
+ * range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index];
+ * //do something with range here.
+ * }
+ * av_opt_freep_ranges(&ranges);
+ * @endcode
+ */
+ AVOptionRange **range;
+ /**
+ * Number of ranges per component.
+ */
+ int nb_ranges;
+ /**
+ * Number of components.
+ */
+ int nb_components;
+} AVOptionRanges;
+
+
+#if FF_API_OLD_AVOPTIONS
+/**
+ * Set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an
+ * AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. If the field is not of a string
+ * type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param[out] o_out if non-NULL put here a pointer to the AVOption
+ * found
+ * @param alloc this parameter is currently ignored
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ * @deprecated use av_opt_set()
+ */
+attribute_deprecated
+int av_set_string3(void *obj, const char *name, const char *val, int alloc, const AVOption **o_out);
+
+attribute_deprecated const AVOption *av_set_double(void *obj, const char *name, double n);
+attribute_deprecated const AVOption *av_set_q(void *obj, const char *name, AVRational n);
+attribute_deprecated const AVOption *av_set_int(void *obj, const char *name, int64_t n);
+
+double av_get_double(void *obj, const char *name, const AVOption **o_out);
+AVRational av_get_q(void *obj, const char *name, const AVOption **o_out);
+int64_t av_get_int(void *obj, const char *name, const AVOption **o_out);
+attribute_deprecated const char *av_get_string(void *obj, const char *name, const AVOption **o_out, char *buf, int buf_len);
+attribute_deprecated const AVOption *av_next_option(FF_CONST_AVUTIL55 void *obj, const AVOption *last);
+#endif
+
+/**
+ * Show the obj options.
+ *
+ * @param req_flags requested flags for the options to show. Show only the
+ * options for which it is opt->flags & req_flags.
+ * @param rej_flags rejected flags for the options to show. Show only the
+ * options for which it is !(opt->flags & req_flags).
+ * @param av_log_obj log context to use for showing the options
+ */
+int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags);
+
+/**
+ * Set the values of all AVOption fields to their default values.
+ *
+ * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass)
+ */
+void av_opt_set_defaults(void *s);
+
+#if FF_API_OLD_AVOPTIONS
+attribute_deprecated
+void av_opt_set_defaults2(void *s, int mask, int flags);
+#endif
+
+/**
+ * Parse the key/value pairs list in opts. For each key/value pair
+ * found, stores the value in the field in ctx that is named like the
+ * key. ctx must be an AVClass context, storing is done using
+ * AVOptions.
+ *
+ * @param opts options string to parse, may be NULL
+ * @param key_val_sep a 0-terminated list of characters used to
+ * separate key from value
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other
+ * @return the number of successfully set key/value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_opt_set() if a key/value pair
+ * cannot be set
+ */
+int av_set_options_string(void *ctx, const char *opts,
+ const char *key_val_sep, const char *pairs_sep);
+
+/**
+ * Parse the key-value pairs list in opts. For each key=value pair found,
+ * set the value of the corresponding option in ctx.
+ *
+ * @param ctx the AVClass object to set options on
+ * @param opts the options string, key-value pairs separated by a
+ * delimiter
+ * @param shorthand a NULL-terminated array of options names for shorthand
+ * notation: if the first field in opts has no key part,
+ * the key is taken from the first element of shorthand;
+ * then again for the second, etc., until either opts is
+ * finished, shorthand is finished or a named option is
+ * found; after that, all options must be named
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value, for example '='
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other, for example ':' or ','
+ * @return the number of successfully set key=value pairs, or a negative
+ * value corresponding to an AVERROR code in case of error:
+ * AVERROR(EINVAL) if opts cannot be parsed,
+ * the error code issued by av_set_string3() if a key/value pair
+ * cannot be set
+ *
+ * Options names must use only the following characters: a-z A-Z 0-9 - . / _
+ * Separators must use characters distinct from option names and from each
+ * other.
+ */
+int av_opt_set_from_string(void *ctx, const char *opts,
+ const char *const *shorthand,
+ const char *key_val_sep, const char *pairs_sep);
+/**
+ * Free all allocated objects in obj.
+ */
+void av_opt_free(void *obj);
+
+/**
+ * Check whether a particular flag is set in a flags field.
+ *
+ * @param field_name the name of the flag field option
+ * @param flag_name the name of the flag to check
+ * @return non-zero if the flag is set, zero if the flag isn't set,
+ * isn't of the right type, or the flags field doesn't exist.
+ */
+int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name);
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * Of course this new dictionary needs to be freed by caller
+ * with av_dict_free().
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict(void *obj, struct AVDictionary **options);
+
+
+/**
+ * Set all the options from a given dictionary on an object.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass
+ * @param options options to process. This dictionary will be freed and replaced
+ * by a new one containing all options not found in obj.
+ * Of course this new dictionary needs to be freed by caller
+ * with av_dict_free().
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return 0 on success, a negative AVERROR if some option was found in obj,
+ * but could not be set.
+ *
+ * @see av_dict_copy()
+ */
+int av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags);
+
+/**
+ * Extract a key-value pair from the beginning of a string.
+ *
+ * @param ropts pointer to the options string, will be updated to
+ * point to the rest of the string (one of the pairs_sep
+ * or the final NUL)
+ * @param key_val_sep a 0-terminated list of characters used to separate
+ * key from value, for example '='
+ * @param pairs_sep a 0-terminated list of characters used to separate
+ * two pairs from each other, for example ':' or ','
+ * @param flags flags; see the AV_OPT_FLAG_* values below
+ * @param rkey parsed key; must be freed using av_free()
+ * @param rval parsed value; must be freed using av_free()
+ *
+ * @return >=0 for success, or a negative value corresponding to an
+ * AVERROR code in case of error; in particular:
+ * AVERROR(EINVAL) if no key is present
+ *
+ */
+int av_opt_get_key_value(const char **ropts,
+ const char *key_val_sep, const char *pairs_sep,
+ unsigned flags,
+ char **rkey, char **rval);
+
+enum {
+
+ /**
+ * Accept to parse a value without a key; the key will then be returned
+ * as NULL.
+ */
+ AV_OPT_FLAG_IMPLICIT_KEY = 1,
+};
+
+/**
+ * @defgroup opt_eval_funcs Evaluating option strings
+ * @{
+ * This group of functions can be used to evaluate option strings
+ * and get numbers out of them. They do the same thing as av_opt_set(),
+ * except the result is written into the caller-supplied pointer.
+ *
+ * @param obj a struct whose first element is a pointer to AVClass.
+ * @param o an option for which the string is to be evaluated.
+ * @param val string to be evaluated.
+ * @param *_out value of the string will be written here.
+ *
+ * @return 0 on success, a negative number on failure.
+ */
+int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out);
+int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out);
+int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out);
+int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out);
+int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out);
+int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out);
+/**
+ * @}
+ */
+
+#define AV_OPT_SEARCH_CHILDREN 0x0001 /**< Search in possible children of the
+ given object first. */
+/**
+ * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass
+ * instead of a required pointer to a struct containing AVClass. This is
+ * useful for searching for options without needing to allocate the corresponding
+ * object.
+ */
+#define AV_OPT_SEARCH_FAKE_OBJ 0x0002
+
+/**
+ * Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than
+ * one component for certain option types.
+ * @see AVOptionRanges for details.
+ */
+#define AV_OPT_MULTI_COMPONENT_RANGE 0x1000
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ *
+ * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable
+ * directly with av_opt_set(). Use special calls which take an options
+ * AVDictionary (e.g. avformat_open_input()) to set options found with this
+ * flag.
+ */
+const AVOption *av_opt_find(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags);
+
+/**
+ * Look for an option in an object. Consider only options which
+ * have all the specified flags set.
+ *
+ * @param[in] obj A pointer to a struct whose first element is a
+ * pointer to an AVClass.
+ * Alternatively a double pointer to an AVClass, if
+ * AV_OPT_SEARCH_FAKE_OBJ search flag is set.
+ * @param[in] name The name of the option to look for.
+ * @param[in] unit When searching for named constants, name of the unit
+ * it belongs to.
+ * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG).
+ * @param search_flags A combination of AV_OPT_SEARCH_*.
+ * @param[out] target_obj if non-NULL, an object to which the option belongs will be
+ * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present
+ * in search_flags. This parameter is ignored if search_flags contain
+ * AV_OPT_SEARCH_FAKE_OBJ.
+ *
+ * @return A pointer to the option found, or NULL if no option
+ * was found.
+ */
+const AVOption *av_opt_find2(void *obj, const char *name, const char *unit,
+ int opt_flags, int search_flags, void **target_obj);
+
+/**
+ * Iterate over all AVOptions belonging to obj.
+ *
+ * @param obj an AVOptions-enabled struct or a double pointer to an
+ * AVClass describing it.
+ * @param prev result of the previous call to av_opt_next() on this object
+ * or NULL
+ * @return next AVOption or NULL
+ */
+const AVOption *av_opt_next(FF_CONST_AVUTIL55 void *obj, const AVOption *prev);
+
+/**
+ * Iterate over AVOptions-enabled children of obj.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return next AVOptions-enabled child or NULL
+ */
+void *av_opt_child_next(void *obj, void *prev);
+
+/**
+ * Iterate over potential AVOptions-enabled children of parent.
+ *
+ * @param prev result of a previous call to this function or NULL
+ * @return AVClass corresponding to next potential child or NULL
+ */
+const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev);
+
+/**
+ * @defgroup opt_set_funcs Option setting functions
+ * @{
+ * Those functions set the field of obj with the given name to value.
+ *
+ * @param[in] obj A struct whose first element is a pointer to an AVClass.
+ * @param[in] name the name of the field to set
+ * @param[in] val The value to set. In case of av_opt_set() if the field is not
+ * of a string type, then the given string is parsed.
+ * SI postfixes and some named scalars are supported.
+ * If the field is of a numeric type, it has to be a numeric or named
+ * scalar. Behavior with more than one scalar and +- infix operators
+ * is undefined.
+ * If the field is of a flags type, it has to be a sequence of numeric
+ * scalars or named flags separated by '+' or '-'. Prefixing a flag
+ * with '+' causes it to be set without affecting the other flags;
+ * similarly, '-' unsets a flag.
+ * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be set on a child of obj.
+ *
+ * @return 0 if the value has been set, or an AVERROR code in case of
+ * error:
+ * AVERROR_OPTION_NOT_FOUND if no matching option exists
+ * AVERROR(ERANGE) if the value is out of range
+ * AVERROR(EINVAL) if the value is not valid
+ */
+int av_opt_set (void *obj, const char *name, const char *val, int search_flags);
+int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags);
+int av_opt_set_double (void *obj, const char *name, double val, int search_flags);
+int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags);
+int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags);
+int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags);
+int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags);
+int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags);
+int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags);
+int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags);
+/**
+ * @note Any old dictionary present is discarded and replaced with a copy of the new one. The
+ * caller still owns val and is responsible for freeing it.
+ */
+int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags);
+
+/**
+ * Set a binary option to an integer list.
+ *
+ * @param obj AVClass object to set options on
+ * @param name name of the binary option
+ * @param val pointer to an integer list (must have the correct type with
+ * regard to the contents of the list)
+ * @param term list terminator (usually 0 or -1)
+ * @param flags search flags
+ */
+#define av_opt_set_int_list(obj, name, val, term, flags) \
+ (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? \
+ AVERROR(EINVAL) : \
+ av_opt_set_bin(obj, name, (const uint8_t *)(val), \
+ av_int_list_length(val, term) * sizeof(*(val)), flags))
+
+/**
+ * @}
+ */
+
+/**
+ * @defgroup opt_get_funcs Option getting functions
+ * @{
+ * Those functions get a value of the option with the given name from an object.
+ *
+ * @param[in] obj a struct whose first element is a pointer to an AVClass.
+ * @param[in] name name of the option to get.
+ * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN
+ * is passed here, then the option may be found in a child of obj.
+ * @param[out] out_val value of the option will be written here
+ * @return >=0 on success, a negative error code otherwise
+ */
+/**
+ * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller
+ */
+int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val);
+int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val);
+int av_opt_get_double (void *obj, const char *name, int search_flags, double *out_val);
+int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val);
+int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out);
+int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt);
+int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt);
+int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val);
+int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout);
+/**
+ * @param[out] out_val The returned dictionary is a copy of the actual value and must
+ * be freed with av_dict_free() by the caller
+ */
+int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val);
+/**
+ * @}
+ */
+/**
+ * Gets a pointer to the requested field in a struct.
+ * This function allows accessing a struct even when its fields are moved or
+ * renamed since the application making the access has been compiled,
+ *
+ * @returns a pointer to the field, it can be cast to the correct type and read
+ * or written to.
+ */
+void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name);
+
+/**
+ * Free an AVOptionRanges struct and set it to NULL.
+ */
+void av_opt_freep_ranges(AVOptionRanges **ranges);
+
+/**
+ * Get a list of allowed ranges for the given option.
+ *
+ * The returned list may depend on other fields in obj like for example profile.
+ *
+ * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
+ *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to an AVClass instead of a full instance
+ *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_freep_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Copy options from src object into dest object.
+ *
+ * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object.
+ * Original memory allocated for such options is freed unless both src and dest options points to the same memory.
+ *
+ * @param dest Object to copy into
+ * @param src  Object to copy from
+ * @return 0 on success, negative on error
+ */
+int av_opt_copy(void *dest, FF_CONST_AVUTIL55 void *src);
+
+/**
+ * Get a default list of allowed ranges for the given option.
+ *
+ * This list is constructed without using the AVClass.query_ranges() callback
+ * and can be used as fallback from within the callback.
+ *
+ * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored
+ *              AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to an AVClass instead of a full instance
+ *              AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges
+ *
+ * The result must be freed with av_opt_freep_ranges.
+ *
+ * @return number of components returned on success, a negative error code otherwise
+ */
+int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * Options o must belong to the obj. This function must not be called to check child's options state.
+ * @see av_opt_is_set_to_default_by_name().
+ *
+ * @param obj AVClass object to check option on
+ * @param o option to be checked
+ * @return >0 when option is set to its default,
+ *         0 when option is not set to its default,
+ * <0 on error
+ */
+int av_opt_is_set_to_default(void *obj, const AVOption *o);
+
+/**
+ * Check if given option is set to its default value.
+ *
+ * @param obj AVClass object to check option on
+ * @param name option name
+ * @param search_flags combination of AV_OPT_SEARCH_*
+ * @return >0 when option is set to its default,
+ *         0 when option is not set to its default,
+ * <0 on error
+ */
+int av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags);
+
+
+#define AV_OPT_SERIALIZE_SKIP_DEFAULTS 0x00000001 ///< Serialize options that are not set to default values only.
+#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT 0x00000002 ///< Serialize options that exactly match opt_flags only.
+
+/**
+ * Serialize object's options.
+ *
+ * Create a string containing object's serialized options.
+ * Such string may be passed back to av_opt_set_from_string() in order to restore option values.
+ * A key/value or pairs separator occurring in the serialized value or
+ * name string is escaped through the av_escape() function.
+ *
+ * @param[in] obj AVClass object to serialize
+ * @param[in] opt_flags serialize options with all the specified flags set (AV_OPT_FLAG)
+ * @param[in] flags combination of AV_OPT_SERIALIZE_* flags
+ * @param[out] buffer Pointer to buffer that will be allocated with string containing serialized options.
+ * Buffer must be freed by the caller when is no longer needed.
+ * @param[in] key_val_sep character used to separate key from value
+ * @param[in] pairs_sep character used to separate two pairs from each other
+ * @return >= 0 on success, negative on error
+ * @warning Separators cannot be '\\' or '\0'. They also cannot be the same.
+ */
+int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer,
+ const char key_val_sep, const char pairs_sep);
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_OPT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/parseutils.h b/Externals/ffmpeg/dev/include/libavutil/parseutils.h
new file mode 100644
index 0000000000..c80f0de3de
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/parseutils.h
@@ -0,0 +1,187 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PARSEUTILS_H
+#define AVUTIL_PARSEUTILS_H
+
+#include <time.h>
+
+#include "rational.h"
+
+/**
+ * @file
+ * misc parsing utilities
+ */
+
+/**
+ * Parse str and store the parsed ratio in q.
+ *
+ * Note that a ratio with infinite (1/0) or negative value is
+ * considered valid, so you should check on the returned value if you
+ * want to exclude those values.
+ *
+ * The undefined value can be expressed using the "0:0" string.
+ *
+ * @param[in,out] q pointer to the AVRational which will contain the ratio
+ * @param[in] str the string to parse: it has to be a string in the format
+ * num:den, a float number or an expression
+ * @param[in] max the maximum allowed numerator and denominator
+ * @param[in] log_offset log level offset which is applied to the log
+ * level of log_ctx
+ * @param[in] log_ctx parent logging context
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_ratio(AVRational *q, const char *str, int max,
+ int log_offset, void *log_ctx);
+
+#define av_parse_ratio_quiet(rate, str, max) \
+ av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL)
+
+/**
+ * Parse str and put in width_ptr and height_ptr the detected values.
+ *
+ * @param[in,out] width_ptr pointer to the variable which will contain the detected
+ * width value
+ * @param[in,out] height_ptr pointer to the variable which will contain the detected
+ * height value
+ * @param[in] str the string to parse: it has to be a string in the format
+ * width x height or a valid video size abbreviation.
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str);
+
+/**
+ * Parse str and store the detected values in *rate.
+ *
+ * @param[in,out] rate pointer to the AVRational which will contain the detected
+ * frame rate
+ * @param[in] str the string to parse: it has to be a string in the format
+ * rate_num / rate_den, a float number or a valid video rate abbreviation
+ * @return >= 0 on success, a negative error code otherwise
+ */
+int av_parse_video_rate(AVRational *rate, const char *str);
+
+/**
+ * Put the RGBA values that correspond to color_string in rgba_color.
+ *
+ * @param color_string a string specifying a color. It can be the name of
+ * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence,
+ * possibly followed by "@" and a string representing the alpha
+ * component.
+ * The alpha component may be a string composed by "0x" followed by an
+ * hexadecimal number or a decimal number between 0.0 and 1.0, which
+ * represents the opacity value (0x00/0.0 means completely transparent,
+ * 0xff/1.0 completely opaque).
+ * If the alpha component is not specified then 0xff is assumed.
+ * The string "random" will result in a random color.
+ * @param slen length of the initial part of color_string containing the
+ * color. It can be set to -1 if color_string is a null terminated string
+ * containing nothing else than the color.
+ * @return >= 0 in case of success, a negative value in case of
+ * failure (for example if color_string cannot be parsed).
+ */
+int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen,
+ void *log_ctx);
+
+/**
+ * Get the name of a color from the internal table of hard-coded named
+ * colors.
+ *
+ * This function is meant to enumerate the color names recognized by
+ * av_parse_color().
+ *
+ * @param color_idx index of the requested color, starting from 0
+ * @param rgb if not NULL, will point to a 3-element array with the color value in RGB
+ * @return the color name string or NULL if color_idx is not in the array
+ */
+const char *av_get_known_color_name(int color_idx, const uint8_t **rgb);
+
+/**
+ * Parse timestr and return in *time a corresponding number of
+ * microseconds.
+ *
+ * @param timeval puts here the number of microseconds corresponding
+ * to the string in timestr. If the string represents a duration, it
+ * is the number of microseconds contained in the time interval. If
+ * the string is a date, is the number of microseconds since 1st of
+ * January, 1970 up to the time of the parsed date. If timestr cannot
+ * be successfully parsed, set *time to INT64_MIN.
+
+ * @param timestr a string representing a date or a duration.
+ * - If a date the syntax is:
+ * @code
+ * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z]
+ * now
+ * @endcode
+ * If the value is "now" it takes the current time.
+ * Time is local time unless Z is appended, in which case it is
+ * interpreted as UTC.
+ * If the year-month-day part is not specified it takes the current
+ * year-month-day.
+ * - If a duration the syntax is:
+ * @code
+ * [-][HH:]MM:SS[.m...]
+ * [-]S+[.m...]
+ * @endcode
+ * @param duration flag which tells how to interpret timestr, if not
+ * zero timestr is interpreted as a duration, otherwise as a date
+ * @return >= 0 in case of success, a negative value corresponding to an
+ * AVERROR code otherwise
+ */
+int av_parse_time(int64_t *timeval, const char *timestr, int duration);
+
+/**
+ * Parse the input string p according to the format string fmt and
+ * store its results in the structure dt.
+ * This implementation supports only a subset of the formats supported
+ * by the standard strptime().
+ *
+ * In particular it actually supports the parameters:
+ * - %H: the hour as a decimal number, using a 24-hour clock, in the
+ * range '00' through '23'
+ * - %J: hours as a decimal number, in the range '0' through INT_MAX
+ * - %M: the minute as a decimal number, using a 24-hour clock, in the
+ * range '00' through '59'
+ * - %S: the second as a decimal number, using a 24-hour clock, in the
+ * range '00' through '59'
+ * - %Y: the year as a decimal number, using the Gregorian calendar
+ * - %m: the month as a decimal number, in the range '1' through '12'
+ * - %d: the day of the month as a decimal number, in the range '1'
+ * through '31'
+ * - %%: a literal '%'
+ *
+ * @return a pointer to the first character not processed in this
+ * function call, or NULL in case the function fails to match all of
+ * the fmt string and therefore an error occurred
+ */
+char *av_small_strptime(const char *p, const char *fmt, struct tm *dt);
+
+/**
+ * Attempt to find a specific tag in a URL.
+ *
+ * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done.
+ * Return 1 if found.
+ */
+int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info);
+
+/**
+ * Convert the decomposed UTC time in tm to a time_t value.
+ */
+time_t av_timegm(struct tm *tm);
+
+#endif /* AVUTIL_PARSEUTILS_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/pixdesc.h b/Externals/ffmpeg/dev/include/libavutil/pixdesc.h
new file mode 100644
index 0000000000..a4376b2c0e
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/pixdesc.h
@@ -0,0 +1,385 @@
+/*
+ * pixel format descriptor
+ * Copyright (c) 2009 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXDESC_H
+#define AVUTIL_PIXDESC_H
+
+#include <inttypes.h>
+
+#include "attributes.h"
+#include "pixfmt.h"
+
+typedef struct AVComponentDescriptor {
+ /**
+ * Which of the 4 planes contains the component.
+ */
+ uint16_t plane : 2;
+
+ /**
+ * Number of elements between 2 horizontally consecutive pixels minus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t step_minus1 : 3;
+
+ /**
+ * Number of elements before the component of the first pixel plus 1.
+ * Elements are bits for bitstream formats, bytes otherwise.
+ */
+ uint16_t offset_plus1 : 3;
+
+ /**
+ * Number of least significant bits that must be shifted away
+ * to get the value.
+ */
+ uint16_t shift : 3;
+
+ /**
+ * Number of bits in the component minus 1.
+ */
+ uint16_t depth_minus1 : 4;
+} AVComponentDescriptor;
+
+/**
+ * Descriptor that unambiguously describes how the bits of a pixel are
+ * stored in the up to 4 data planes of an image. It also stores the
+ * subsampling factors and number of components.
+ *
+ * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV
+ * and all the YUV variants) AVPixFmtDescriptor just stores how values
+ * are stored not what these values represent.
+ */
+typedef struct AVPixFmtDescriptor {
+ const char *name;
+ uint8_t nb_components; ///< The number of components each pixel has, (1-4)
+
+ /**
+ * Amount to shift the luma width right to find the chroma width.
+ * For YV12 this is 1 for example.
+ * chroma_width = -((-luma_width) >> log2_chroma_w)
+ * The note above is needed to ensure rounding up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_w; ///< chroma_width = -((-luma_width )>>log2_chroma_w)
+
+ /**
+ * Amount to shift the luma height right to find the chroma height.
+ * For YV12 this is 1 for example.
+ * chroma_height= -((-luma_height) >> log2_chroma_h)
+ * The note above is needed to ensure rounding up.
+ * This value only refers to the chroma components.
+ */
+ uint8_t log2_chroma_h;
+ uint8_t flags;
+
+ /**
+ * Parameters that describe how pixels are packed.
+ * If the format has 2 or 4 components, then alpha is last.
+ * If the format has 1 or 2 components, then luma is 0.
+ * If the format has 3 or 4 components,
+ * if the RGB flag is set then 0 is red, 1 is green and 2 is blue;
+ * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V.
+ */
+ AVComponentDescriptor comp[4];
+
+ /**
+ * Alternative comma-separated names.
+ */
+ const char *alias;
+} AVPixFmtDescriptor;
+
+/**
+ * Pixel format is big-endian.
+ */
+#define AV_PIX_FMT_FLAG_BE (1 << 0)
+/**
+ * Pixel format has a palette in data[1], values are indexes in this palette.
+ */
+#define AV_PIX_FMT_FLAG_PAL (1 << 1)
+/**
+ * All values of a component are bit-wise packed end to end.
+ */
+#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2)
+/**
+ * Pixel format is an HW accelerated format.
+ */
+#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3)
+/**
+ * At least one pixel component is not in the first data plane.
+ */
+#define AV_PIX_FMT_FLAG_PLANAR (1 << 4)
+/**
+ * The pixel format contains RGB-like data (as opposed to YUV/grayscale).
+ */
+#define AV_PIX_FMT_FLAG_RGB (1 << 5)
+/**
+ * The pixel format is "pseudo-paletted". This means that FFmpeg treats it as
+ * paletted internally, but the palette is generated by the decoder and is not
+ * stored in the file.
+ */
+#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6)
+/**
+ * The pixel format has an alpha channel.
+ */
+#define AV_PIX_FMT_FLAG_ALPHA (1 << 7)
+
+#if FF_API_PIX_FMT
+/**
+ * @deprecated use the AV_PIX_FMT_FLAG_* flags
+ */
+#define PIX_FMT_BE AV_PIX_FMT_FLAG_BE
+#define PIX_FMT_PAL AV_PIX_FMT_FLAG_PAL
+#define PIX_FMT_BITSTREAM AV_PIX_FMT_FLAG_BITSTREAM
+#define PIX_FMT_HWACCEL AV_PIX_FMT_FLAG_HWACCEL
+#define PIX_FMT_PLANAR AV_PIX_FMT_FLAG_PLANAR
+#define PIX_FMT_RGB AV_PIX_FMT_FLAG_RGB
+#define PIX_FMT_PSEUDOPAL AV_PIX_FMT_FLAG_PSEUDOPAL
+#define PIX_FMT_ALPHA AV_PIX_FMT_FLAG_ALPHA
+#endif
+
+#if FF_API_PIX_FMT_DESC
+/**
+ * The array of all the pixel format descriptors.
+ */
+extern attribute_deprecated const AVPixFmtDescriptor av_pix_fmt_descriptors[];
+#endif
+
+/**
+ * Read a line from an image, and write the values of the
+ * pixel format component c to dst.
+ *
+ * @param data the array containing the pointers to the planes of the image
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to read
+ * @param y the vertical coordinate of the first pixel to read
+ * @param w the width of the line to read, that is the number of
+ * values to write to dst
+ * @param read_pal_component if not zero and the format is a paletted
+ * format writes the values corresponding to the palette
+ * component c in data[1] to dst, rather than the palette indexes in
+ * data[0]. The behavior is undefined if the format is not paletted.
+ */
+void av_read_image_line(uint16_t *dst, const uint8_t *data[4],
+ const int linesize[4], const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w, int read_pal_component);
+
+/**
+ * Write the values from src to the pixel format component c of an
+ * image line.
+ *
+ * @param src array containing the values to write
+ * @param data the array containing the pointers to the planes of the
+ * image to write into. It is supposed to be zeroed.
+ * @param linesize the array containing the linesizes of the image
+ * @param desc the pixel format descriptor for the image
+ * @param x the horizontal coordinate of the first pixel to write
+ * @param y the vertical coordinate of the first pixel to write
+ * @param w the width of the line to write, that is the number of
+ * values to write to the image line
+ */
+void av_write_image_line(const uint16_t *src, uint8_t *data[4],
+ const int linesize[4], const AVPixFmtDescriptor *desc,
+ int x, int y, int c, int w);
+
+/**
+ * Return the pixel format corresponding to name.
+ *
+ * If there is no pixel format with name name, then looks for a
+ * pixel format with the name corresponding to the native endian
+ * format of name.
+ * For example in a little-endian system, first looks for "gray16",
+ * then for "gray16le".
+ *
+ * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE.
+ */
+enum AVPixelFormat av_get_pix_fmt(const char *name);
+
+/**
+ * Return the short name for a pixel format, NULL in case pix_fmt is
+ * unknown.
+ *
+ * @see av_get_pix_fmt(), av_get_pix_fmt_string()
+ */
+const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt);
+
+/**
+ * Print in buf the string corresponding to the pixel format with
+ * number pix_fmt, or a header if pix_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param pix_fmt the number of the pixel format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ */
+char *av_get_pix_fmt_string(char *buf, int buf_size,
+ enum AVPixelFormat pix_fmt);
+
+/**
+ * Return the number of bits per pixel used by the pixel format
+ * described by pixdesc. Note that this is not the same as the number
+ * of bits per sample.
+ *
+ * The returned number of bits refers to the number of bits actually
+ * used for storing the pixel information, that is padding bits are
+ * not counted.
+ */
+int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * Return the number of bits per pixel for the pixel format
+ * described by pixdesc, including any padding or unused bits.
+ */
+int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc);
+
+/**
+ * @return a pixel format descriptor for provided pixel format or NULL if
+ * this pixel format is unknown.
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt);
+
+/**
+ * Iterate over all pixel format descriptors known to libavutil.
+ *
+ * @param prev previous descriptor. NULL to get the first descriptor.
+ *
+ * @return next descriptor or NULL after the last descriptor
+ */
+const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev);
+
+/**
+ * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc
+ * is not a valid pointer to a pixel format descriptor.
+ */
+enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc);
+
+/**
+ * Utility function to access log2_chroma_w log2_chroma_h from
+ * the pixel format AVPixFmtDescriptor.
+ *
+ * See av_get_chroma_sub_sample() for a function that asserts a
+ * valid pixel format instead of returning an error code.
+ * It's recommended that you use avcodec_get_chroma_sub_sample unless
+ * you do check the return code!
+ *
+ * @param[in] pix_fmt the pixel format
+ * @param[out] h_shift store log2_chroma_w
+ * @param[out] v_shift store log2_chroma_h
+ *
+ * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format
+ */
+int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt,
+ int *h_shift, int *v_shift);
+
+/**
+ * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a
+ * valid pixel format.
+ */
+int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt);
+
+void ff_check_pixfmt_descriptors(void);
+
+/**
+ * Utility function to swap the endianness of a pixel format.
+ *
+ * @param[in] pix_fmt the pixel format
+ *
+ * @return pixel format with swapped endianness if it exists,
+ * otherwise AV_PIX_FMT_NONE
+ */
+enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt);
+
+#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */
+#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */
+#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */
+#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */
+#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */
+#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */
+
+/**
+ * Compute what kind of losses will occur when converting from one specific
+ * pixel format to another.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. These losses can involve loss of chroma, but also loss of
+ * resolution, loss of color depth, loss due to the color space conversion, loss
+ * of the alpha bits or loss due to color quantization.
+ * av_get_pix_fmt_loss() informs you about the various types of losses
+ * which will occur when converting from one pixel format to another.
+ *
+ * @param[in] dst_pix_fmt destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @return Combination of flags informing you what kind of losses will occur
+ * (maximum loss for an invalid dst_pix_fmt).
+ */
+int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt,
+ enum AVPixelFormat src_pix_fmt,
+ int has_alpha);
+
+/**
+ * Compute which of the two given destination pixel formats is better suited
+ * for conversion from the given source pixel format.
+ * When converting from one pixel format to another, information loss may occur.
+ * For example, when converting from RGB24 to GRAY, the color information will
+ * be lost. Similarly, other losses occur when converting from some formats to
+ * other formats. av_find_best_pix_fmt_of_2() selects which of the given pixel
+ * formats should be used to suffer the least amount of loss.
+ *
+ * @param[in] dst_pix_fmt1 first candidate destination pixel format
+ * @param[in] dst_pix_fmt2 second candidate destination pixel format
+ * @param[in] src_pix_fmt source pixel format
+ * @param[in] has_alpha Whether the source pixel format alpha channel is used.
+ * @param[in,out] loss_ptr Combination of loss flags; on return, the loss that
+ *                occurs when converting from src to the selected dst format.
+ * @return The best pixel format to convert to, or AV_PIX_FMT_NONE if none
+ *         was found.
+ */
+enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2,
+ enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr);
+
+/**
+ * @return the name for provided color range or NULL if unknown.
+ */
+const char *av_color_range_name(enum AVColorRange range);
+
+/**
+ * @return the name for provided color primaries or NULL if unknown.
+ */
+const char *av_color_primaries_name(enum AVColorPrimaries primaries);
+
+/**
+ * @return the name for provided color transfer or NULL if unknown.
+ */
+const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer);
+
+/**
+ * @return the name for provided color space or NULL if unknown.
+ */
+const char *av_color_space_name(enum AVColorSpace space);
+
+/**
+ * @return the name for provided chroma location or NULL if unknown.
+ */
+const char *av_chroma_location_name(enum AVChromaLocation location);
+
+#endif /* AVUTIL_PIXDESC_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/pixelutils.h b/Externals/ffmpeg/dev/include/libavutil/pixelutils.h
new file mode 100644
index 0000000000..a8dbc157e1
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/pixelutils.h
@@ -0,0 +1,52 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXELUTILS_H
+#define AVUTIL_PIXELUTILS_H
+
+#include <stddef.h>
+#include <stdint.h>
+#include "common.h"
+
+/**
+ * Sum of abs(src1[x] - src2[x])
+ */
+typedef int (*av_pixelutils_sad_fn)(const uint8_t *src1, ptrdiff_t stride1,
+ const uint8_t *src2, ptrdiff_t stride2);
+
+/**
+ * Get a potentially optimized pointer to a Sum-of-absolute-differences
+ * function (see the av_pixelutils_sad_fn prototype).
+ *
+ * @param w_bits 1<<w_bits determines the width of the block (pixels)
+ * @param h_bits 1<<h_bits determines the height of the block (pixels)
+ * @param aligned If set to 1, the returned function will assume src1 and
+ *                src2 addresses are aligned on several bytes.
+ * @param log_ctx context used for logging, can be NULL
+ *
+ * @return a pointer to the SAD function or NULL in case of error (typically
+ *         because of invalid parameters)
+ */
+av_pixelutils_sad_fn av_pixelutils_get_sad_fn(int w_bits, int h_bits,
+                                              int aligned, void *log_ctx);
+
+#endif /* AVUTIL_PIXELUTILS_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/pixfmt.h b/Externals/ffmpeg/dev/include/libavutil/pixfmt.h
new file mode 100644
index 0000000000..0000000000
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/pixfmt.h
@@ -0,0 +1,430 @@
+/*
+ * copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_PIXFMT_H
+#define AVUTIL_PIXFMT_H
+
+/**
+ * @file
+ * pixel format definitions
+ *
+ */
+
+#include "libavutil/avconfig.h"
+#include "version.h"
+
+#define AVPALETTE_SIZE 1024
+#define AVPALETTE_COUNT 256
+
+/**
+ * Pixel format.
+ *
+ * @note
+ * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA
+ * color is put together as:
+ * (A << 24) | (R << 16) | (G << 8) | B
+ * This is stored as BGRA on little-endian CPU architectures and ARGB on
+ * big-endian CPUs.
+ *
+ * @par
+ * When the pixel format is palettized RGB (AV_PIX_FMT_PAL8), the palettized
+ * image data is stored in AVFrame.data[0]. The palette is transported in
+ * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is
+ * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is
+ * also endian-specific). Note also that the individual RGB palette
+ * components stored in AVFrame.data[1] should be in the range 0..255.
+ * This is important as many custom PAL8 video codecs that were designed
+ * to run on the IBM VGA graphics adapter use 6-bit palette components.
+ *
+ * @par
+ * For all the 8bit per pixel formats, an RGB32 palette is in data[1] like
+ * for pal8. This palette is filled in automatically by the function
+ * allocating the picture.
+ *
+ * @note
+ * Make sure that all newly added big-endian formats have (pix_fmt & 1) == 1
+ * and that all newly added little-endian formats have (pix_fmt & 1) == 0.
+ * This allows simpler detection of big vs little-endian.
+ */
+enum AVPixelFormat {
+ AV_PIX_FMT_NONE = -1,
+ AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples)
+ AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr
+ AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB...
+ AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR...
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples)
+ AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples)
+ AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples)
+ AV_PIX_FMT_GRAY8, ///< Y , 8bpp
+ AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb
+ AV_PIX_FMT_PAL8, ///< 8 bit with PIX_FMT_RGB32 palette
+ AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV420P and setting color_range
+ AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV422P and setting color_range
+ AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of PIX_FMT_YUV444P and setting color_range
+#if FF_API_XVMC
+ AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing
+ AV_PIX_FMT_XVMC_MPEG2_IDCT,
+#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT
+#endif /* FF_API_XVMC */
+ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1
+ AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3
+ AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb)
+ AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb)
+ AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb)
+ AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits
+ AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb)
+ AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V)
+ AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped
+
+ AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB...
+ AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA...
+ AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR...
+ AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA...
+
+ AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian
+ AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian
+ AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples)
+ AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of PIX_FMT_YUV440P and setting color_range
+ AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples)
+#if FF_API_VDPAU
+ AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+ AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ AV_PIX_FMT_RGB48BE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian
+
+ AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian
+ AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian
+ AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), big-endian, most significant bit to 0
+ AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1A 5R 5G 5B(lsb), little-endian, most significant bit to 0
+
+ AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian
+ AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian
+ AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), big-endian, most significant bit to 1
+ AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1A 5B 5G 5R(lsb), little-endian, most significant bit to 1
+
+ AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers
+ AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers
+ AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a vaapi_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+
+ AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+#if FF_API_VDPAU
+ AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers
+#endif
+ AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer
+
+ AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), little-endian, most significant bits to 0
+ AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4A 4R 4G 4B(lsb), big-endian, most significant bits to 0
+ AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), little-endian, most significant bits to 1
+ AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4A 4B 4G 4R(lsb), big-endian, most significant bits to 1
+ AV_PIX_FMT_YA8, ///< 8bit gray, 8bit alpha
+
+ AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+ AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8
+
+ AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian
+ AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian
+
+ /**
+ * The following 12 formats have the disadvantage of needing 1 format for each bit depth.
+ * Notice that each 9/10 bits sample is stored in 16 bits with extra padding.
+ * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better.
+ */
+ AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA
+
+#ifdef AV_PIX_FMT_ABI_GIT_MASTER
+ AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+ AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp
+ AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian
+ AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian
+ AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian
+ AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian
+ AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian
+ AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian
+
+ /**
+ * duplicated pixel formats for compatibility with libav.
+ * FFmpeg supports these formats since May 8 2012 and Jan 28 2012 (commits f9ca1ac7 and 143a5c55)
+ * Libav added them Oct 12 2012 with incompatible values (commit 6d5600e85)
+ */
+ AV_PIX_FMT_YUVA422P_LIBAV, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+ AV_PIX_FMT_YUVA444P_LIBAV, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+
+ AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian
+ AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian
+ AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian)
+ AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian)
+ AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian)
+
+ AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface
+
+ AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0
+ AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples)
+ AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+
+ /**
+ * duplicated pixel formats for compatibility with libav.
+ * FFmpeg supports these formats since Sat Sep 24 06:01:45 2011 +0200 (commits 9569a3c9f41387a8c7d1ce97d8693520477a66c3)
+ * also see Fri Nov 25 01:38:21 2011 +0100 92afb431621c79155fcb7171d26f137eb1bee028
+ * Libav added them Sun Mar 16 23:05:47 2014 +0100 with incompatible values (commit 1481d24c3a0abf81e1d7a514547bd5305232be30)
+ */
+ AV_PIX_FMT_RGBA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_RGBA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ AV_PIX_FMT_BGRA64BE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_BGRA64LE_LIBAV, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+
+ AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb
+
+ AV_PIX_FMT_VDA, ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef
+
+ AV_PIX_FMT_YA16BE, ///< 16bit gray, 16bit alpha (big-endian)
+ AV_PIX_FMT_YA16LE, ///< 16bit gray, 16bit alpha (little-endian)
+
+
+#ifndef AV_PIX_FMT_ABI_GIT_MASTER
+ AV_PIX_FMT_RGBA64BE=0x123, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+ AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian
+ AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian
+#endif
+ AV_PIX_FMT_0RGB=0x123+4, ///< packed RGB 8:8:8, 32bpp, 0RGB0RGB...
+ AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGB0RGB0...
+ AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, 0BGR0BGR...
+ AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGR0BGR0...
+ AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples)
+ AV_PIX_FMT_YUVA422P, ///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples)
+
+ AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian
+ AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian
+ AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian
+ AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian
+ AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian
+ AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian
+ AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian
+ AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian
+ AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian
+ AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp
+ AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian
+ AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, little-endian
+ AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of PIX_FMT_YUV411P and setting color_range
+
+ AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */
+ AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */
+ AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */
+ AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */
+#if !FF_API_XVMC
+ AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing
+#endif /* !FF_API_XVMC */
+
+ AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions
+
+#if FF_API_PIX_FMT
+#include "old_pix_fmts.h"
+#endif
+};
+
+#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI
+#define AV_PIX_FMT_YUVA422P AV_PIX_FMT_YUVA422P_LIBAV
+#define AV_PIX_FMT_YUVA444P AV_PIX_FMT_YUVA444P_LIBAV
+#define AV_PIX_FMT_RGBA64BE AV_PIX_FMT_RGBA64BE_LIBAV
+#define AV_PIX_FMT_RGBA64LE AV_PIX_FMT_RGBA64LE_LIBAV
+#define AV_PIX_FMT_BGRA64BE AV_PIX_FMT_BGRA64BE_LIBAV
+#define AV_PIX_FMT_BGRA64LE AV_PIX_FMT_BGRA64LE_LIBAV
+#endif
+
+
+#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A
+#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP
+
+#if AV_HAVE_BIGENDIAN
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be
+#else
+# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le
+#endif
+
+#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA)
+#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR)
+#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA)
+#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB)
+#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0)
+#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0)
+
+#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE)
+#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE)
+#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE)
+#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE)
+#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE)
+#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE)
+#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE)
+#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE)
+#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE)
+#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE)
+#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE)
+#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE)
+
+#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE)
+#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE)
+#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE)
+#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE)
+#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE)
+#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE)
+#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE)
+#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE)
+#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE)
+#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE)
+#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE)
+#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE)
+#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE)
+#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE)
+#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE)
+
+#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE)
+#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE)
+#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE)
+#define AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE)
+#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE)
+#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE)
+
+#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE)
+#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE)
+#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE)
+#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE)
+
+
+#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE)
+#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE)
+#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE)
+#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE)
+#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE)
+#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE)
+#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE)
+#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE)
+#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE)
+
+#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE)
+#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE)
+
+
+#if FF_API_PIX_FMT
+#define PixelFormat AVPixelFormat
+
+#define PIX_FMT_Y400A AV_PIX_FMT_Y400A
+#define PIX_FMT_GBR24P AV_PIX_FMT_GBR24P
+
+#define PIX_FMT_NE(be, le) AV_PIX_FMT_NE(be, le)
+
+#define PIX_FMT_RGB32 AV_PIX_FMT_RGB32
+#define PIX_FMT_RGB32_1 AV_PIX_FMT_RGB32_1
+#define PIX_FMT_BGR32 AV_PIX_FMT_BGR32
+#define PIX_FMT_BGR32_1 AV_PIX_FMT_BGR32_1
+#define PIX_FMT_0RGB32 AV_PIX_FMT_0RGB32
+#define PIX_FMT_0BGR32 AV_PIX_FMT_0BGR32
+
+#define PIX_FMT_GRAY16 AV_PIX_FMT_GRAY16
+#define PIX_FMT_RGB48 AV_PIX_FMT_RGB48
+#define PIX_FMT_RGB565 AV_PIX_FMT_RGB565
+#define PIX_FMT_RGB555 AV_PIX_FMT_RGB555
+#define PIX_FMT_RGB444 AV_PIX_FMT_RGB444
+#define PIX_FMT_BGR48 AV_PIX_FMT_BGR48
+#define PIX_FMT_BGR565 AV_PIX_FMT_BGR565
+#define PIX_FMT_BGR555 AV_PIX_FMT_BGR555
+#define PIX_FMT_BGR444 AV_PIX_FMT_BGR444
+
+#define PIX_FMT_YUV420P9 AV_PIX_FMT_YUV420P9
+#define PIX_FMT_YUV422P9 AV_PIX_FMT_YUV422P9
+#define PIX_FMT_YUV444P9 AV_PIX_FMT_YUV444P9
+#define PIX_FMT_YUV420P10 AV_PIX_FMT_YUV420P10
+#define PIX_FMT_YUV422P10 AV_PIX_FMT_YUV422P10
+#define PIX_FMT_YUV444P10 AV_PIX_FMT_YUV444P10
+#define PIX_FMT_YUV420P12 AV_PIX_FMT_YUV420P12
+#define PIX_FMT_YUV422P12 AV_PIX_FMT_YUV422P12
+#define PIX_FMT_YUV444P12 AV_PIX_FMT_YUV444P12
+#define PIX_FMT_YUV420P14 AV_PIX_FMT_YUV420P14
+#define PIX_FMT_YUV422P14 AV_PIX_FMT_YUV422P14
+#define PIX_FMT_YUV444P14 AV_PIX_FMT_YUV444P14
+#define PIX_FMT_YUV420P16 AV_PIX_FMT_YUV420P16
+#define PIX_FMT_YUV422P16 AV_PIX_FMT_YUV422P16
+#define PIX_FMT_YUV444P16 AV_PIX_FMT_YUV444P16
+
+#define PIX_FMT_RGBA64 AV_PIX_FMT_RGBA64
+#define PIX_FMT_BGRA64 AV_PIX_FMT_BGRA64
+#define PIX_FMT_GBRP9 AV_PIX_FMT_GBRP9
+#define PIX_FMT_GBRP10 AV_PIX_FMT_GBRP10
+#define PIX_FMT_GBRP12 AV_PIX_FMT_GBRP12
+#define PIX_FMT_GBRP14 AV_PIX_FMT_GBRP14
+#define PIX_FMT_GBRP16 AV_PIX_FMT_GBRP16
+#endif
+
+/**
+ * Chromaticity coordinates of the source primaries.
+ */
+enum AVColorPrimaries {
+ AVCOL_PRI_RESERVED0 = 0,
+ AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B
+ AVCOL_PRI_UNSPECIFIED = 2,
+ AVCOL_PRI_RESERVED = 3,
+ AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+
+ AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC
+ AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above
+ AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C
+ AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020
+ AVCOL_PRI_NB, ///< Not part of ABI
+};
+
+/**
+ * Color Transfer Characteristic.
+ */
+enum AVColorTransferCharacteristic {
+ AVCOL_TRC_RESERVED0 = 0,
+ AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361
+ AVCOL_TRC_UNSPECIFIED = 2,
+ AVCOL_TRC_RESERVED = 3,
+ AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM
+ AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG
+ AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC
+ AVCOL_TRC_SMPTE240M = 7,
+ AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics"
+ AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)"
+ AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)"
+ AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4
+ AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut
+ AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC)
+ AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10 bit system
+ AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12 bit system
+ AVCOL_TRC_NB, ///< Not part of ABI
+};
+
+/**
+ * YUV colorspace type.
+ */
+enum AVColorSpace {
+ AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB)
+ AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B
+ AVCOL_SPC_UNSPECIFIED = 2,
+ AVCOL_SPC_RESERVED = 3,
+ AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20)
+ AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601
+ AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC / functionally identical to above
+ AVCOL_SPC_SMPTE240M = 7,
+ AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16
+ AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system
+ AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system
+ AVCOL_SPC_NB, ///< Not part of ABI
+};
+#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG
+
+
+/**
+ * MPEG vs JPEG YUV range.
+ */
+enum AVColorRange {
+ AVCOL_RANGE_UNSPECIFIED = 0,
+ AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges
+ AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges
+ AVCOL_RANGE_NB, ///< Not part of ABI
+};
+
+/**
+ * Location of chroma samples.
+ *
+ * X X 3 4 X X are luma samples,
+ * 1 2 1-6 are possible chroma positions
+ * X X 5 6 X 0 is undefined/unknown position
+ */
+enum AVChromaLocation {
+ AVCHROMA_LOC_UNSPECIFIED = 0,
+ AVCHROMA_LOC_LEFT = 1, ///< mpeg2/4, h264 default
+ AVCHROMA_LOC_CENTER = 2, ///< mpeg1, jpeg, h263
+ AVCHROMA_LOC_TOPLEFT = 3, ///< DV
+ AVCHROMA_LOC_TOP = 4,
+ AVCHROMA_LOC_BOTTOMLEFT = 5,
+ AVCHROMA_LOC_BOTTOM = 6,
+ AVCHROMA_LOC_NB, ///< Not part of ABI
+};
+
+#endif /* AVUTIL_PIXFMT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/random_seed.h b/Externals/ffmpeg/dev/include/libavutil/random_seed.h
new file mode 100644
index 0000000000..0462a048e0
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/random_seed.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (c) 2009 Baptiste Coudurier
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RANDOM_SEED_H
+#define AVUTIL_RANDOM_SEED_H
+
+#include <stdint.h>
+/**
+ * @addtogroup lavu_crypto
+ * @{
+ */
+
+/**
+ * Get a seed to use in conjunction with random functions.
+ * This function tries to provide a good seed on a best-effort basis.
+ * It is possible to call this function multiple times if more bits are needed.
+ * It can be quite slow, which is why it should only be used as seed for a faster
+ * PRNG. The quality of the seed depends on the platform.
+ */
+uint32_t av_get_random_seed(void);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RANDOM_SEED_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/rational.h b/Externals/ffmpeg/dev/include/libavutil/rational.h
new file mode 100644
index 0000000000..7439701db2
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/rational.h
@@ -0,0 +1,166 @@
+/*
+ * rational numbers
+ * Copyright (c) 2003 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * rational numbers
+ * @author Michael Niedermayer
+ */
+
+#ifndef AVUTIL_RATIONAL_H
+#define AVUTIL_RATIONAL_H
+
+#include <stdint.h>
+#include <limits.h>
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_math
+ * @{
+ */
+
+/**
+ * rational number numerator/denominator
+ */
+typedef struct AVRational{
+ int num; ///< numerator
+ int den; ///< denominator
+} AVRational;
+
+/**
+ * Create a rational.
+ * Useful for compilers that do not support compound literals.
+ * @note The return value is not reduced.
+ */
+static inline AVRational av_make_q(int num, int den)
+{
+ AVRational r = { num, den };
+ return r;
+}
+
+/**
+ * Compare two rationals.
+ * @param a first rational
+ * @param b second rational
+ * @return 0 if a==b, 1 if a>b, -1 if a<b, and INT_MIN if one of the
+ * values is of the form 0/0
+ */
+static inline int av_cmp_q(AVRational a, AVRational b){
+    const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den;
+
+    if(tmp) return (int)((tmp ^ a.den ^ b.den)>>63)|1;
+ else if(b.den && a.den) return 0;
+ else if(a.num && b.num) return (a.num>>31) - (b.num>>31);
+ else return INT_MIN;
+}
+
+/**
+ * Convert rational to double.
+ * @param a rational to convert
+ * @return (double) a
+ */
+static inline double av_q2d(AVRational a){
+ return a.num / (double) a.den;
+}
+
+/**
+ * Reduce a fraction.
+ * This is useful for framerate calculations.
+ * @param dst_num destination numerator
+ * @param dst_den destination denominator
+ * @param num source numerator
+ * @param den source denominator
+ * @param max the maximum allowed for dst_num & dst_den
+ * @return 1 if exact, 0 otherwise
+ */
+int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max);
+
+/**
+ * Multiply two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b*c
+ */
+AVRational av_mul_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Divide one rational by another.
+ * @param b first rational
+ * @param c second rational
+ * @return b/c
+ */
+AVRational av_div_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Add two rationals.
+ * @param b first rational
+ * @param c second rational
+ * @return b+c
+ */
+AVRational av_add_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Subtract one rational from another.
+ * @param b first rational
+ * @param c second rational
+ * @return b-c
+ */
+AVRational av_sub_q(AVRational b, AVRational c) av_const;
+
+/**
+ * Invert a rational.
+ * @param q value
+ * @return 1 / q
+ */
+static av_always_inline AVRational av_inv_q(AVRational q)
+{
+ AVRational r = { q.den, q.num };
+ return r;
+}
+
+/**
+ * Convert a double precision floating point number to a rational.
+ * inf is expressed as {1,0} or {-1,0} depending on the sign.
+ *
+ * @param d double to convert
+ * @param max the maximum allowed numerator and denominator
+ * @return (AVRational) d
+ */
+AVRational av_d2q(double d, int max) av_const;
+
+/**
+ * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer
+ * than q1, 0 if they have the same distance.
+ */
+int av_nearer_q(AVRational q, AVRational q1, AVRational q2);
+
+/**
+ * Find the nearest value in q_list to q.
+ * @param q_list an array of rationals terminated by {0, 0}
+ * @return the index of the nearest value found in the array
+ */
+int av_find_nearest_q_idx(AVRational q, const AVRational* q_list);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RATIONAL_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/replaygain.h b/Externals/ffmpeg/dev/include/libavutil/replaygain.h
new file mode 100644
index 0000000000..5c03e1993d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/replaygain.h
@@ -0,0 +1,51 @@
+/*
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_REPLAYGAIN_H
+#define AVUTIL_REPLAYGAIN_H
+
+#include <stdint.h>
+
+/**
+ * ReplayGain information (see
+ * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification).
+ * The size of this struct is a part of the public ABI.
+ */
+typedef struct AVReplayGain {
+ /**
+ * Track replay gain in microbels (divide by 100000 to get the value in dB).
+ * Should be set to INT32_MIN when unknown.
+ */
+ int32_t track_gain;
+ /**
+ * Peak track amplitude, with 100000 representing full scale (but values
+ * may overflow). 0 when unknown.
+ */
+ uint32_t track_peak;
+ /**
+ * Same as track_gain, but for the whole album.
+ */
+ int32_t album_gain;
+ /**
+ * Same as track_peak, but for the whole album,
+ */
+ uint32_t album_peak;
+} AVReplayGain;
+
+#endif /* AVUTIL_REPLAYGAIN_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/ripemd.h b/Externals/ffmpeg/dev/include/libavutil/ripemd.h
new file mode 100644
index 0000000000..7b0c8bc89c
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/ripemd.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer
+ * Copyright (C) 2013 James Almer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_RIPEMD_H
+#define AVUTIL_RIPEMD_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_ripemd RIPEMD
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_ripemd_size;
+
+struct AVRIPEMD;
+
+/**
+ * Allocate an AVRIPEMD context.
+ */
+struct AVRIPEMD *av_ripemd_alloc(void);
+
+/**
+ * Initialize RIPEMD hashing.
+ *
+ * @param context pointer to the function context (of size av_ripemd_size)
+ * @param bits number of bits in digest (128, 160, 256 or 320 bits)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_ripemd_init(struct AVRIPEMD* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_RIPEMD_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/samplefmt.h b/Externals/ffmpeg/dev/include/libavutil/samplefmt.h
new file mode 100644
index 0000000000..6a8a031c02
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/samplefmt.h
@@ -0,0 +1,271 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SAMPLEFMT_H
+#define AVUTIL_SAMPLEFMT_H
+
+#include <stdint.h>
+
+#include "avutil.h"
+#include "attributes.h"
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ *
+ * @defgroup lavu_sampfmts Audio sample formats
+ *
+ * Audio sample format enumeration and related convenience functions.
+ * @{
+ *
+ */
+
+/**
+ * Audio sample formats
+ *
+ * - The data described by the sample format is always in native-endian order.
+ * Sample values can be expressed by native C types, hence the lack of a signed
+ * 24-bit sample format even though it is a common raw audio data format.
+ *
+ * - The floating-point formats are based on full volume being in the range
+ * [-1.0, 1.0]. Any values outside this range are beyond full volume level.
+ *
+ * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg
+ * (such as AVFrame in libavcodec) is as follows:
+ *
+ * @par
+ * For planar sample formats, each audio channel is in a separate data plane,
+ * and linesize is the buffer size, in bytes, for a single plane. All data
+ * planes must be the same size. For packed sample formats, only the first data
+ * plane is used, and samples for each channel are interleaved. In this case,
+ * linesize is the buffer size, in bytes, for the 1 plane.
+ *
+ */
+enum AVSampleFormat {
+ AV_SAMPLE_FMT_NONE = -1,
+ AV_SAMPLE_FMT_U8, ///< unsigned 8 bits
+ AV_SAMPLE_FMT_S16, ///< signed 16 bits
+ AV_SAMPLE_FMT_S32, ///< signed 32 bits
+ AV_SAMPLE_FMT_FLT, ///< float
+ AV_SAMPLE_FMT_DBL, ///< double
+
+ AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar
+ AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar
+ AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar
+ AV_SAMPLE_FMT_FLTP, ///< float, planar
+ AV_SAMPLE_FMT_DBLP, ///< double, planar
+
+ AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically
+};
+
+/**
+ * Return the name of sample_fmt, or NULL if sample_fmt is not
+ * recognized.
+ */
+const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt);
+
+/**
+ * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE
+ * on error.
+ */
+enum AVSampleFormat av_get_sample_fmt(const char *name);
+
+/**
+ * Return the planar<->packed alternative form of the given sample format, or
+ * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the
+ * requested planar/packed format, the format returned is the same as the
+ * input.
+ */
+enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar);
+
+/**
+ * Get the packed alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in packed format, the format returned is
+ * the same as the input.
+ *
+ * @return the packed alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the planar alternative form of the given sample format.
+ *
+ * If the passed sample_fmt is already in planar format, the format returned is
+ * the same as the input.
+ *
+ * @return the planar alternative form of the given sample format or
+ AV_SAMPLE_FMT_NONE on error.
+ */
+enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt);
+
+/**
+ * Generate a string corresponding to the sample format with
+ * sample_fmt, or a header if sample_fmt is negative.
+ *
+ * @param buf the buffer where to write the string
+ * @param buf_size the size of buf
+ * @param sample_fmt the number of the sample format to print the
+ * corresponding info string, or a negative value to print the
+ * corresponding header.
+ * @return the pointer to the filled buffer or NULL if sample_fmt is
+ * unknown or in case of other errors
+ */
+char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt);
+
+/**
+ * Return number of bytes per sample.
+ *
+ * @param sample_fmt the sample format
+ * @return number of bytes per sample or zero if unknown for the given
+ * sample format
+ */
+int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt);
+
+/**
+ * Check if the sample format is planar.
+ *
+ * @param sample_fmt the sample format to inspect
+ * @return 1 if the sample format is planar, 0 if it is interleaved
+ */
+int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt);
+
+/**
+ * Get the required buffer size for the given audio parameters.
+ *
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return required buffer size, or negative error code on failure
+ */
+int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * @}
+ *
+ * @defgroup lavu_sampmanip Samples manipulation
+ *
+ * Functions that manipulate audio samples
+ * @{
+ */
+
+/**
+ * Fill plane data pointers and linesize for samples with sample
+ * format sample_fmt.
+ *
+ * The audio_data array is filled with the pointers to the samples data planes:
+ * for planar, set the start point of each channel's data within the buffer,
+ * for packed, set the start point of the entire buffer only.
+ *
+ * The value pointed to by linesize is set to the aligned size of each
+ * channel's data buffer for planar layout, or to the aligned size of the
+ * buffer for all channels for packed layout.
+ *
+ * The buffer in buf must be big enough to contain all the samples
+ * (use av_samples_get_buffer_size() to compute its minimum size),
+ * otherwise the audio_data pointers will point to invalid data.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize calculated linesize, may be NULL
+ * @param buf the pointer to a buffer containing the samples
+ * @param nb_channels the number of channels
+ * @param nb_samples the number of samples in a single channel
+ * @param sample_fmt the sample format
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return minimum size in bytes required for the buffer in case
+ * of success at the next bump
+ */
+int av_samples_fill_arrays(uint8_t **audio_data, int *linesize,
+ const uint8_t *buf,
+ int nb_channels, int nb_samples,
+ enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a samples buffer for nb_samples samples, and fill data pointers and
+ * linesize accordingly.
+ * The allocated samples buffer can be freed by using av_freep(&audio_data[0])
+ * Allocated data will be initialized to silence.
+ *
+ * @see enum AVSampleFormat
+ * The documentation for AVSampleFormat describes the data layout.
+ *
+ * @param[out] audio_data array to be filled with the pointer for each channel
+ * @param[out] linesize aligned size for audio buffer(s), may be NULL
+ * @param nb_channels number of audio channels
+ * @param nb_samples number of samples per channel
+ * @param align buffer size alignment (0 = default, 1 = no alignment)
+ * @return >=0 on success or a negative error code on failure
+ * @todo return the size of the allocated buffer in case of success at the next bump
+ * @see av_samples_fill_arrays()
+ * @see av_samples_alloc_array_and_samples()
+ */
+int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Allocate a data pointers array, samples buffer for nb_samples
+ * samples, and fill data pointers and linesize accordingly.
+ *
+ * This is the same as av_samples_alloc(), but also allocates the data
+ * pointers array.
+ *
+ * @see av_samples_alloc()
+ */
+int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels,
+ int nb_samples, enum AVSampleFormat sample_fmt, int align);
+
+/**
+ * Copy samples from src to dst.
+ *
+ * @param dst destination array of pointers to data planes
+ * @param src source array of pointers to data planes
+ * @param dst_offset offset in samples at which the data will be written to dst
+ * @param src_offset offset in samples at which the data will be read from src
+ * @param nb_samples number of samples to be copied
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset,
+ int src_offset, int nb_samples, int nb_channels,
+ enum AVSampleFormat sample_fmt);
+
+/**
+ * Fill an audio buffer with silence.
+ *
+ * @param audio_data array of pointers to data planes
+ * @param offset offset in samples at which to start filling
+ * @param nb_samples number of samples to fill
+ * @param nb_channels number of audio channels
+ * @param sample_fmt audio sample format
+ */
+int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples,
+ int nb_channels, enum AVSampleFormat sample_fmt);
+
+/**
+ * @}
+ * @}
+ */
+#endif /* AVUTIL_SAMPLEFMT_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/sha.h b/Externals/ffmpeg/dev/include/libavutil/sha.h
new file mode 100644
index 0000000000..bf4377e51b
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/sha.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA_H
+#define AVUTIL_SHA_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha SHA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_sha_size;
+
+struct AVSHA;
+
+/**
+ * Allocate an AVSHA context.
+ */
+struct AVSHA *av_sha_alloc(void);
+
+/**
+ * Initialize SHA-1 or SHA-2 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha_size)
+ * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_sha_init(struct AVSHA* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_sha_final(struct AVSHA* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/sha512.h b/Externals/ffmpeg/dev/include/libavutil/sha512.h
new file mode 100644
index 0000000000..7b08701477
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/sha512.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2007 Michael Niedermayer
+ * Copyright (C) 2013 James Almer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_SHA512_H
+#define AVUTIL_SHA512_H
+
+#include <stdint.h>
+
+#include "attributes.h"
+#include "version.h"
+
+/**
+ * @defgroup lavu_sha512 SHA512
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_sha512_size;
+
+struct AVSHA512;
+
+/**
+ * Allocate an AVSHA512 context.
+ */
+struct AVSHA512 *av_sha512_alloc(void);
+
+/**
+ * Initialize SHA-2 512 hashing.
+ *
+ * @param context pointer to the function context (of size av_sha512_size)
+ * @param bits number of bits in digest (224, 256, 384 or 512 bits)
+ * @return zero if initialization succeeded, -1 otherwise
+ */
+int av_sha512_init(struct AVSHA512* context, int bits);
+
+/**
+ * Update hash value.
+ *
+ * @param context hash function context
+ * @param data input data to update hash with
+ * @param len input data length
+ */
+void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len);
+
+/**
+ * Finish hashing and output digest value.
+ *
+ * @param context hash function context
+ * @param digest buffer where output digest value is stored
+ */
+void av_sha512_final(struct AVSHA512* context, uint8_t *digest);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_SHA512_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/stereo3d.h b/Externals/ffmpeg/dev/include/libavutil/stereo3d.h
new file mode 100644
index 0000000000..1135dc9ddc
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/stereo3d.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2013 Vittorio Giovara
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_STEREO3D_H
+#define AVUTIL_STEREO3D_H
+
+#include <stdint.h>
+
+#include "frame.h"
+
+/**
+ * List of possible 3D Types
+ */
+enum AVStereo3DType {
+ /**
+ * Video is not stereoscopic (and metadata has to be there).
+ */
+ AV_STEREO3D_2D,
+
+ /**
+ * Views are next to each other.
+ *
+ * LLLLRRRR
+ * LLLLRRRR
+ * LLLLRRRR
+ * ...
+ */
+ AV_STEREO3D_SIDEBYSIDE,
+
+ /**
+ * Views are on top of each other.
+ *
+ * LLLLLLLL
+ * LLLLLLLL
+ * RRRRRRRR
+ * RRRRRRRR
+ */
+ AV_STEREO3D_TOPBOTTOM,
+
+ /**
+ * Views are alternated temporally.
+ *
+ * frame0 frame1 frame2 ...
+ * LLLLLLLL RRRRRRRR LLLLLLLL
+ * LLLLLLLL RRRRRRRR LLLLLLLL
+ * LLLLLLLL RRRRRRRR LLLLLLLL
+ * ... ... ...
+ */
+ AV_STEREO3D_FRAMESEQUENCE,
+
+ /**
+ * Views are packed in a checkerboard-like structure per pixel.
+ *
+ * LRLRLRLR
+ * RLRLRLRL
+ * LRLRLRLR
+ * ...
+ */
+ AV_STEREO3D_CHECKERBOARD,
+
+ /**
+ * Views are next to each other, but when upscaling
+ * apply a checkerboard pattern.
+ *
+ * LLLLRRRR L L L L R R R R
+ * LLLLRRRR => L L L L R R R R
+ * LLLLRRRR L L L L R R R R
+ * LLLLRRRR L L L L R R R R
+ */
+ AV_STEREO3D_SIDEBYSIDE_QUINCUNX,
+
+ /**
+ * Views are packed per line, as if interlaced.
+ *
+ * LLLLLLLL
+ * RRRRRRRR
+ * LLLLLLLL
+ * ...
+ */
+ AV_STEREO3D_LINES,
+
+ /**
+ * Views are packed per column.
+ *
+ * LRLRLRLR
+ * LRLRLRLR
+ * LRLRLRLR
+ * ...
+ */
+ AV_STEREO3D_COLUMNS,
+};
+
+
+/**
+ * Inverted views, Right/Bottom represents the left view.
+ */
+#define AV_STEREO3D_FLAG_INVERT (1 << 0)
+
+/**
+ * Stereo 3D type: this structure describes how two videos are packed
+ * within a single video surface, with additional information as needed.
+ *
+ * @note The struct must be allocated with av_stereo3d_alloc() and
+ * its size is not a part of the public ABI.
+ */
+typedef struct AVStereo3D {
+ /**
+ * How views are packed within the video.
+ */
+ enum AVStereo3DType type;
+
+ /**
+ * Additional information about the frame packing.
+ */
+ int flags;
+} AVStereo3D;
+
+/**
+ * Allocate an AVStereo3D structure and set its fields to default values.
+ * The resulting struct can be freed using av_freep().
+ *
+ * @return An AVStereo3D filled with default values or NULL on failure.
+ */
+AVStereo3D *av_stereo3d_alloc(void);
+
+/**
+ * Allocate a complete AVFrameSideData and add it to the frame.
+ *
+ * @param frame The frame which side data is added to.
+ *
+ * @return The AVStereo3D structure to be filled by caller.
+ */
+AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame);
+
+#endif /* AVUTIL_STEREO3D_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/threadmessage.h b/Externals/ffmpeg/dev/include/libavutil/threadmessage.h
new file mode 100644
index 0000000000..a8481d8ec3
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/threadmessage.h
@@ -0,0 +1,91 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public License
+ * as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
+ * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_THREADMESSAGE_H
+#define AVUTIL_THREADMESSAGE_H
+
+typedef struct AVThreadMessageQueue AVThreadMessageQueue;
+
+typedef enum AVThreadMessageFlags {
+
+ /**
+ * Perform non-blocking operation.
+ * If this flag is set, send and recv operations are non-blocking and
+ * return AVERROR(EAGAIN) immediately if they can not proceed.
+ */
+ AV_THREAD_MESSAGE_NONBLOCK = 1,
+
+} AVThreadMessageFlags;
+
+/**
+ * Allocate a new message queue.
+ *
+ * @param mq pointer to the message queue
+ * @param nelem maximum number of elements in the queue
+ * @param elsize size of each element in the queue
+ * @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if
+ * lavu was built without thread support
+ */
+int av_thread_message_queue_alloc(AVThreadMessageQueue **mq,
+ unsigned nelem,
+ unsigned elsize);
+
+/**
+ * Free a message queue.
+ *
+ * The message queue must no longer be in use by another thread.
+ */
+void av_thread_message_queue_free(AVThreadMessageQueue **mq);
+
+/**
+ * Send a message on the queue.
+ */
+int av_thread_message_queue_send(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags);
+
+/**
+ * Receive a message from the queue.
+ */
+int av_thread_message_queue_recv(AVThreadMessageQueue *mq,
+ void *msg,
+ unsigned flags);
+
+/**
+ * Set the sending error code.
+ *
+ * If the error code is set to non-zero, av_thread_message_queue_recv() will
+ * return it immediately when there are no longer available messages.
+ * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used
+ * to cause the receiving thread to stop or suspend its operation.
+ */
+void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq,
+ int err);
+
+/**
+ * Set the receiving error code.
+ *
+ * If the error code is set to non-zero, av_thread_message_queue_send() will
+ * return it immediately. Conventional values, such as AVERROR_EOF or
+ * AVERROR(EAGAIN), can be used to cause the sending thread to stop or
+ * suspend its operation.
+ */
+void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq,
+ int err);
+
+#endif /* AVUTIL_THREADMESSAGE_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/time.h b/Externals/ffmpeg/dev/include/libavutil/time.h
new file mode 100644
index 0000000000..dc169b064a
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/time.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (c) 2000-2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_TIME_H
+#define AVUTIL_TIME_H
+
+#include <stdint.h>
+
+/**
+ * Get the current time in microseconds.
+ */
+int64_t av_gettime(void);
+
+/**
+ * Get the current time in microseconds since some unspecified starting point.
+ * On platforms that support it, the time comes from a monotonic clock
+ * This property makes this time source ideal for measuring relative time.
+ * The returned values may not be monotonic on platforms where a monotonic
+ * clock is not available.
+ */
+int64_t av_gettime_relative(void);
+
+/**
+ * Indicates with a boolean result if the av_gettime_relative() time source
+ * is monotonic.
+ */
+int av_gettime_relative_is_monotonic(void);
+
+/**
+ * Sleep for a period of time. Although the duration is expressed in
+ * microseconds, the actual delay may be rounded to the precision of the
+ * system timer.
+ *
+ * @param usec Number of microseconds to sleep.
+ * @return zero on success or (negative) error code.
+ */
+int av_usleep(unsigned usec);
+
+#endif /* AVUTIL_TIME_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/timecode.h b/Externals/ffmpeg/dev/include/libavutil/timecode.h
new file mode 100644
index 0000000000..56e3975fd8
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/timecode.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier
+ * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Timecode helpers header
+ */
+
+#ifndef AVUTIL_TIMECODE_H
+#define AVUTIL_TIMECODE_H
+
+#include <stdint.h>
+#include "rational.h"
+
+#define AV_TIMECODE_STR_SIZE 16
+
+enum AVTimecodeFlag {
+ AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame
+ AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours
+ AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed
+};
+
+typedef struct {
+ int start; ///< timecode frame start (first base frame number)
+ uint32_t flags; ///< flags such as drop frame, +24 hours support, ...
+ AVRational rate; ///< frame rate in rational form
+ unsigned fps; ///< frame per second; must be consistent with the rate field
+} AVTimecode;
+
+/**
+ * Adjust frame number for NTSC drop frame time code.
+ *
+ * @param framenum frame number to adjust
+ * @param fps frame per second, 30 or 60
+ * @return adjusted frame number
+ * @warning adjustment is only valid in NTSC 29.97 and 59.94
+ */
+int av_timecode_adjust_ntsc_framenum2(int framenum, int fps);
+
+/**
+ * Convert frame number to SMPTE 12M binary representation.
+ *
+ * @param tc timecode data correctly initialized
+ * @param framenum frame number
+ * @return the SMPTE binary representation
+ *
+ * @note Frame number adjustment is automatically done in case of drop timecode,
+ * you do NOT have to call av_timecode_adjust_ntsc_framenum2().
+ * @note The frame number is relative to tc->start.
+ * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity
+ * correction (PC) bits are set to zero.
+ */
+uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum);
+
+/**
+ * Load timecode string in buf.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc timecode data correctly initialized
+ * @param framenum frame number
+ * @return the buf parameter
+ *
+ * @note Timecode representation can be a negative timecode and have more than
+ * 24 hours, but will only be honored if the flags are correctly set.
+ * @note The frame number is relative to tc->start.
+ */
+char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum);
+
+/**
+ * Get the timecode string from the SMPTE timecode format.
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tcsmpte the 32-bit SMPTE timecode
+ * @param prevent_df prevent the use of a drop flag when it is known the DF bit
+ * is arbitrary
+ * @return the buf parameter
+ */
+char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df);
+
+/**
+ * Get the timecode string from the 25-bit timecode format (MPEG GOP format).
+ *
+ * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long
+ * @param tc25bit the 25-bits timecode
+ * @return the buf parameter
+ */
+char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit);
+
+/**
+ * Init a timecode struct with the passed parameters.
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field
+ * is a pointer to an AVClass struct (used for av_log)
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param flags miscellaneous flags such as drop frame, +24 hours, ...
+ * (see AVTimecodeFlag)
+ * @param frame_start the first frame number
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx);
+
+/**
+ * Parse timecode representation (hh:mm:ss[:;.]ff).
+ *
+ * @param log_ctx a pointer to an arbitrary struct of which the first field is a
+ * pointer to an AVClass struct (used for av_log).
+ * @param tc pointer to an allocated AVTimecode
+ * @param rate frame rate in rational form
+ * @param str timecode string which will determine the frame start
+ * @return 0 on success, AVERROR otherwise
+ */
+int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx);
+
+/**
+ * Check if the timecode feature is available for the given frame rate
+ *
+ * @return 0 if supported, <0 otherwise
+ */
+int av_timecode_check_frame_rate(AVRational rate);
+
+#endif /* AVUTIL_TIMECODE_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/timestamp.h b/Externals/ffmpeg/dev/include/libavutil/timestamp.h
new file mode 100644
index 0000000000..f010a7ee38
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/timestamp.h
@@ -0,0 +1,78 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * timestamp utils, mostly useful for debugging/logging purposes
+ */
+
+#ifndef AVUTIL_TIMESTAMP_H
+#define AVUTIL_TIMESTAMP_H
+
+#include "common.h"
+
+#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64)
+#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS
+#endif
+
+#define AV_TS_MAX_STRING_SIZE 32
+
+/**
+ * Fill the provided buffer with a string containing a timestamp
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_string(char *buf, int64_t ts)
+{
+ if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+ else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts);
+ return buf;
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts)
+
+/**
+ * Fill the provided buffer with a string containing a timestamp time
+ * representation.
+ *
+ * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE
+ * @param ts the timestamp to represent
+ * @param tb the timebase of the timestamp
+ * @return the buffer in input
+ */
+static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb)
+{
+ if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS");
+ else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts);
+ return buf;
+}
+
+/**
+ * Convenience macro, the return value should be used only directly in
+ * function arguments but never stand-alone.
+ */
+#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb)
+
+#endif /* AVUTIL_TIMESTAMP_H */
diff --git a/Externals/ffmpeg/dev/include/libavutil/version.h b/Externals/ffmpeg/dev/include/libavutil/version.h
new file mode 100644
index 0000000000..a1ed502016
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/version.h
@@ -0,0 +1,136 @@
+/*
+ * copyright (c) 2003 Fabrice Bellard
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_VERSION_H
+#define AVUTIL_VERSION_H
+
+#include "macros.h"
+
+/**
+ * @addtogroup version_utils
+ *
+ * Useful to check and match library version in order to maintain
+ * backward compatibility.
+ *
+ * @{
+ */
+
+#define AV_VERSION_INT(a, b, c) (a<<16 | b<<8 | c)
+#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c
+#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c)
+
+/**
+ * @}
+ */
+
+/**
+ * @file
+ * @ingroup lavu
+ * Libavutil version macros
+ */
+
+/**
+ * @defgroup lavu_ver Version and Build diagnostics
+ *
+ * Macros and function useful to check at compiletime and at runtime
+ * which version of libavutil is in use.
+ *
+ * @{
+ */
+
+#define LIBAVUTIL_VERSION_MAJOR 54
+#define LIBAVUTIL_VERSION_MINOR 16
+#define LIBAVUTIL_VERSION_MICRO 100
+
+#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \
+ LIBAVUTIL_VERSION_MINOR, \
+ LIBAVUTIL_VERSION_MICRO)
+#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT
+
+#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION)
+
+/**
+ * @}
+ *
+ * @defgroup depr_guards Deprecation guards
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ *
+ * @{
+ */
+
+#ifndef FF_API_OLD_AVOPTIONS
+#define FF_API_OLD_AVOPTIONS (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_PIX_FMT
+#define FF_API_PIX_FMT (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_CONTEXT_SIZE
+#define FF_API_CONTEXT_SIZE (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_PIX_FMT_DESC
+#define FF_API_PIX_FMT_DESC (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AV_REVERSE
+#define FF_API_AV_REVERSE (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AUDIOCONVERT
+#define FF_API_AUDIOCONVERT (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_CPU_FLAG_MMX2
+#define FF_API_CPU_FLAG_MMX2 (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_LLS_PRIVATE
+#define FF_API_LLS_PRIVATE (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_AVFRAME_LAVC
+#define FF_API_AVFRAME_LAVC (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_VDPAU
+#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_GET_CHANNEL_LAYOUT_COMPAT
+#define FF_API_GET_CHANNEL_LAYOUT_COMPAT (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_XVMC
+#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+#ifndef FF_API_OPT_TYPE_METADATA
+#define FF_API_OPT_TYPE_METADATA (LIBAVUTIL_VERSION_MAJOR < 55)
+#endif
+
+#ifndef FF_CONST_AVUTIL55
+#if LIBAVUTIL_VERSION_MAJOR >= 55
+#define FF_CONST_AVUTIL55 const
+#else
+#define FF_CONST_AVUTIL55
+#endif
+#endif
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_VERSION_H */
+
diff --git a/Externals/ffmpeg/dev/include/libavutil/xtea.h b/Externals/ffmpeg/dev/include/libavutil/xtea.h
new file mode 100644
index 0000000000..6f1e71e345
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libavutil/xtea.h
@@ -0,0 +1,64 @@
+/*
+ * A 32-bit implementation of the XTEA algorithm
+ * Copyright (c) 2012 Samuel Pitoiset
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_XTEA_H
+#define AVUTIL_XTEA_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * @brief Public header for libavutil XTEA algorithm
+ * @defgroup lavu_xtea XTEA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+typedef struct AVXTEA {
+ uint32_t key[16];
+} AVXTEA;
+
+/**
+ * Initialize an AVXTEA context.
+ *
+ * @param ctx an AVXTEA context
+ * @param key a key of 16 bytes used for encryption/decryption
+ */
+void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context.
+ *
+ * @param ctx an AVXTEA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 8 byte blocks
+ * @param iv initialization vector for CBC mode, if NULL then ECB will be used
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src,
+ int count, uint8_t *iv, int decrypt);
+
+/**
+ * @}
+ */
+
+#endif /* AVUTIL_XTEA_H */
diff --git a/Externals/ffmpeg/dev/include/libpostproc/postprocess.h b/Externals/ffmpeg/dev/include/libpostproc/postprocess.h
new file mode 100644
index 0000000000..e00ed968d7
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libpostproc/postprocess.h
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef POSTPROC_POSTPROCESS_H
+#define POSTPROC_POSTPROCESS_H
+
+/**
+ * @file
+ * @ingroup lpp
+ * external API header
+ */
+
+/**
+ * @defgroup lpp Libpostproc
+ * @{
+ */
+
+#include "libpostproc/version.h"
+
+/**
+ * Return the LIBPOSTPROC_VERSION_INT constant.
+ */
+unsigned postproc_version(void);
+
+/**
+ * Return the libpostproc build-time configuration.
+ */
+const char *postproc_configuration(void);
+
+/**
+ * Return the libpostproc license.
+ */
+const char *postproc_license(void);
+
+#define PP_QUALITY_MAX 6
+
+#define QP_STORE_T int8_t
+
+#include <inttypes.h>
+
+typedef void pp_context;
+typedef void pp_mode;
+
+#if LIBPOSTPROC_VERSION_INT < (52<<16)
+typedef pp_context pp_context_t;
+typedef pp_mode pp_mode_t;
+extern const char *const pp_help; ///< a simple help text
+#else
+extern const char pp_help[]; ///< a simple help text
+#endif
+
+void pp_postprocess(const uint8_t * src[3], const int srcStride[3],
+ uint8_t * dst[3], const int dstStride[3],
+ int horizontalSize, int verticalSize,
+ const QP_STORE_T *QP_store, int QP_stride,
+ pp_mode *mode, pp_context *ppContext, int pict_type);
+
+
+/**
+ * Return a pp_mode or NULL if an error occurred.
+ *
+ * @param name the string after "-pp" on the command line
+ * @param quality a number from 0 to PP_QUALITY_MAX
+ */
+pp_mode *pp_get_mode_by_name_and_quality(const char *name, int quality);
+void pp_free_mode(pp_mode *mode);
+
+pp_context *pp_get_context(int width, int height, int flags);
+void pp_free_context(pp_context *ppContext);
+
+#define PP_CPU_CAPS_MMX 0x80000000
+#define PP_CPU_CAPS_MMX2 0x20000000
+#define PP_CPU_CAPS_3DNOW 0x40000000
+#define PP_CPU_CAPS_ALTIVEC 0x10000000
+#define PP_CPU_CAPS_AUTO 0x00080000
+
+#define PP_FORMAT 0x00000008
+#define PP_FORMAT_420 (0x00000011|PP_FORMAT)
+#define PP_FORMAT_422 (0x00000001|PP_FORMAT)
+#define PP_FORMAT_411 (0x00000002|PP_FORMAT)
+#define PP_FORMAT_444 (0x00000000|PP_FORMAT)
+#define PP_FORMAT_440 (0x00000010|PP_FORMAT)
+
+#define PP_PICT_TYPE_QP2 0x00000010 ///< MPEG2 style QScale
+
+/**
+ * @}
+ */
+
+#endif /* POSTPROC_POSTPROCESS_H */
diff --git a/Externals/ffmpeg/dev/include/libpostproc/version.h b/Externals/ffmpeg/dev/include/libpostproc/version.h
new file mode 100644
index 0000000000..59c24660f8
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libpostproc/version.h
@@ -0,0 +1,45 @@
+/*
+ * Version macros.
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef POSTPROC_POSTPROCESS_VERSION_H
+#define POSTPROC_POSTPROCESS_VERSION_H
+
+/**
+ * @file
+ * Libpostproc version macros
+ */
+
+#include "libavutil/avutil.h"
+
+#define LIBPOSTPROC_VERSION_MAJOR 53
+#define LIBPOSTPROC_VERSION_MINOR 3
+#define LIBPOSTPROC_VERSION_MICRO 100
+
+#define LIBPOSTPROC_VERSION_INT AV_VERSION_INT(LIBPOSTPROC_VERSION_MAJOR, \
+ LIBPOSTPROC_VERSION_MINOR, \
+ LIBPOSTPROC_VERSION_MICRO)
+#define LIBPOSTPROC_VERSION AV_VERSION(LIBPOSTPROC_VERSION_MAJOR, \
+ LIBPOSTPROC_VERSION_MINOR, \
+ LIBPOSTPROC_VERSION_MICRO)
+#define LIBPOSTPROC_BUILD LIBPOSTPROC_VERSION_INT
+
+#define LIBPOSTPROC_IDENT "postproc" AV_STRINGIFY(LIBPOSTPROC_VERSION)
+
+#endif /* POSTPROC_POSTPROCESS_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libswresample/swresample.h b/Externals/ffmpeg/dev/include/libswresample/swresample.h
new file mode 100644
index 0000000000..37656a667d
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libswresample/swresample.h
@@ -0,0 +1,534 @@
+/*
+ * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at)
+ *
+ * This file is part of libswresample
+ *
+ * libswresample is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * libswresample is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with libswresample; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWRESAMPLE_SWRESAMPLE_H
+#define SWRESAMPLE_SWRESAMPLE_H
+
+/**
+ * @file
+ * @ingroup lswr
+ * libswresample public header
+ */
+
+/**
+ * @defgroup lswr Libswresample
+ * @{
+ *
+ * Libswresample (lswr) is a library that handles audio resampling, sample
+ * format conversion and mixing.
+ *
+ * Interaction with lswr is done through SwrContext, which is
+ * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters
+ * must be set with the @ref avoptions API.
+ *
+ * The first thing you will need to do in order to use lswr is to allocate
+ * SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you
+ * are using the former, you must set options through the @ref avoptions API.
+ * The latter function provides the same feature, but it allows you to set some
+ * common options in the same statement.
+ *
+ * For example the following code will setup conversion from planar float sample
+ * format to interleaved signed 16-bit integer, downsampling from 48kHz to
+ * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing
+ * matrix). This is using the swr_alloc() function.
+ * @code
+ * SwrContext *swr = swr_alloc();
+ * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0);
+ * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0);
+ * av_opt_set_int(swr, "in_sample_rate", 48000, 0);
+ * av_opt_set_int(swr, "out_sample_rate", 44100, 0);
+ * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0);
+ * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
+ * @endcode
+ *
+ * The same job can be done using swr_alloc_set_opts() as well:
+ * @code
+ * SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context
+ * AV_CH_LAYOUT_STEREO, // out_ch_layout
+ * AV_SAMPLE_FMT_S16, // out_sample_fmt
+ * 44100, // out_sample_rate
+ * AV_CH_LAYOUT_5POINT1, // in_ch_layout
+ * AV_SAMPLE_FMT_FLTP, // in_sample_fmt
+ * 48000, // in_sample_rate
+ * 0, // log_offset
+ * NULL); // log_ctx
+ * @endcode
+ *
+ * Once all values have been set, it must be initialized with swr_init(). If
+ * you need to change the conversion parameters, you can change the parameters
+ * using @ref AVOptions, as described above in the first example; or by using
+ * swr_alloc_set_opts(), but with the first argument the allocated context.
+ * You must then call swr_init() again.
+ *
+ * The conversion itself is done by repeatedly calling swr_convert().
+ * Note that the samples may get buffered in swr if you provide insufficient
+ * output space or if sample rate conversion is done, which requires "future"
+ * samples. Samples that do not require future input can be retrieved at any
+ * time by using swr_convert() (in_count can be set to 0).
+ * At the end of conversion the resampling buffer can be flushed by calling
+ * swr_convert() with NULL in and 0 in_count.
+ *
+ * The samples used in the conversion process can be managed with the libavutil
+ * @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc()
+ * function used in the following example.
+ *
+ * The delay between input and output, can at any time be found by using
+ * swr_get_delay().
+ *
+ * The following code demonstrates the conversion loop assuming the parameters
+ * from above and caller-defined functions get_input() and handle_output():
+ * @code
+ * uint8_t **input;
+ * int in_samples;
+ *
+ * while (get_input(&input, &in_samples)) {
+ * uint8_t *output;
+ * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) +
+ * in_samples, 44100, 48000, AV_ROUND_UP);
+ * av_samples_alloc(&output, NULL, 2, out_samples,
+ * AV_SAMPLE_FMT_S16, 0);
+ * out_samples = swr_convert(swr, &output, out_samples,
+ * input, in_samples);
+ * handle_output(output, out_samples);
+ * av_freep(&output);
+ * }
+ * @endcode
+ *
+ * When the conversion is finished, the conversion
+ * context and everything associated with it must be freed with swr_free().
+ * A swr_close() function is also available, but it exists mainly for
+ * compatibility with libavresample, and is not required to be called.
+ *
+ * There will be no memory leak if the data is not completely flushed before
+ * swr_free().
+ */
+
+#include <stdint.h>
+#include "libavutil/frame.h"
+#include "libavutil/samplefmt.h"
+
+#include "libswresample/version.h"
+
+#if LIBSWRESAMPLE_VERSION_MAJOR < 1
+#define SWR_CH_MAX 32 ///< Maximum number of channels
+#endif
+
+/**
+ * @name Option constants
+ * These constants are used for the @ref avoptions interface for lswr.
+ * @{
+ *
+ */
+
+#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate
+//TODO use int resample ?
+//long term TODO can we enable this dynamically?
+
+/** Dithering algorithms */
+enum SwrDitherType {
+ SWR_DITHER_NONE = 0,
+ SWR_DITHER_RECTANGULAR,
+ SWR_DITHER_TRIANGULAR,
+ SWR_DITHER_TRIANGULAR_HIGHPASS,
+
+ SWR_DITHER_NS = 64, ///< not part of API/ABI
+ SWR_DITHER_NS_LIPSHITZ,
+ SWR_DITHER_NS_F_WEIGHTED,
+ SWR_DITHER_NS_MODIFIED_E_WEIGHTED,
+ SWR_DITHER_NS_IMPROVED_E_WEIGHTED,
+ SWR_DITHER_NS_SHIBATA,
+ SWR_DITHER_NS_LOW_SHIBATA,
+ SWR_DITHER_NS_HIGH_SHIBATA,
+ SWR_DITHER_NB, ///< not part of API/ABI
+};
+
+/** Resampling Engines */
+enum SwrEngine {
+ SWR_ENGINE_SWR, /**< SW Resampler */
+ SWR_ENGINE_SOXR, /**< SoX Resampler */
+ SWR_ENGINE_NB, ///< not part of API/ABI
+};
+
+/** Resampling Filter Types */
+enum SwrFilterType {
+ SWR_FILTER_TYPE_CUBIC, /**< Cubic */
+ SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall Windowed Sinc */
+ SWR_FILTER_TYPE_KAISER, /**< Kaiser Windowed Sinc */
+};
+
+/**
+ * @}
+ */
+
+/**
+ * The libswresample context. Unlike libavcodec and libavformat, this structure
+ * is opaque. This means that if you would like to set options, you must use
+ * the @ref avoptions API and cannot directly set values to members of the
+ * structure.
+ */
+typedef struct SwrContext SwrContext;
+
+/**
+ * Get the AVClass for SwrContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ * @return the AVClass of SwrContext
+ */
+const AVClass *swr_get_class(void);
+
+/**
+ * @name SwrContext constructor functions
+ * @{
+ */
+
+/**
+ * Allocate SwrContext.
+ *
+ * If you use this function you will need to set the parameters (manually or
+ * with swr_alloc_set_opts()) before calling swr_init().
+ *
+ * @see swr_alloc_set_opts(), swr_init(), swr_free()
+ * @return NULL on error, allocated context otherwise
+ */
+struct SwrContext *swr_alloc(void);
+
+/**
+ * Initialize context after user parameters have been set.
+ * @note The context must be configured using the AVOption API.
+ *
+ * @see av_opt_set_int()
+ * @see av_opt_set_dict()
+ *
+ * @param[in,out] s Swr context to initialize
+ * @return AVERROR error code in case of failure.
+ */
+int swr_init(struct SwrContext *s);
+
+/**
+ * Check whether an swr context has been initialized or not.
+ *
+ * @param[in] s Swr context to check
+ * @see swr_init()
+ * @return positive if it has been initialized, 0 if not initialized
+ */
+int swr_is_initialized(struct SwrContext *s);
+
+/**
+ * Allocate SwrContext if needed and set/reset common parameters.
+ *
+ * This function does not require s to be allocated with swr_alloc(). On the
+ * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters
+ * on the allocated context.
+ *
+ * @param s existing Swr context if available, or NULL if not
+ * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*)
+ * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*).
+ * @param out_sample_rate output sample rate (frequency in Hz)
+ * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*)
+ * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*).
+ * @param in_sample_rate input sample rate (frequency in Hz)
+ * @param log_offset logging level offset
+ * @param log_ctx parent logging context, can be NULL
+ *
+ * @see swr_init(), swr_free()
+ * @return NULL on error, allocated context otherwise
+ */
+struct SwrContext *swr_alloc_set_opts(struct SwrContext *s,
+ int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
+ int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate,
+ int log_offset, void *log_ctx);
+
+/**
+ * @}
+ *
+ * @name SwrContext destructor functions
+ * @{
+ */
+
+/**
+ * Free the given SwrContext and set the pointer to NULL.
+ *
+ * @param[in] s a pointer to a pointer to Swr context
+ */
+void swr_free(struct SwrContext **s);
+
+/**
+ * Closes the context so that swr_is_initialized() returns 0.
+ *
+ * The context can be brought back to life by running swr_init(),
+ * swr_init() can also be used without swr_close().
+ * This function is mainly provided for simplifying the usecase
+ * where one tries to support libavresample and libswresample.
+ *
+ * @param[in,out] s Swr context to be closed
+ */
+void swr_close(struct SwrContext *s);
+
+/**
+ * @}
+ *
+ * @name Core conversion functions
+ * @{
+ */
+
+/** Convert audio.
+ *
+ * in and in_count can be set to 0 to flush the last few samples out at the
+ * end.
+ *
+ * If more input is provided than output space then the input will be buffered.
+ * You can avoid this buffering by providing more output space than input.
+ * Conversion will run directly without copying whenever possible.
+ *
+ * @param s allocated Swr context, with parameters set
+ * @param out output buffers, only the first one need be set in case of packed audio
+ * @param out_count amount of space available for output in samples per channel
+ * @param in input buffers, only the first one need to be set in case of packed audio
+ * @param in_count number of input samples available in one channel
+ *
+ * @return number of samples output per channel, negative value on error
+ */
+int swr_convert(struct SwrContext *s, uint8_t **out, int out_count,
+ const uint8_t **in , int in_count);
+
+/**
+ * Convert the next timestamp from input to output
+ * timestamps are in 1/(in_sample_rate * out_sample_rate) units.
+ *
+ * @note There are 2 slightly differently behaving modes.
+ * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX)
+ * in this case timestamps will be passed through with delays compensated
+ * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX)
+ * in this case the output timestamps will match output sample numbers.
+ * See ffmpeg-resampler(1) for the two modes of compensation.
+ *
+ * @param[in] s    initialized Swr context
+ * @param[in] pts  timestamp for the next input sample, INT64_MIN if unknown
+ * @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are
+ * functions used internally for timestamp compensation.
+ * @return the output timestamp for the next output sample
+ */
+int64_t swr_next_pts(struct SwrContext *s, int64_t pts);
+
+/**
+ * @}
+ *
+ * @name Low-level option setting functions
+ * These functions provide a means to set low-level options that are not possible
+ * with the AVOption API.
+ * @{
+ */
+
+/**
+ * Activate resampling compensation ("soft" compensation). This function is
+ * internally called when needed in swr_next_pts().
+ *
+ * @param[in,out] s allocated Swr context. If it is not initialized,
+ * or SWR_FLAG_RESAMPLE is not set, swr_init() is
+ * called with the flag set.
+ * @param[in] sample_delta delta in PTS per sample
+ * @param[in] compensation_distance number of samples to compensate for
+ * @return >= 0 on success, AVERROR error codes if:
+ * @li @c s is NULL,
+ * @li @c compensation_distance is less than 0,
+ * @li @c compensation_distance is 0 but sample_delta is not,
+ * @li compensation unsupported by resampler, or
+ * @li swr_init() fails when called.
+ */
+int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance);
+
+/**
+ * Set a customized input channel mapping.
+ *
+ * @param[in,out] s allocated Swr context, not yet initialized
+ * @param[in] channel_map customized input channel mapping (array of channel
+ * indexes, -1 for a muted channel)
+ * @return >= 0 on success, or AVERROR error code in case of failure.
+ */
+int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map);
+
+/**
+ * Set a customized remix matrix.
+ *
+ * @param s allocated Swr context, not yet initialized
+ * @param matrix remix coefficients; matrix[i + stride * o] is
+ * the weight of input channel i in output channel o
+ * @param stride offset between lines of the matrix
+ * @return >= 0 on success, or AVERROR error code in case of failure.
+ */
+int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride);
+
+/**
+ * @}
+ *
+ * @name Sample handling functions
+ * @{
+ */
+
+/**
+ * Drops the specified number of output samples.
+ *
+ * This function, along with swr_inject_silence(), is called by swr_next_pts()
+ * if needed for "hard" compensation.
+ *
+ * @param s allocated Swr context
+ * @param count number of samples to be dropped
+ *
+ * @return >= 0 on success, or a negative AVERROR code on failure
+ */
+int swr_drop_output(struct SwrContext *s, int count);
+
+/**
+ * Injects the specified number of silence samples.
+ *
+ * This function, along with swr_drop_output(), is called by swr_next_pts()
+ * if needed for "hard" compensation.
+ *
+ * @param s allocated Swr context
+ * @param count number of silence samples to be injected
+ *
+ * @return >= 0 on success, or a negative AVERROR code on failure
+ */
+int swr_inject_silence(struct SwrContext *s, int count);
+
+/**
+ * Gets the delay the next input sample will experience relative to the next output sample.
+ *
+ * Swresample can buffer data if more input has been provided than available
+ * output space, also converting between sample rates needs a delay.
+ * This function returns the sum of all such delays.
+ * The exact delay is not necessarily an integer value in either input or
+ * output sample rate. Especially when downsampling by a large value, the
+ * output sample rate may be a poor choice to represent the delay, similarly
+ * for upsampling and the input sample rate.
+ *
+ * @param s swr context
+ * @param base timebase in which the returned delay will be:
+ * @li if it's set to 1 the returned delay is in seconds
+ * @li if it's set to 1000 the returned delay is in milliseconds
+ * @li if it's set to the input sample rate then the returned
+ * delay is in input samples
+ * @li if it's set to the output sample rate then the returned
+ * delay is in output samples
+ * @li if it's the least common multiple of in_sample_rate and
+ * out_sample_rate then an exact rounding-free delay will be
+ * returned
+ * @returns the delay in 1 / @c base units.
+ */
+int64_t swr_get_delay(struct SwrContext *s, int64_t base);
+
+/**
+ * @}
+ *
+ * @name Configuration accessors
+ * @{
+ */
+
+/**
+ * Return the @ref LIBSWRESAMPLE_VERSION_INT constant.
+ *
+ * This is useful to check if the build-time libswresample has the same version
+ * as the run-time one.
+ *
+ * @returns the unsigned int-typed version
+ */
+unsigned swresample_version(void);
+
+/**
+ * Return the swr build-time configuration.
+ *
+ * @returns the build-time @c ./configure flags
+ */
+const char *swresample_configuration(void);
+
+/**
+ * Return the swr license.
+ *
+ * @returns the license of libswresample, determined at build-time
+ */
+const char *swresample_license(void);
+
+/**
+ * @}
+ *
+ * @name AVFrame based API
+ * @{
+ */
+
+/**
+ * Convert the samples in the input AVFrame and write them to the output AVFrame.
+ *
+ * Input and output AVFrames must have channel_layout, sample_rate and format set.
+ *
+ * If the output AVFrame does not have the data pointers allocated, the
+ * nb_samples field will be set and av_frame_get_buffer() will be
+ * called to allocate the frame.
+ *
+ * The output AVFrame can be NULL or have fewer allocated samples than required.
+ * In this case, any remaining samples not written to the output will be added
+ * to an internal FIFO buffer, to be returned at the next call to this function
+ * or to swr_convert().
+ *
+ * If converting sample rate, there may be data remaining in the internal
+ * resampling delay buffer. swr_get_delay() tells the number of
+ * remaining samples. To get this data as output, call this function or
+ * swr_convert() with NULL input.
+ *
+ * If the SwrContext configuration does not match the output and
+ * input AVFrame settings the conversion does not take place and depending on
+ * which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED
+ * or the result of a bitwise-OR of them is returned.
+ *
+ * @see swr_delay()
+ * @see swr_convert()
+ * @see swr_get_delay()
+ *
+ * @param swr audio resample context
+ * @param output output AVFrame
+ * @param input input AVFrame
+ * @return 0 on success, AVERROR on failure or nonmatching
+ * configuration.
+ */
+int swr_convert_frame(SwrContext *swr,
+ AVFrame *output, const AVFrame *input);
+
+/**
+ * Configure or reconfigure the SwrContext using the information
+ * provided by the AVFrames.
+ *
+ * The original resampling context is reset even on failure.
+ * The function calls swr_close() internally if the context is open.
+ *
+ * @see swr_close();
+ *
+ * @param swr audio resample context
+ * @param output output AVFrame
+ * @param input input AVFrame
+ * @return 0 on success, AVERROR on failure.
+ */
+int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in);
+
+/**
+ * @}
+ * @}
+ */
+
+#endif /* SWRESAMPLE_SWRESAMPLE_H */
diff --git a/Externals/ffmpeg/dev/include/libswresample/version.h b/Externals/ffmpeg/dev/include/libswresample/version.h
new file mode 100644
index 0000000000..61c76fa2f4
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libswresample/version.h
@@ -0,0 +1,45 @@
+/*
+ * Version macros.
+ *
+ * This file is part of libswresample
+ *
+ * libswresample is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * libswresample is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with libswresample; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWR_VERSION_H
+#define SWR_VERSION_H
+
+/**
+ * @file
+ * Libswresample version macros
+ */
+
+#include "libavutil/avutil.h"
+
+#define LIBSWRESAMPLE_VERSION_MAJOR 1
+#define LIBSWRESAMPLE_VERSION_MINOR 1
+#define LIBSWRESAMPLE_VERSION_MICRO 100
+
+#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \
+ LIBSWRESAMPLE_VERSION_MINOR, \
+ LIBSWRESAMPLE_VERSION_MICRO)
+#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \
+ LIBSWRESAMPLE_VERSION_MINOR, \
+ LIBSWRESAMPLE_VERSION_MICRO)
+#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT
+
+#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION)
+
+#endif /* SWR_VERSION_H */
diff --git a/Externals/ffmpeg/dev/include/libswscale/swscale.h b/Externals/ffmpeg/dev/include/libswscale/swscale.h
new file mode 100644
index 0000000000..903e1203fd
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libswscale/swscale.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2001-2011 Michael Niedermayer
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_SWSCALE_H
+#define SWSCALE_SWSCALE_H
+
+/**
+ * @file
+ * @ingroup libsws
+ * external API header
+ */
+
+#include <stdint.h>
+
+#include "libavutil/avutil.h"
+#include "libavutil/log.h"
+#include "libavutil/pixfmt.h"
+#include "version.h"
+
+/**
+ * @defgroup libsws Color conversion and scaling
+ * @{
+ *
+ * Return the LIBSWSCALE_VERSION_INT constant.
+ */
+unsigned swscale_version(void);
+
+/**
+ * Return the libswscale build-time configuration.
+ */
+const char *swscale_configuration(void);
+
+/**
+ * Return the libswscale license.
+ */
+const char *swscale_license(void);
+
+/* values for the flags, the stuff on the command line is different */
+#define SWS_FAST_BILINEAR 1
+#define SWS_BILINEAR 2
+#define SWS_BICUBIC 4
+#define SWS_X 8
+#define SWS_POINT 0x10
+#define SWS_AREA 0x20
+#define SWS_BICUBLIN 0x40
+#define SWS_GAUSS 0x80
+#define SWS_SINC 0x100
+#define SWS_LANCZOS 0x200
+#define SWS_SPLINE 0x400
+
+#define SWS_SRC_V_CHR_DROP_MASK 0x30000
+#define SWS_SRC_V_CHR_DROP_SHIFT 16
+
+#define SWS_PARAM_DEFAULT 123456
+
+#define SWS_PRINT_INFO 0x1000
+
+//the following 3 flags are not completely implemented
+//internal chrominance subsampling info
+#define SWS_FULL_CHR_H_INT 0x2000
+//input subsampling info
+#define SWS_FULL_CHR_H_INP 0x4000
+#define SWS_DIRECT_BGR 0x8000
+#define SWS_ACCURATE_RND 0x40000
+#define SWS_BITEXACT 0x80000
+#define SWS_ERROR_DIFFUSION 0x800000
+
+#if FF_API_SWS_CPU_CAPS
+/**
+ * CPU caps are autodetected now, those flags
+ * are only provided for API compatibility.
+ */
+#define SWS_CPU_CAPS_MMX 0x80000000
+#define SWS_CPU_CAPS_MMXEXT 0x20000000
+#define SWS_CPU_CAPS_MMX2 0x20000000
+#define SWS_CPU_CAPS_3DNOW 0x40000000
+#define SWS_CPU_CAPS_ALTIVEC 0x10000000
+#if FF_API_ARCH_BFIN
+#define SWS_CPU_CAPS_BFIN 0x01000000
+#endif
+#define SWS_CPU_CAPS_SSE2 0x02000000
+#endif
+
+#define SWS_MAX_REDUCE_CUTOFF 0.002
+
+#define SWS_CS_ITU709 1
+#define SWS_CS_FCC 4
+#define SWS_CS_ITU601 5
+#define SWS_CS_ITU624 5
+#define SWS_CS_SMPTE170M 5
+#define SWS_CS_SMPTE240M 7
+#define SWS_CS_DEFAULT 5
+
+/**
+ * Return a pointer to yuv<->rgb coefficients for the given colorspace
+ * suitable for sws_setColorspaceDetails().
+ *
+ * @param colorspace One of the SWS_CS_* macros. If invalid,
+ * SWS_CS_DEFAULT is used.
+ */
+const int *sws_getCoefficients(int colorspace);
+
+// when used for filters they must have an odd number of elements
+// coeffs cannot be shared between vectors
+typedef struct SwsVector {
+ double *coeff; ///< pointer to the list of coefficients
+ int length; ///< number of coefficients in the vector
+} SwsVector;
+
+// vectors can be shared
+typedef struct SwsFilter {
+ SwsVector *lumH;
+ SwsVector *lumV;
+ SwsVector *chrH;
+ SwsVector *chrV;
+} SwsFilter;
+
+struct SwsContext;
+
+/**
+ * Return a positive value if pix_fmt is a supported input format, 0
+ * otherwise.
+ */
+int sws_isSupportedInput(enum AVPixelFormat pix_fmt);
+
+/**
+ * Return a positive value if pix_fmt is a supported output format, 0
+ * otherwise.
+ */
+int sws_isSupportedOutput(enum AVPixelFormat pix_fmt);
+
+/**
+ * @param[in] pix_fmt the pixel format
+ * @return a positive value if an endianness conversion for pix_fmt is
+ * supported, 0 otherwise.
+ */
+int sws_isSupportedEndiannessConversion(enum AVPixelFormat pix_fmt);
+
+/**
+ * Allocate an empty SwsContext. This must be filled and passed to
+ * sws_init_context(). For filling see AVOptions, options.c and
+ * sws_setColorspaceDetails().
+ */
+struct SwsContext *sws_alloc_context(void);
+
+/**
+ * Initialize the swscaler context sws_context.
+ *
+ * @return zero or positive value on success, a negative value on
+ * error
+ */
+int sws_init_context(struct SwsContext *sws_context, SwsFilter *srcFilter, SwsFilter *dstFilter);
+
+/**
+ * Free the swscaler context swsContext.
+ * If swsContext is NULL, then does nothing.
+ */
+void sws_freeContext(struct SwsContext *swsContext);
+
+/**
+ * Allocate and return an SwsContext. You need it to perform
+ * scaling/conversion operations using sws_scale().
+ *
+ * @param srcW the width of the source image
+ * @param srcH the height of the source image
+ * @param srcFormat the source image format
+ * @param dstW the width of the destination image
+ * @param dstH the height of the destination image
+ * @param dstFormat the destination image format
+ * @param flags specify which algorithm and options to use for rescaling
+ * @return a pointer to an allocated context, or NULL in case of error
+ * @note this function is to be removed after a saner alternative is
+ * written
+ */
+struct SwsContext *sws_getContext(int srcW, int srcH, enum AVPixelFormat srcFormat,
+ int dstW, int dstH, enum AVPixelFormat dstFormat,
+ int flags, SwsFilter *srcFilter,
+ SwsFilter *dstFilter, const double *param);
+
+/**
+ * Scale the image slice in srcSlice and put the resulting scaled
+ * slice in the image in dst. A slice is a sequence of consecutive
+ * rows in an image.
+ *
+ * Slices have to be provided in sequential order, either in
+ * top-bottom or bottom-top order. If slices are provided in
+ * non-sequential order the behavior of the function is undefined.
+ *
+ * @param c the scaling context previously created with
+ * sws_getContext()
+ * @param srcSlice the array containing the pointers to the planes of
+ * the source slice
+ * @param srcStride the array containing the strides for each plane of
+ * the source image
+ * @param srcSliceY the position in the source image of the slice to
+ * process, that is the number (counted starting from
+ * zero) in the image of the first row of the slice
+ * @param srcSliceH the height of the source slice, that is the number
+ * of rows in the slice
+ * @param dst the array containing the pointers to the planes of
+ * the destination image
+ * @param dstStride the array containing the strides for each plane of
+ * the destination image
+ * @return the height of the output slice
+ */
+int sws_scale(struct SwsContext *c, const uint8_t *const srcSlice[],
+ const int srcStride[], int srcSliceY, int srcSliceH,
+ uint8_t *const dst[], const int dstStride[]);
+
+/**
+ * @param dstRange flag indicating the white-black range of the output (1=jpeg / 0=mpeg)
+ * @param srcRange flag indicating the white-black range of the input (1=jpeg / 0=mpeg)
+ * @param table the yuv2rgb coefficients describing the output yuv space, normally ff_yuv2rgb_coeffs[x]
+ * @param inv_table the yuv2rgb coefficients describing the input yuv space, normally ff_yuv2rgb_coeffs[x]
+ * @param brightness 16.16 fixed point brightness correction
+ * @param contrast 16.16 fixed point contrast correction
+ * @param saturation 16.16 fixed point saturation correction
+ * @return -1 if not supported
+ */
+int sws_setColorspaceDetails(struct SwsContext *c, const int inv_table[4],
+ int srcRange, const int table[4], int dstRange,
+ int brightness, int contrast, int saturation);
+
+/**
+ * @return -1 if not supported
+ */
+int sws_getColorspaceDetails(struct SwsContext *c, int **inv_table,
+ int *srcRange, int **table, int *dstRange,
+ int *brightness, int *contrast, int *saturation);
+
+/**
+ * Allocate and return an uninitialized vector with length coefficients.
+ */
+SwsVector *sws_allocVec(int length);
+
+/**
+ * Return a normalized Gaussian curve used to filter stuff
+ * quality = 3 is high quality, lower is lower quality.
+ */
+SwsVector *sws_getGaussianVec(double variance, double quality);
+
+/**
+ * Allocate and return a vector with length coefficients, all
+ * with the same value c.
+ */
+SwsVector *sws_getConstVec(double c, int length);
+
+/**
+ * Allocate and return a vector with just one coefficient, with
+ * value 1.0.
+ */
+SwsVector *sws_getIdentityVec(void);
+
+/**
+ * Scale all the coefficients of a by the scalar value.
+ */
+void sws_scaleVec(SwsVector *a, double scalar);
+
+/**
+ * Scale all the coefficients of a so that their sum equals height.
+ */
+void sws_normalizeVec(SwsVector *a, double height);
+void sws_convVec(SwsVector *a, SwsVector *b);
+void sws_addVec(SwsVector *a, SwsVector *b);
+void sws_subVec(SwsVector *a, SwsVector *b);
+void sws_shiftVec(SwsVector *a, int shift);
+
+/**
+ * Allocate and return a clone of the vector a, that is a vector
+ * with the same coefficients as a.
+ */
+SwsVector *sws_cloneVec(SwsVector *a);
+
+/**
+ * Print with av_log() a textual representation of the vector a
+ * if log_level <= av_log_level.
+ */
+void sws_printVec2(SwsVector *a, AVClass *log_ctx, int log_level);
+
+void sws_freeVec(SwsVector *a);
+
+SwsFilter *sws_getDefaultFilter(float lumaGBlur, float chromaGBlur,
+ float lumaSharpen, float chromaSharpen,
+ float chromaHShift, float chromaVShift,
+ int verbose);
+void sws_freeFilter(SwsFilter *filter);
+
+/**
+ * Check if context can be reused, otherwise reallocate a new one.
+ *
+ * If context is NULL, just calls sws_getContext() to get a new
+ * context. Otherwise, checks if the parameters are the ones already
+ * saved in context. If that is the case, returns the current
+ * context. Otherwise, frees context and gets a new context with
+ * the new parameters.
+ *
+ * Be warned that srcFilter and dstFilter are not checked, they
+ * are assumed to remain the same.
+ */
+struct SwsContext *sws_getCachedContext(struct SwsContext *context,
+ int srcW, int srcH, enum AVPixelFormat srcFormat,
+ int dstW, int dstH, enum AVPixelFormat dstFormat,
+ int flags, SwsFilter *srcFilter,
+ SwsFilter *dstFilter, const double *param);
+
+/**
+ * Convert an 8-bit paletted frame into a frame with a color depth of 32 bits.
+ *
+ * The output frame will have the same packed format as the palette.
+ *
+ * @param src source frame buffer
+ * @param dst destination frame buffer
+ * @param num_pixels number of pixels to convert
+ * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src
+ */
+void sws_convertPalette8ToPacked32(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
+
+/**
+ * Convert an 8-bit paletted frame into a frame with a color depth of 24 bits.
+ *
+ * With the palette format "ABCD", the destination frame ends up with the format "ABC".
+ *
+ * @param src source frame buffer
+ * @param dst destination frame buffer
+ * @param num_pixels number of pixels to convert
+ * @param palette array with [256] entries, which must match color arrangement (RGB or BGR) of src
+ */
+void sws_convertPalette8ToPacked24(const uint8_t *src, uint8_t *dst, int num_pixels, const uint8_t *palette);
+
+/**
+ * Get the AVClass for swsContext. It can be used in combination with
+ * AV_OPT_SEARCH_FAKE_OBJ for examining options.
+ *
+ * @see av_opt_find().
+ */
+const AVClass *sws_get_class(void);
+
+/**
+ * @}
+ */
+
+#endif /* SWSCALE_SWSCALE_H */
diff --git a/Externals/ffmpeg/dev/include/libswscale/version.h b/Externals/ffmpeg/dev/include/libswscale/version.h
new file mode 100644
index 0000000000..228c5770eb
--- /dev/null
+++ b/Externals/ffmpeg/dev/include/libswscale/version.h
@@ -0,0 +1,56 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef SWSCALE_VERSION_H
+#define SWSCALE_VERSION_H
+
+/**
+ * @file
+ * swscale version macros
+ */
+
+#include "libavutil/version.h"
+
+#define LIBSWSCALE_VERSION_MAJOR 3
+#define LIBSWSCALE_VERSION_MINOR 1
+#define LIBSWSCALE_VERSION_MICRO 101
+
+#define LIBSWSCALE_VERSION_INT AV_VERSION_INT(LIBSWSCALE_VERSION_MAJOR, \
+ LIBSWSCALE_VERSION_MINOR, \
+ LIBSWSCALE_VERSION_MICRO)
+#define LIBSWSCALE_VERSION AV_VERSION(LIBSWSCALE_VERSION_MAJOR, \
+ LIBSWSCALE_VERSION_MINOR, \
+ LIBSWSCALE_VERSION_MICRO)
+#define LIBSWSCALE_BUILD LIBSWSCALE_VERSION_INT
+
+#define LIBSWSCALE_IDENT "SwS" AV_STRINGIFY(LIBSWSCALE_VERSION)
+
+/**
+ * FF_API_* defines may be placed below to indicate public API that will be
+ * dropped at a future version bump. The defines themselves are not part of
+ * the public API and may change, break or disappear at any time.
+ */
+
+#ifndef FF_API_SWS_CPU_CAPS
+#define FF_API_SWS_CPU_CAPS (LIBSWSCALE_VERSION_MAJOR < 4)
+#endif
+#ifndef FF_API_ARCH_BFIN
+#define FF_API_ARCH_BFIN (LIBSWSCALE_VERSION_MAJOR < 4)
+#endif
+
+#endif /* SWSCALE_VERSION_H */
diff --git a/Externals/ffmpeg/dev/lib/avcodec-56.def b/Externals/ffmpeg/dev/lib/avcodec-56.def
new file mode 100644
index 0000000000..962bd0f5e0
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/avcodec-56.def
@@ -0,0 +1,219 @@
+EXPORTS
+ audio_resample
+ audio_resample_close
+ av_audio_convert
+ av_audio_convert_alloc
+ av_audio_convert_free
+ av_audio_resample_init
+ av_bitstream_filter_close
+ av_bitstream_filter_filter
+ av_bitstream_filter_init
+ av_bitstream_filter_next
+ av_codec_ffversion DATA
+ av_codec_get_chroma_intra_matrix
+ av_codec_get_codec_descriptor
+ av_codec_get_lowres
+ av_codec_get_max_lowres
+ av_codec_get_pkt_timebase
+ av_codec_get_seek_preroll
+ av_codec_is_decoder
+ av_codec_is_encoder
+ av_codec_next
+ av_codec_set_chroma_intra_matrix
+ av_codec_set_codec_descriptor
+ av_codec_set_lowres
+ av_codec_set_pkt_timebase
+ av_codec_set_seek_preroll
+ av_copy_packet
+ av_copy_packet_side_data
+ av_dct_calc
+ av_dct_end
+ av_dct_init
+ av_destruct_packet
+ av_dup_packet
+ av_dv_codec_profile
+ av_dv_codec_profile2
+ av_dv_frame_profile
+ av_fast_padded_malloc
+ av_fast_padded_mallocz
+ av_fft_calc
+ av_fft_end
+ av_fft_init
+ av_fft_permute
+ av_free_packet
+ av_get_audio_frame_duration
+ av_get_bits_per_sample
+ av_get_codec_tag_string
+ av_get_exact_bits_per_sample
+ av_get_pcm_codec
+ av_get_profile_name
+ av_grow_packet
+ av_hwaccel_next
+ av_imdct_calc
+ av_imdct_half
+ av_init_packet
+ av_lockmgr_register
+ av_log_ask_for_sample
+ av_log_missing_feature
+ av_mdct_calc
+ av_mdct_end
+ av_mdct_init
+ av_new_packet
+ av_packet_copy_props
+ av_packet_free_side_data
+ av_packet_from_data
+ av_packet_get_side_data
+ av_packet_merge_side_data
+ av_packet_move_ref
+ av_packet_new_side_data
+ av_packet_pack_dictionary
+ av_packet_ref
+ av_packet_rescale_ts
+ av_packet_shrink_side_data
+ av_packet_split_side_data
+ av_packet_unpack_dictionary
+ av_packet_unref
+ av_parser_change
+ av_parser_close
+ av_parser_init
+ av_parser_next
+ av_parser_parse2
+ av_picture_copy
+ av_picture_crop
+ av_picture_pad
+ av_rdft_calc
+ av_rdft_end
+ av_rdft_init
+ av_register_bitstream_filter
+ av_register_codec_parser
+ av_register_hwaccel
+ av_resample
+ av_resample_close
+ av_resample_compensate
+ av_resample_init
+ av_shrink_packet
+ av_vorbis_parse_frame
+ av_vorbis_parse_frame_flags
+ av_vorbis_parse_free
+ av_vorbis_parse_init
+ av_vorbis_parse_reset
+ av_xiphlacing
+ available_bits
+ avcodec_align_dimensions
+ avcodec_align_dimensions2
+ avcodec_alloc_context3
+ avcodec_alloc_frame
+ avcodec_chroma_pos_to_enum
+ avcodec_close
+ avcodec_configuration
+ avcodec_copy_context
+ avcodec_dct_alloc
+ avcodec_dct_get_class
+ avcodec_dct_init
+ avcodec_decode_audio3
+ avcodec_decode_audio4
+ avcodec_decode_subtitle2
+ avcodec_decode_video2
+ avcodec_default_execute
+ avcodec_default_execute2
+ avcodec_default_get_buffer
+ avcodec_default_get_buffer2
+ avcodec_default_get_format
+ avcodec_default_reget_buffer
+ avcodec_default_release_buffer
+ avcodec_descriptor_get
+ avcodec_descriptor_get_by_name
+ avcodec_descriptor_next
+ avcodec_encode_audio
+ avcodec_encode_audio2
+ avcodec_encode_subtitle
+ avcodec_encode_video
+ avcodec_encode_video2
+ avcodec_enum_to_chroma_pos
+ avcodec_fill_audio_frame
+ avcodec_find_best_pix_fmt2
+ avcodec_find_best_pix_fmt_of_2
+ avcodec_find_best_pix_fmt_of_list
+ avcodec_find_decoder
+ avcodec_find_decoder_by_name
+ avcodec_find_encoder
+ avcodec_find_encoder_by_name
+ avcodec_flush_buffers
+ avcodec_free_context
+ avcodec_free_frame
+ avcodec_get_chroma_sub_sample
+ avcodec_get_class
+ avcodec_get_context_defaults3
+ avcodec_get_edge_width
+ avcodec_get_frame_class
+ avcodec_get_frame_defaults
+ avcodec_get_name
+ avcodec_get_pix_fmt_loss
+ avcodec_get_subtitle_rect_class
+ avcodec_get_type
+ avcodec_is_open
+ avcodec_license
+ avcodec_open2
+ avcodec_pix_fmt_to_codec_tag
+ avcodec_register
+ avcodec_register_all
+ avcodec_set_dimensions
+ avcodec_string
+ avcodec_version
+ aver_isf_history
+ avpicture_alloc
+ avpicture_deinterlace
+ avpicture_fill
+ avpicture_free
+ avpicture_get_size
+ avpicture_layout
+ avpriv_aac_parse_header
+ avpriv_ac3_channel_layout_tab DATA
+ avpriv_ac3_parse_header
+ avpriv_ac3_parse_header2
+ avpriv_align_put_bits
+ avpriv_bprint_to_extradata
+ avpriv_color_frame
+ avpriv_copy_bits
+ avpriv_copy_pce_data
+ avpriv_dca_convert_bitstream
+ avpriv_dca_sample_rates DATA
+ avpriv_dirac_parse_sequence_header
+ avpriv_dnxhd_get_frame_size
+ avpriv_do_elbg
+ avpriv_dv_frame_profile2
+ avpriv_exif_decode_ifd
+ avpriv_find_pix_fmt
+ avpriv_find_start_code
+ avpriv_flac_is_extradata_valid
+ avpriv_flac_parse_streaminfo
+ avpriv_get_raw_pix_fmt_tags
+ avpriv_h264_has_num_reorder_frames
+ avpriv_init_elbg
+ avpriv_lock_avformat
+ avpriv_mjpeg_bits_ac_chrominance DATA
+ avpriv_mjpeg_bits_ac_luminance DATA
+ avpriv_mjpeg_bits_dc_chrominance DATA
+ avpriv_mjpeg_bits_dc_luminance DATA
+ avpriv_mjpeg_val_ac_chrominance DATA
+ avpriv_mjpeg_val_ac_luminance DATA
+ avpriv_mjpeg_val_dc DATA
+ avpriv_mpa_bitrate_tab DATA
+ avpriv_mpa_decode_header
+ avpriv_mpa_decode_header2
+ avpriv_mpa_freq_tab DATA
+ avpriv_mpeg4audio_get_config
+ avpriv_mpeg4audio_sample_rates DATA
+ avpriv_mpegaudio_decode_header
+ avpriv_pix_fmt_bps_avi DATA
+ avpriv_pix_fmt_bps_mov DATA
+ avpriv_put_string
+ avpriv_split_xiph_headers
+ avpriv_tak_parse_streaminfo
+ avpriv_toupper4
+ avpriv_unlock_avformat
+ avpriv_vorbis_parse_extradata
+ avpriv_vorbis_parse_frame
+ avpriv_vorbis_parse_frame_flags
+ avpriv_vorbis_parse_reset
+ avsubtitle_free
diff --git a/Externals/ffmpeg/dev/lib/avcodec.lib b/Externals/ffmpeg/dev/lib/avcodec.lib
new file mode 100644
index 0000000000..563ea96305
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/avcodec.lib differ
diff --git a/Externals/ffmpeg/dev/lib/avdevice-56.def b/Externals/ffmpeg/dev/lib/avdevice-56.def
new file mode 100644
index 0000000000..f453187e60
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/avdevice-56.def
@@ -0,0 +1,19 @@
+EXPORTS
+ av_device_capabilities DATA
+ av_device_ffversion DATA
+ av_input_audio_device_next
+ av_input_video_device_next
+ av_output_audio_device_next
+ av_output_video_device_next
+ avdevice_app_to_dev_control_message
+ avdevice_capabilities_create
+ avdevice_capabilities_free
+ avdevice_configuration
+ avdevice_dev_to_app_control_message
+ avdevice_free_list_devices
+ avdevice_license
+ avdevice_list_devices
+ avdevice_list_input_sources
+ avdevice_list_output_sinks
+ avdevice_register_all
+ avdevice_version
diff --git a/Externals/ffmpeg/dev/lib/avdevice.lib b/Externals/ffmpeg/dev/lib/avdevice.lib
new file mode 100644
index 0000000000..600f7b636c
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/avdevice.lib differ
diff --git a/Externals/ffmpeg/dev/lib/avfilter-5.def b/Externals/ffmpeg/dev/lib/avfilter-5.def
new file mode 100644
index 0000000000..7268f51bbb
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/avfilter-5.def
@@ -0,0 +1,81 @@
+EXPORTS
+ av_abuffersink_params_alloc
+ av_buffersink_get_buffer_ref
+ av_buffersink_get_frame
+ av_buffersink_get_frame_flags
+ av_buffersink_get_frame_rate
+ av_buffersink_get_samples
+ av_buffersink_params_alloc
+ av_buffersink_poll_frame
+ av_buffersink_read
+ av_buffersink_read_samples
+ av_buffersink_set_frame_size
+ av_buffersrc_add_frame
+ av_buffersrc_add_frame_flags
+ av_buffersrc_add_ref
+ av_buffersrc_buffer
+ av_buffersrc_get_nb_failed_requests
+ av_buffersrc_write_frame
+ av_filter_ffversion DATA
+ av_filter_next
+ avfilter_add_matrix
+ avfilter_all_channel_layouts DATA
+ avfilter_config_links
+ avfilter_configuration
+ avfilter_copy_buf_props
+ avfilter_copy_buffer_ref_props
+ avfilter_copy_frame_props
+ avfilter_free
+ avfilter_get_audio_buffer_ref_from_arrays
+ avfilter_get_audio_buffer_ref_from_arrays_channels
+ avfilter_get_audio_buffer_ref_from_frame
+ avfilter_get_buffer_ref_from_frame
+ avfilter_get_by_name
+ avfilter_get_class
+ avfilter_get_matrix
+ avfilter_get_video_buffer_ref_from_arrays
+ avfilter_get_video_buffer_ref_from_frame
+ avfilter_graph_add_filter
+ avfilter_graph_alloc
+ avfilter_graph_alloc_filter
+ avfilter_graph_config
+ avfilter_graph_create_filter
+ avfilter_graph_dump
+ avfilter_graph_free
+ avfilter_graph_get_filter
+ avfilter_graph_parse
+ avfilter_graph_parse2
+ avfilter_graph_parse_ptr
+ avfilter_graph_queue_command
+ avfilter_graph_request_oldest
+ avfilter_graph_send_command
+ avfilter_graph_set_auto_convert
+ avfilter_init_dict
+ avfilter_init_filter
+ avfilter_init_str
+ avfilter_inout_alloc
+ avfilter_inout_free
+ avfilter_insert_filter
+ avfilter_license
+ avfilter_link
+ avfilter_link_free
+ avfilter_link_get_channels
+ avfilter_link_set_closed
+ avfilter_make_format64_list
+ avfilter_mul_matrix
+ avfilter_next
+ avfilter_open
+ avfilter_pad_count
+ avfilter_pad_get_name
+ avfilter_pad_get_type
+ avfilter_process_command
+ avfilter_ref_buffer
+ avfilter_ref_get_channels
+ avfilter_register
+ avfilter_register_all
+ avfilter_sub_matrix
+ avfilter_transform
+ avfilter_uninit
+ avfilter_unref_buffer
+ avfilter_unref_bufferp
+ avfilter_version
diff --git a/Externals/ffmpeg/dev/lib/avfilter.lib b/Externals/ffmpeg/dev/lib/avfilter.lib
new file mode 100644
index 0000000000..46b6cd509d
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/avfilter.lib differ
diff --git a/Externals/ffmpeg/dev/lib/avformat-56.def b/Externals/ffmpeg/dev/lib/avformat-56.def
new file mode 100644
index 0000000000..dfce181ede
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/avformat-56.def
@@ -0,0 +1,161 @@
+EXPORTS
+ av_add_index_entry
+ av_append_packet
+ av_codec_get_id
+ av_codec_get_tag
+ av_codec_get_tag2
+ av_convert_lang_to
+ av_demuxer_open
+ av_dump_format
+ av_filename_number_test
+ av_find_best_stream
+ av_find_default_stream_index
+ av_find_input_format
+ av_find_program_from_stream
+ av_fmt_ctx_get_duration_estimation_method
+ av_format_ffversion DATA
+ av_format_get_audio_codec
+ av_format_get_control_message_cb
+ av_format_get_metadata_header_padding
+ av_format_get_opaque
+ av_format_get_probe_score
+ av_format_get_subtitle_codec
+ av_format_get_video_codec
+ av_format_inject_global_side_data
+ av_format_set_audio_codec
+ av_format_set_control_message_cb
+ av_format_set_metadata_header_padding
+ av_format_set_opaque
+ av_format_set_subtitle_codec
+ av_format_set_video_codec
+ av_get_frame_filename
+ av_get_output_timestamp
+ av_get_packet
+ av_guess_codec
+ av_guess_format
+ av_guess_frame_rate
+ av_guess_sample_aspect_ratio
+ av_hex_dump
+ av_hex_dump_log
+ av_iformat_next
+ av_index_search_timestamp
+ av_interleaved_write_frame
+ av_interleaved_write_uncoded_frame
+ av_match_ext
+ av_new_program
+ av_oformat_next
+ av_pkt_dump2
+ av_pkt_dump_log2
+ av_probe_input_buffer
+ av_probe_input_buffer2
+ av_probe_input_format
+ av_probe_input_format2
+ av_probe_input_format3
+ av_read_frame
+ av_read_pause
+ av_read_play
+ av_register_all
+ av_register_input_format
+ av_register_output_format
+ av_sdp_create
+ av_seek_frame
+ av_stream_get_end_pts
+ av_stream_get_parser
+ av_stream_get_r_frame_rate
+ av_stream_get_recommended_encoder_configuration
+ av_stream_get_side_data
+ av_stream_set_r_frame_rate
+ av_stream_set_recommended_encoder_configuration
+ av_url_split
+ av_write_frame
+ av_write_trailer
+ av_write_uncoded_frame
+ av_write_uncoded_frame_query
+ avformat_alloc_context
+ avformat_alloc_output_context2
+ avformat_close_input
+ avformat_configuration
+ avformat_find_stream_info
+ avformat_free_context
+ avformat_get_class
+ avformat_get_mov_audio_tags
+ avformat_get_mov_video_tags
+ avformat_get_riff_audio_tags
+ avformat_get_riff_video_tags
+ avformat_license
+ avformat_match_stream_specifier
+ avformat_network_deinit
+ avformat_network_init
+ avformat_new_stream
+ avformat_open_input
+ avformat_query_codec
+ avformat_queue_attached_pictures
+ avformat_seek_file
+ avformat_version
+ avformat_write_header
+ avio_alloc_context
+ avio_check
+ avio_close
+ avio_close_dyn_buf
+ avio_closep
+ avio_enum_protocols
+ avio_feof
+ avio_find_protocol_name
+ avio_flush
+ avio_get_str
+ avio_get_str16be
+ avio_get_str16le
+ avio_open
+ avio_open2
+ avio_open_dyn_buf
+ avio_pause
+ avio_printf
+ avio_put_str
+ avio_put_str16le
+ avio_r8
+ avio_rb16
+ avio_rb24
+ avio_rb32
+ avio_rb64
+ avio_read
+ avio_read_to_bprint
+ avio_rl16
+ avio_rl24
+ avio_rl32
+ avio_rl64
+ avio_seek
+ avio_seek_time
+ avio_size
+ avio_skip
+ avio_w8
+ avio_wb16
+ avio_wb24
+ avio_wb32
+ avio_wb64
+ avio_wl16
+ avio_wl24
+ avio_wl32
+ avio_wl64
+ avio_write
+ avpriv_dv_get_packet
+ avpriv_dv_init_demux
+ avpriv_dv_produce_packet
+ avpriv_mpegts_parse_close
+ avpriv_mpegts_parse_open
+ avpriv_mpegts_parse_packet
+ avpriv_new_chapter
+ avpriv_set_pts_info
+ ff_inet_aton
+ ff_rtp_get_local_rtcp_port
+ ff_rtp_get_local_rtp_port
+ ff_rtsp_parse_line
+ ff_socket_nonblock
+ ffio_open_dyn_packet_buf
+ ffio_set_buf_size
+ ffurl_close
+ ffurl_open
+ ffurl_read_complete
+ ffurl_seek
+ ffurl_size
+ ffurl_write
+ url_feof
diff --git a/Externals/ffmpeg/dev/lib/avformat.lib b/Externals/ffmpeg/dev/lib/avformat.lib
new file mode 100644
index 0000000000..a29a9cbd1a
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/avformat.lib differ
diff --git a/Externals/ffmpeg/dev/lib/avutil-54.def b/Externals/ffmpeg/dev/lib/avutil-54.def
new file mode 100644
index 0000000000..e02e465fe6
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/avutil-54.def
@@ -0,0 +1,441 @@
+EXPORTS
+ av_add_q
+ av_add_stable
+ av_adler32_update
+ av_aes_alloc
+ av_aes_crypt
+ av_aes_init
+ av_aes_size DATA
+ av_asprintf
+ av_audio_fifo_alloc
+ av_audio_fifo_drain
+ av_audio_fifo_free
+ av_audio_fifo_read
+ av_audio_fifo_realloc
+ av_audio_fifo_reset
+ av_audio_fifo_size
+ av_audio_fifo_space
+ av_audio_fifo_write
+ av_base64_decode
+ av_base64_encode
+ av_basename
+ av_blowfish_crypt
+ av_blowfish_crypt_ecb
+ av_blowfish_init
+ av_bmg_get
+ av_bprint_append_data
+ av_bprint_channel_layout
+ av_bprint_chars
+ av_bprint_clear
+ av_bprint_escape
+ av_bprint_finalize
+ av_bprint_get_buffer
+ av_bprint_init
+ av_bprint_init_for_buffer
+ av_bprint_strftime
+ av_bprintf
+ av_buffer_alloc
+ av_buffer_allocz
+ av_buffer_create
+ av_buffer_default_free
+ av_buffer_get_opaque
+ av_buffer_get_ref_count
+ av_buffer_is_writable
+ av_buffer_make_writable
+ av_buffer_pool_get
+ av_buffer_pool_init
+ av_buffer_pool_uninit
+ av_buffer_realloc
+ av_buffer_ref
+ av_buffer_unref
+ av_calloc
+ av_camellia_alloc
+ av_camellia_crypt
+ av_camellia_init
+ av_camellia_size DATA
+ av_cast5_alloc
+ av_cast5_crypt
+ av_cast5_crypt2
+ av_cast5_init
+ av_cast5_size DATA
+ av_channel_layout_extract_channel
+ av_chroma_location_name
+ av_color_primaries_name
+ av_color_range_name
+ av_color_space_name
+ av_color_transfer_name
+ av_compare_mod
+ av_compare_ts
+ av_cpu_count
+ av_crc
+ av_crc_get_table
+ av_crc_init
+ av_ctz
+ av_d2q
+ av_d2str
+ av_default_get_category
+ av_default_item_name
+ av_des_crypt
+ av_des_init
+ av_des_mac
+ av_dict_copy
+ av_dict_count
+ av_dict_free
+ av_dict_get
+ av_dict_get_string
+ av_dict_parse_string
+ av_dict_set
+ av_dict_set_int
+ av_dirname
+ av_display_matrix_flip
+ av_display_rotation_get
+ av_display_rotation_set
+ av_div_q
+ av_downmix_info_update_side_data
+ av_dynarray2_add
+ av_dynarray_add
+ av_dynarray_add_nofree
+ av_escape
+ av_expr_eval
+ av_expr_free
+ av_expr_parse
+ av_expr_parse_and_eval
+ av_fast_malloc
+ av_fast_realloc
+ av_fifo_alloc
+ av_fifo_alloc_array
+ av_fifo_drain
+ av_fifo_free
+ av_fifo_freep
+ av_fifo_generic_read
+ av_fifo_generic_write
+ av_fifo_grow
+ av_fifo_realloc2
+ av_fifo_reset
+ av_fifo_size
+ av_fifo_space
+ av_file_map
+ av_file_unmap
+ av_find_best_pix_fmt_of_2
+ av_find_info_tag
+ av_find_nearest_q_idx
+ av_fopen_utf8
+ av_force_cpu_flags
+ av_frame_alloc
+ av_frame_clone
+ av_frame_copy
+ av_frame_copy_props
+ av_frame_free
+ av_frame_get_best_effort_timestamp
+ av_frame_get_buffer
+ av_frame_get_channel_layout
+ av_frame_get_channels
+ av_frame_get_color_range
+ av_frame_get_colorspace
+ av_frame_get_decode_error_flags
+ av_frame_get_metadata
+ av_frame_get_pkt_duration
+ av_frame_get_pkt_pos
+ av_frame_get_pkt_size
+ av_frame_get_plane_buffer
+ av_frame_get_qp_table
+ av_frame_get_sample_rate
+ av_frame_get_side_data
+ av_frame_is_writable
+ av_frame_make_writable
+ av_frame_move_ref
+ av_frame_new_side_data
+ av_frame_ref
+ av_frame_remove_side_data
+ av_frame_set_best_effort_timestamp
+ av_frame_set_channel_layout
+ av_frame_set_channels
+ av_frame_set_color_range
+ av_frame_set_colorspace
+ av_frame_set_decode_error_flags
+ av_frame_set_metadata
+ av_frame_set_pkt_duration
+ av_frame_set_pkt_pos
+ av_frame_set_pkt_size
+ av_frame_set_qp_table
+ av_frame_set_sample_rate
+ av_frame_side_data_name
+ av_frame_unref
+ av_free
+ av_freep
+ av_gcd
+ av_get_alt_sample_fmt
+ av_get_bits_per_pixel
+ av_get_bytes_per_sample
+ av_get_channel_description
+ av_get_channel_layout
+ av_get_channel_layout_channel_index
+ av_get_channel_layout_nb_channels
+ av_get_channel_layout_string
+ av_get_channel_name
+ av_get_colorspace_name
+ av_get_cpu_flags
+ av_get_default_channel_layout
+ av_get_double
+ av_get_int
+ av_get_known_color_name
+ av_get_media_type_string
+ av_get_packed_sample_fmt
+ av_get_padded_bits_per_pixel
+ av_get_picture_type_char
+ av_get_pix_fmt
+ av_get_pix_fmt_loss
+ av_get_pix_fmt_name
+ av_get_pix_fmt_string
+ av_get_planar_sample_fmt
+ av_get_q
+ av_get_random_seed
+ av_get_sample_fmt
+ av_get_sample_fmt_name
+ av_get_sample_fmt_string
+ av_get_standard_channel_layout
+ av_get_string
+ av_get_time_base_q
+ av_get_token
+ av_gettime
+ av_gettime_relative
+ av_gettime_relative_is_monotonic
+ av_hash_alloc
+ av_hash_final
+ av_hash_final_b64
+ av_hash_final_bin
+ av_hash_final_hex
+ av_hash_freep
+ av_hash_get_name
+ av_hash_get_size
+ av_hash_init
+ av_hash_names
+ av_hash_update
+ av_hmac_alloc
+ av_hmac_calc
+ av_hmac_final
+ av_hmac_free
+ av_hmac_init
+ av_hmac_update
+ av_image_alloc
+ av_image_check_sar
+ av_image_check_size
+ av_image_copy
+ av_image_copy_plane
+ av_image_copy_to_buffer
+ av_image_fill_arrays
+ av_image_fill_linesizes
+ av_image_fill_max_pixsteps
+ av_image_fill_pointers
+ av_image_get_buffer_size
+ av_image_get_linesize
+ av_int_list_length_for_size
+ av_isdigit
+ av_isgraph
+ av_isspace
+ av_isxdigit
+ av_lfg_init
+ av_log
+ av_log2
+ av_log2_16bit
+ av_log_default_callback
+ av_log_format_line
+ av_log_get_flags
+ av_log_get_level
+ av_log_set_callback
+ av_log_set_flags
+ av_log_set_level
+ av_lzo1x_decode
+ av_malloc
+ av_mallocz
+ av_match_list
+ av_match_name
+ av_max_alloc
+ av_md5_alloc
+ av_md5_final
+ av_md5_init
+ av_md5_size DATA
+ av_md5_sum
+ av_md5_update
+ av_memcpy_backptr
+ av_memdup
+ av_mul_q
+ av_murmur3_alloc
+ av_murmur3_final
+ av_murmur3_init
+ av_murmur3_init_seeded
+ av_murmur3_update
+ av_nearer_q
+ av_next_option
+ av_opt_child_class_next
+ av_opt_child_next
+ av_opt_copy
+ av_opt_eval_double
+ av_opt_eval_flags
+ av_opt_eval_float
+ av_opt_eval_int
+ av_opt_eval_int64
+ av_opt_eval_q
+ av_opt_find
+ av_opt_find2
+ av_opt_flag_is_set
+ av_opt_free
+ av_opt_freep_ranges
+ av_opt_get
+ av_opt_get_channel_layout
+ av_opt_get_dict_val
+ av_opt_get_double
+ av_opt_get_image_size
+ av_opt_get_int
+ av_opt_get_key_value
+ av_opt_get_pixel_fmt
+ av_opt_get_q
+ av_opt_get_sample_fmt
+ av_opt_get_video_rate
+ av_opt_is_set_to_default
+ av_opt_is_set_to_default_by_name
+ av_opt_next
+ av_opt_ptr
+ av_opt_query_ranges
+ av_opt_query_ranges_default
+ av_opt_serialize
+ av_opt_set
+ av_opt_set_bin
+ av_opt_set_channel_layout
+ av_opt_set_defaults
+ av_opt_set_defaults2
+ av_opt_set_dict
+ av_opt_set_dict2
+ av_opt_set_dict_val
+ av_opt_set_double
+ av_opt_set_from_string
+ av_opt_set_image_size
+ av_opt_set_int
+ av_opt_set_pixel_fmt
+ av_opt_set_q
+ av_opt_set_sample_fmt
+ av_opt_set_video_rate
+ av_opt_show2
+ av_parse_color
+ av_parse_cpu_caps
+ av_parse_cpu_flags
+ av_parse_ratio
+ av_parse_time
+ av_parse_video_rate
+ av_parse_video_size
+ av_pix_fmt_count_planes
+ av_pix_fmt_desc_get
+ av_pix_fmt_desc_get_id
+ av_pix_fmt_desc_next
+ av_pix_fmt_descriptors DATA
+ av_pix_fmt_get_chroma_sub_sample
+ av_pix_fmt_swap_endianness
+ av_pixelutils_get_sad_fn
+ av_rc4_crypt
+ av_rc4_init
+ av_read_image_line
+ av_realloc
+ av_realloc_array
+ av_realloc_f
+ av_reallocp
+ av_reallocp_array
+ av_reduce
+ av_rescale
+ av_rescale_delta
+ av_rescale_q
+ av_rescale_q_rnd
+ av_rescale_rnd
+ av_reverse DATA
+ av_ripemd_alloc
+ av_ripemd_final
+ av_ripemd_init
+ av_ripemd_size DATA
+ av_ripemd_update
+ av_sample_fmt_is_planar
+ av_samples_alloc
+ av_samples_alloc_array_and_samples
+ av_samples_copy
+ av_samples_fill_arrays
+ av_samples_get_buffer_size
+ av_samples_set_silence
+ av_set_cpu_flags_mask
+ av_set_double
+ av_set_int
+ av_set_options_string
+ av_set_q
+ av_set_string3
+ av_sha512_alloc
+ av_sha512_final
+ av_sha512_init
+ av_sha512_size DATA
+ av_sha512_update
+ av_sha_alloc
+ av_sha_final
+ av_sha_init
+ av_sha_size DATA
+ av_sha_update
+ av_small_strptime
+ av_stereo3d_alloc
+ av_stereo3d_create_side_data
+ av_strcasecmp
+ av_strdup
+ av_strerror
+ av_stristart
+ av_stristr
+ av_strlcat
+ av_strlcatf
+ av_strlcpy
+ av_strncasecmp
+ av_strndup
+ av_strnstr
+ av_strstart
+ av_strtod
+ av_strtok
+ av_sub_q
+ av_tempfile
+ av_thread_message_queue_alloc
+ av_thread_message_queue_free
+ av_thread_message_queue_recv
+ av_thread_message_queue_send
+ av_thread_message_queue_set_err_recv
+ av_thread_message_queue_set_err_send
+ av_timecode_adjust_ntsc_framenum2
+ av_timecode_check_frame_rate
+ av_timecode_get_smpte_from_framenum
+ av_timecode_init
+ av_timecode_init_from_string
+ av_timecode_make_mpeg_tc_string
+ av_timecode_make_smpte_tc_string
+ av_timecode_make_string
+ av_timegm
+ av_tree_destroy
+ av_tree_enumerate
+ av_tree_find
+ av_tree_insert
+ av_tree_node_alloc
+ av_tree_node_size DATA
+ av_usleep
+ av_utf8_decode
+ av_util_ffversion DATA
+ av_vbprintf
+ av_vlog
+ av_write_image_line
+ av_xtea_crypt
+ av_xtea_init
+ avpriv_alloc_fixed_dsp
+ avpriv_cga_font DATA
+ avpriv_emms_yasm DATA
+ avpriv_float_dsp_alloc
+ avpriv_float_dsp_init
+ avpriv_frame_get_metadatap
+ avpriv_init_lls
+ avpriv_open
+ avpriv_report_missing_feature
+ avpriv_request_sample
+ avpriv_scalarproduct_float_c
+ avpriv_set_systematic_pal2
+ avpriv_solve_lls
+ avpriv_vga16_font DATA
+ avutil_configuration
+ avutil_license
+ avutil_version
diff --git a/Externals/ffmpeg/dev/lib/avutil.lib b/Externals/ffmpeg/dev/lib/avutil.lib
new file mode 100644
index 0000000000..a009c124c4
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/avutil.lib differ
diff --git a/Externals/ffmpeg/dev/lib/libavcodec.dll.a b/Externals/ffmpeg/dev/lib/libavcodec.dll.a
new file mode 100644
index 0000000000..4eaadfc46f
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libavcodec.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libavdevice.dll.a b/Externals/ffmpeg/dev/lib/libavdevice.dll.a
new file mode 100644
index 0000000000..5a8f4bd651
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libavdevice.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libavfilter.dll.a b/Externals/ffmpeg/dev/lib/libavfilter.dll.a
new file mode 100644
index 0000000000..cf8577a906
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libavfilter.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libavformat.dll.a b/Externals/ffmpeg/dev/lib/libavformat.dll.a
new file mode 100644
index 0000000000..720c267654
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libavformat.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libavutil.dll.a b/Externals/ffmpeg/dev/lib/libavutil.dll.a
new file mode 100644
index 0000000000..0220047180
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libavutil.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libpostproc.dll.a b/Externals/ffmpeg/dev/lib/libpostproc.dll.a
new file mode 100644
index 0000000000..c7b8ba266a
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libpostproc.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libswresample.dll.a b/Externals/ffmpeg/dev/lib/libswresample.dll.a
new file mode 100644
index 0000000000..999ec7f7ae
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libswresample.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/libswscale.dll.a b/Externals/ffmpeg/dev/lib/libswscale.dll.a
new file mode 100644
index 0000000000..1dafa6ed2e
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/libswscale.dll.a differ
diff --git a/Externals/ffmpeg/dev/lib/postproc-53.def b/Externals/ffmpeg/dev/lib/postproc-53.def
new file mode 100644
index 0000000000..62c4c69d7f
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/postproc-53.def
@@ -0,0 +1,11 @@
+EXPORTS
+ postproc_configuration
+ postproc_ffversion DATA
+ postproc_license
+ postproc_version
+ pp_free_context
+ pp_free_mode
+ pp_get_context
+ pp_get_mode_by_name_and_quality
+ pp_help DATA
+ pp_postprocess
diff --git a/Externals/ffmpeg/dev/lib/postproc.lib b/Externals/ffmpeg/dev/lib/postproc.lib
new file mode 100644
index 0000000000..1425faa559
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/postproc.lib differ
diff --git a/Externals/ffmpeg/dev/lib/swresample-1.def b/Externals/ffmpeg/dev/lib/swresample-1.def
new file mode 100644
index 0000000000..a4aedce94a
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/swresample-1.def
@@ -0,0 +1,22 @@
+EXPORTS
+ swr_alloc
+ swr_alloc_set_opts
+ swr_close
+ swr_config_frame
+ swr_convert
+ swr_convert_frame
+ swr_drop_output
+ swr_ffversion DATA
+ swr_free
+ swr_get_class
+ swr_get_delay
+ swr_init
+ swr_inject_silence
+ swr_is_initialized
+ swr_next_pts
+ swr_set_channel_mapping
+ swr_set_compensation
+ swr_set_matrix
+ swresample_configuration
+ swresample_license
+ swresample_version
diff --git a/Externals/ffmpeg/dev/lib/swresample.lib b/Externals/ffmpeg/dev/lib/swresample.lib
new file mode 100644
index 0000000000..eedf4a8c75
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/swresample.lib differ
diff --git a/Externals/ffmpeg/dev/lib/swscale-3.def b/Externals/ffmpeg/dev/lib/swscale-3.def
new file mode 100644
index 0000000000..d6330d95f2
--- /dev/null
+++ b/Externals/ffmpeg/dev/lib/swscale-3.def
@@ -0,0 +1,36 @@
+EXPORTS
+ sws_addVec
+ sws_allocVec
+ sws_alloc_context
+ sws_cloneVec
+ sws_context_class DATA
+ sws_convVec
+ sws_convertPalette8ToPacked24
+ sws_convertPalette8ToPacked32
+ sws_freeContext
+ sws_freeFilter
+ sws_freeVec
+ sws_getCachedContext
+ sws_getCoefficients
+ sws_getColorspaceDetails
+ sws_getConstVec
+ sws_getContext
+ sws_getDefaultFilter
+ sws_getGaussianVec
+ sws_getIdentityVec
+ sws_get_class
+ sws_init_context
+ sws_isSupportedEndiannessConversion
+ sws_isSupportedInput
+ sws_isSupportedOutput
+ sws_normalizeVec
+ sws_printVec2
+ sws_rgb2rgb_init
+ sws_scale
+ sws_scaleVec
+ sws_setColorspaceDetails
+ sws_shiftVec
+ sws_subVec
+ swscale_configuration
+ swscale_license
+ swscale_version
diff --git a/Externals/ffmpeg/dev/lib/swscale.lib b/Externals/ffmpeg/dev/lib/swscale.lib
new file mode 100644
index 0000000000..9138a602d6
Binary files /dev/null and b/Externals/ffmpeg/dev/lib/swscale.lib differ
diff --git a/Externals/ffmpeg/dev/licenses/bzip2.txt b/Externals/ffmpeg/dev/licenses/bzip2.txt
new file mode 100644
index 0000000000..cc614178cf
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/bzip2.txt
@@ -0,0 +1,42 @@
+
+--------------------------------------------------------------------------
+
+This program, "bzip2", the associated library "libbzip2", and all
+documentation, are copyright (C) 1996-2010 Julian R Seward. All
+rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+1. Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+2. The origin of this software must not be misrepresented; you must
+ not claim that you wrote the original software. If you use this
+ software in a product, an acknowledgment in the product
+ documentation would be appreciated but is not required.
+
+3. Altered source versions must be plainly marked as such, and must
+ not be misrepresented as being the original software.
+
+4. The name of the author may not be used to endorse or promote
+ products derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS
+OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
+DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
+GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
+WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Julian Seward, jseward@bzip.org
+bzip2/libbzip2 version 1.0.6 of 6 September 2010
+
+--------------------------------------------------------------------------
diff --git a/Externals/ffmpeg/dev/licenses/fontconfig.txt b/Externals/ffmpeg/dev/licenses/fontconfig.txt
new file mode 100644
index 0000000000..2a5d777ff6
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/fontconfig.txt
@@ -0,0 +1,27 @@
+fontconfig/COPYING
+
+Copyright © 2000,2001,2002,2003,2004,2006,2007 Keith Packard
+Copyright © 2005 Patrick Lam
+Copyright © 2009 Roozbeh Pournader
+Copyright © 2008,2009 Red Hat, Inc.
+Copyright © 2008 Danilo Šegan
+
+
+Permission to use, copy, modify, distribute, and sell this software and its
+documentation for any purpose is hereby granted without fee, provided that
+the above copyright notice appear in all copies and that both that
+copyright notice and this permission notice appear in supporting
+documentation, and that the name of the author(s) not be used in
+advertising or publicity pertaining to distribution of the software without
+specific, written prior permission. The authors make no
+representations about the suitability of this software for any purpose. It
+is provided "as is" without express or implied warranty.
+
+THE AUTHOR(S) DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+EVENT SHALL THE AUTHOR(S) BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
diff --git a/Externals/ffmpeg/dev/licenses/freetype.txt b/Externals/ffmpeg/dev/licenses/freetype.txt
new file mode 100644
index 0000000000..bbaba33f47
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/freetype.txt
@@ -0,0 +1,169 @@
+ The FreeType Project LICENSE
+ ----------------------------
+
+ 2006-Jan-27
+
+ Copyright 1996-2002, 2006 by
+ David Turner, Robert Wilhelm, and Werner Lemberg
+
+
+
+Introduction
+============
+
+ The FreeType Project is distributed in several archive packages;
+ some of them may contain, in addition to the FreeType font engine,
+ various tools and contributions which rely on, or relate to, the
+ FreeType Project.
+
+ This license applies to all files found in such packages, and
+ which do not fall under their own explicit license. The license
+ affects thus the FreeType font engine, the test programs,
+ documentation and makefiles, at the very least.
+
+ This license was inspired by the BSD, Artistic, and IJG
+ (Independent JPEG Group) licenses, which all encourage inclusion
+ and use of free software in commercial and freeware products
+ alike. As a consequence, its main points are that:
+
+ o We don't promise that this software works. However, we will be
+ interested in any kind of bug reports. (`as is' distribution)
+
+ o You can use this software for whatever you want, in parts or
+ full form, without having to pay us. (`royalty-free' usage)
+
+ o You may not pretend that you wrote this software. If you use
+ it, or only parts of it, in a program, you must acknowledge
+ somewhere in your documentation that you have used the
+ FreeType code. (`credits')
+
+ We specifically permit and encourage the inclusion of this
+ software, with or without modifications, in commercial products.
+ We disclaim all warranties covering The FreeType Project and
+ assume no liability related to The FreeType Project.
+
+
+ Finally, many people asked us for a preferred form for a
+ credit/disclaimer to use in compliance with this license. We thus
+ encourage you to use the following text:
+
+ """
+ Portions of this software are copyright © The FreeType
+ Project (www.freetype.org). All rights reserved.
+ """
+
+ Please replace with the value from the FreeType version you
+ actually use.
+
+
+Legal Terms
+===========
+
+0. Definitions
+--------------
+
+ Throughout this license, the terms `package', `FreeType Project',
+ and `FreeType archive' refer to the set of files originally
+ distributed by the authors (David Turner, Robert Wilhelm, and
+ Werner Lemberg) as the `FreeType Project', be they named as alpha,
+ beta or final release.
+
+ `You' refers to the licensee, or person using the project, where
+ `using' is a generic term including compiling the project's source
+ code as well as linking it to form a `program' or `executable'.
+ This program is referred to as `a program using the FreeType
+ engine'.
+
+ This license applies to all files distributed in the original
+ FreeType Project, including all source code, binaries and
+ documentation, unless otherwise stated in the file in its
+ original, unmodified form as distributed in the original archive.
+ If you are unsure whether or not a particular file is covered by
+ this license, you must contact us to verify this.
+
+ The FreeType Project is copyright (C) 1996-2000 by David Turner,
+ Robert Wilhelm, and Werner Lemberg. All rights reserved except as
+ specified below.
+
+1. No Warranty
+--------------
+
+ THE FREETYPE PROJECT IS PROVIDED `AS IS' WITHOUT WARRANTY OF ANY
+ KIND, EITHER EXPRESS OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+ WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ PURPOSE. IN NO EVENT WILL ANY OF THE AUTHORS OR COPYRIGHT HOLDERS
+ BE LIABLE FOR ANY DAMAGES CAUSED BY THE USE OR THE INABILITY TO
+ USE, OF THE FREETYPE PROJECT.
+
+2. Redistribution
+-----------------
+
+ This license grants a worldwide, royalty-free, perpetual and
+ irrevocable right and license to use, execute, perform, compile,
+ display, copy, create derivative works of, distribute and
+ sublicense the FreeType Project (in both source and object code
+ forms) and derivative works thereof for any purpose; and to
+ authorize others to exercise some or all of the rights granted
+ herein, subject to the following conditions:
+
+ o Redistribution of source code must retain this license file
+ (`FTL.TXT') unaltered; any additions, deletions or changes to
+ the original files must be clearly indicated in accompanying
+ documentation. The copyright notices of the unaltered,
+ original files must be preserved in all copies of source
+ files.
+
+ o Redistribution in binary form must provide a disclaimer that
+ states that the software is based in part of the work of the
+ FreeType Team, in the distribution documentation. We also
+ encourage you to put an URL to the FreeType web page in your
+ documentation, though this isn't mandatory.
+
+ These conditions apply to any software derived from or based on
+ the FreeType Project, not just the unmodified files. If you use
+ our work, you must acknowledge us. However, no fee need be paid
+ to us.
+
+3. Advertising
+--------------
+
+ Neither the FreeType authors and contributors nor you shall use
+ the name of the other for commercial, advertising, or promotional
+ purposes without specific prior written permission.
+
+ We suggest, but do not require, that you use one or more of the
+ following phrases to refer to this software in your documentation
+ or advertising materials: `FreeType Project', `FreeType Engine',
+ `FreeType library', or `FreeType Distribution'.
+
+ As you have not signed this license, you are not required to
+ accept it. However, as the FreeType Project is copyrighted
+ material, only this license, or another one contracted with the
+ authors, grants you the right to use, distribute, and modify it.
+ Therefore, by using, distributing, or modifying the FreeType
+ Project, you indicate that you understand and accept all the terms
+ of this license.
+
+4. Contacts
+-----------
+
+ There are two mailing lists related to FreeType:
+
+ o freetype@nongnu.org
+
+ Discusses general use and applications of FreeType, as well as
+ future and wanted additions to the library and distribution.
+ If you are looking for support, start in this list if you
+ haven't found anything to help you in the documentation.
+
+ o freetype-devel@nongnu.org
+
+ Discusses bugs, as well as engine internals, design issues,
+ specific licenses, porting, etc.
+
+ Our home page can be found at
+
+ http://www.freetype.org
+
+
+--- end of FTL.TXT ---
diff --git a/Externals/ffmpeg/dev/licenses/frei0r.txt b/Externals/ffmpeg/dev/licenses/frei0r.txt
new file mode 100644
index 0000000000..623b6258a1
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/frei0r.txt
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ , 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/Externals/ffmpeg/dev/licenses/gme.txt b/Externals/ffmpeg/dev/licenses/gme.txt
new file mode 100644
index 0000000000..5ab7695ab8
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/gme.txt
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/Externals/ffmpeg/dev/licenses/gnutls.txt b/Externals/ffmpeg/dev/licenses/gnutls.txt
new file mode 100644
index 0000000000..94a9ed024d
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/gnutls.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/Externals/ffmpeg/dev/licenses/lame.txt b/Externals/ffmpeg/dev/licenses/lame.txt
new file mode 100644
index 0000000000..f5030495bf
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/lame.txt
@@ -0,0 +1,481 @@
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it. You can use it for
+your libraries, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library. If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software. To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+ Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+ The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+ Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries. We
+concluded that weaker conditions might promote sharing better.
+
+ However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves. This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them. (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.) The hope is that this
+will lead to faster development of free libraries.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+ Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License"). Each licensee is
+addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
diff --git a/Externals/ffmpeg/dev/licenses/libass.txt b/Externals/ffmpeg/dev/licenses/libass.txt
new file mode 100644
index 0000000000..8351a30e3a
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libass.txt
@@ -0,0 +1,11 @@
+Permission to use, copy, modify, and/or distribute this software for any
+purpose with or without fee is hereby granted, provided that the above
+copyright notice and this permission notice appear in all copies.
+
+THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
diff --git a/Externals/ffmpeg/dev/licenses/libbluray.txt b/Externals/ffmpeg/dev/licenses/libbluray.txt
new file mode 100644
index 0000000000..20fb9c7da2
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libbluray.txt
@@ -0,0 +1,458 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
diff --git a/Externals/ffmpeg/dev/licenses/libbs2b.txt b/Externals/ffmpeg/dev/licenses/libbs2b.txt
new file mode 100644
index 0000000000..cbf26a0226
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libbs2b.txt
@@ -0,0 +1,20 @@
+Copyright (c) 2005 Boris Mikhaylov
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice shall be
+included in all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/Externals/ffmpeg/dev/licenses/libcaca.txt b/Externals/ffmpeg/dev/licenses/libcaca.txt
new file mode 100644
index 0000000000..2978491d0f
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libcaca.txt
@@ -0,0 +1,14 @@
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ Version 2, December 2004
+
+ Copyright (C) 2004 Sam Hocevar
+ 14 rue de Plaisance, 75014 Paris, France
+ Everyone is permitted to copy and distribute verbatim or modified
+ copies of this license document, and changing it is allowed as long
+ as the name is changed.
+
+ DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. You just DO WHAT THE FUCK YOU WANT TO.
+
diff --git a/Externals/ffmpeg/dev/licenses/libgsm.txt b/Externals/ffmpeg/dev/licenses/libgsm.txt
new file mode 100644
index 0000000000..28fbb3ce15
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libgsm.txt
@@ -0,0 +1,35 @@
+Copyright 1992, 1993, 1994 by Jutta Degener and Carsten Bormann,
+Technische Universitaet Berlin
+
+Any use of this software is permitted provided that this notice is not
+removed and that neither the authors nor the Technische Universitaet Berlin
+are deemed to have made any representations as to the suitability of this
+software for any purpose nor are held responsible for any defects of
+this software. THERE IS ABSOLUTELY NO WARRANTY FOR THIS SOFTWARE.
+
+As a matter of courtesy, the authors request to be informed about uses
+this software has found, about bugs in this software, and about any
+improvements that may be of general interest.
+
+Berlin, 28.11.1994
+Jutta Degener
+Carsten Bormann
+
+ oOo
+
+Since the original terms of 15 years ago maybe do not make our
+intentions completely clear given today's refined usage of the legal
+terms, we append this additional permission:
+
+ Permission to use, copy, modify, and distribute this software
+ for any purpose with or without fee is hereby granted,
+ provided that this notice is not removed and that neither
+ the authors nor the Technische Universitaet Berlin are
+ deemed to have made any representations as to the suitability
+ of this software for any purpose nor are held responsible
+ for any defects of this software. THERE IS ABSOLUTELY NO
+ WARRANTY FOR THIS SOFTWARE.
+
+Berkeley/Bremen, 05.04.2009
+Jutta Degener
+Carsten Bormann
diff --git a/Externals/ffmpeg/dev/licenses/libiconv.txt b/Externals/ffmpeg/dev/licenses/libiconv.txt
new file mode 100644
index 0000000000..94a9ed024d
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libiconv.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+    along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+    <program>  Copyright (C) <year>  <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<http://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<http://www.gnu.org/philosophy/why-not-lgpl.html>.
diff --git a/Externals/ffmpeg/dev/licenses/libilbc.txt b/Externals/ffmpeg/dev/licenses/libilbc.txt
new file mode 100644
index 0000000000..4c41b7b251
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libilbc.txt
@@ -0,0 +1,29 @@
+Copyright (c) 2011, The WebRTC project authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Google nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Externals/ffmpeg/dev/licenses/libmodplug.txt b/Externals/ffmpeg/dev/licenses/libmodplug.txt
new file mode 100644
index 0000000000..59fbf826c3
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libmodplug.txt
@@ -0,0 +1 @@
+ModPlug-XMMS and libmodplug are now in the public domain.
diff --git a/Externals/ffmpeg/dev/licenses/libtheora.txt b/Externals/ffmpeg/dev/licenses/libtheora.txt
new file mode 100644
index 0000000000..c8ccce4ffb
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libtheora.txt
@@ -0,0 +1,28 @@
+Copyright (C) 2002-2009 Xiph.org Foundation
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+- Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+- Neither the name of the Xiph.org Foundation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Externals/ffmpeg/dev/licenses/libvorbis.txt b/Externals/ffmpeg/dev/licenses/libvorbis.txt
new file mode 100644
index 0000000000..28de72a970
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libvorbis.txt
@@ -0,0 +1,28 @@
+Copyright (c) 2002-2008 Xiph.org Foundation
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+- Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+- Neither the name of the Xiph.org Foundation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Externals/ffmpeg/dev/licenses/libvpx.txt b/Externals/ffmpeg/dev/licenses/libvpx.txt
new file mode 100644
index 0000000000..1ce44343c4
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libvpx.txt
@@ -0,0 +1,31 @@
+Copyright (c) 2010, The WebM Project authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Google, nor the WebM Project, nor the names
+ of its contributors may be used to endorse or promote products
+ derived from this software without specific prior written
+ permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/Externals/ffmpeg/dev/licenses/libwebp.txt b/Externals/ffmpeg/dev/licenses/libwebp.txt
new file mode 100644
index 0000000000..7a6f99547d
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/libwebp.txt
@@ -0,0 +1,30 @@
+Copyright (c) 2010, Google Inc. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+
+ * Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in
+ the documentation and/or other materials provided with the
+ distribution.
+
+ * Neither the name of Google nor the names of its contributors may
+ be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/Externals/ffmpeg/dev/licenses/opencore-amr.txt b/Externals/ffmpeg/dev/licenses/opencore-amr.txt
new file mode 100644
index 0000000000..5ec4bf01e7
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/opencore-amr.txt
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the
+copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other
+entities that control, are controlled by, or are under common control with
+that entity. For the purposes of this definition, "control" means (i) the
+power, direct or indirect, to cause the direction or management of such
+entity, whether by contract or otherwise, or (ii) ownership of fifty
+percent (50%) or more of the outstanding shares, or (iii) beneficial
+ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation source,
+and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation
+or translation of a Source form, including but not limited to compiled
+object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form,
+made available under the License, as indicated by a copyright notice that
+is included in or attached to the work (an example is provided in the
+Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form,
+that is based on (or derived from) the Work and for which the editorial
+revisions, annotations, elaborations, or other modifications represent, as
+a whole, an original work of authorship. For the purposes of this License,
+Derivative Works shall not include works that remain separable from, or
+merely link (or bind by name) to the interfaces of, the Work and Derivative
+Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original
+version of the Work and any modifications or additions to that Work or
+Derivative Works thereof, that is intentionally submitted to Licensor for
+inclusion in the Work by the copyright owner or by an individual or Legal
+Entity authorized to submit on behalf of the copyright owner. For the
+purposes of this definition, "submitted" means any form of electronic,
+verbal, or written communication sent to the Licensor or its
+representatives, including but not limited to communication on electronic
+mailing lists, source code control systems, and issue tracking systems that
+are managed by, or on behalf of, the Licensor for the purpose of discussing
+and improving the Work, but excluding communication that is conspicuously
+marked or otherwise designated in writing by the copyright owner as "Not a
+Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on
+behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable copyright license to
+reproduce, prepare Derivative Works of, publicly display, publicly perform,
+sublicense, and distribute the Work and such Derivative Works in Source or
+Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
+this section) patent license to make, have made, use, offer to sell, sell,
+import, and otherwise transfer the Work, where such license applies only to
+those patent claims licensable by such Contributor that are necessarily
+infringed by their Contribution(s) alone or by combination of their
+Contribution(s) with the Work to which such Contribution(s) was submitted.
+If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or
+contributory patent infringement, then any patent licenses granted to You
+under this License for that Work shall terminate as of the date such
+litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or
+Derivative Works thereof in any medium, with or without modifications, and
+in Source or Object form, provided that You meet the following conditions:
+
+ 1. You must give any other recipients of the Work or Derivative Works a
+copy of this License; and
+
+ 2. You must cause any modified files to carry prominent notices stating
+that You changed the files; and
+
+ 3. You must retain, in the Source form of any Derivative Works that You
+distribute, all copyright, patent, trademark, and attribution notices from
+the Source form of the Work, excluding those notices that do not pertain to
+any part of the Derivative Works; and
+
+ 4. If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must include a
+readable copy of the attribution notices contained within such NOTICE file,
+excluding those notices that do not pertain to any part of the Derivative
+Works, in at least one of the following places: within a NOTICE text file
+distributed as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or, within a
+display generated by the Derivative Works, if and wherever such third-party
+notices normally appear. The contents of the NOTICE file are for
+informational purposes only and do not modify the License. You may add Your
+own attribution notices within Derivative Works that You distribute,
+alongside or as an addendum to the NOTICE text from the Work, provided that
+such additional attribution notices cannot be construed as modifying the
+License.
+
+You may add Your own copyright statement to Your modifications and may
+provide additional or different license terms and conditions for use,
+reproduction, or distribution of Your modifications, or for any such
+Derivative Works as a whole, provided Your use, reproduction, and
+distribution of the Work otherwise complies with the conditions stated in
+this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any
+Contribution intentionally submitted for inclusion in the Work by You to
+the Licensor shall be under the terms and conditions of this License,
+without any additional terms or conditions. Notwithstanding the above,
+nothing herein shall supersede or modify the terms of any separate license
+agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor, except
+as required for reasonable and customary use in describing the origin of
+the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to
+in writing, Licensor provides the Work (and each Contributor provides its
+Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied, including, without limitation, any
+warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
+FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for
+determining the appropriateness of using or redistributing the Work and
+assume any risks associated with Your exercise of permissions under this
+License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether
+in tort (including negligence), contract, or otherwise, unless required by
+applicable law (such as deliberate and grossly negligent acts) or agreed to
+in writing, shall any Contributor be liable to You for damages, including
+any direct, indirect, special, incidental, or consequential damages of any
+character arising as a result of this License or out of the use or
+inability to use the Work (including but not limited to damages for loss of
+goodwill, work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor has been
+advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the
+Work or Derivative Works thereof, You may choose to offer, and charge a fee
+for, acceptance of support, warranty, indemnity, or other liability
+obligations and/or rights consistent with this License. However, in
+accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if
+You agree to indemnify, defend, and hold each Contributor harmless for any
+liability incurred by, or claims asserted against, such Contributor by
+reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included
+on the same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain a
+ copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable
+ law or agreed to in writing, software distributed under the License is
+ distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the specific language
+ governing permissions and limitations under the License.
diff --git a/Externals/ffmpeg/dev/licenses/openjpeg.txt b/Externals/ffmpeg/dev/licenses/openjpeg.txt
new file mode 100644
index 0000000000..f578e33a30
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/openjpeg.txt
@@ -0,0 +1,34 @@
+/*
+ * Copyright (c) 2002-2012, Communications and Remote Sensing Laboratory, Universite catholique de Louvain (UCL), Belgium
+ * Copyright (c) 2002-2012, Professor Benoit Macq
+ * Copyright (c) 2003-2012, Antonin Descampe
+ * Copyright (c) 2003-2009, Francois-Olivier Devaux
+ * Copyright (c) 2005, Herve Drolon, FreeImage Team
+ * Copyright (c) 2002-2003, Yannick Verschueren
+ * Copyright (c) 2001-2003, David Janssens
+ * Copyright (c) 2011-2012, Centre National d'Etudes Spatiales (CNES), France
+ * Copyright (c) 2012, CS Systemes d'Information, France
+ *
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS `AS IS'
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/Externals/ffmpeg/dev/licenses/opus.txt b/Externals/ffmpeg/dev/licenses/opus.txt
new file mode 100644
index 0000000000..f4159e675a
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/opus.txt
@@ -0,0 +1,44 @@
+Copyright 2001-2011 Xiph.Org, Skype Limited, Octasic,
+ Jean-Marc Valin, Timothy B. Terriberry,
+ CSIRO, Gregory Maxwell, Mark Borgerding,
+ Erik de Castro Lopo
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+- Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+- Neither the name of Internet Society, IETF or IETF Trust, nor the
+names of specific contributors, may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
+OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Opus is subject to the royalty-free patent licenses which are
+specified at:
+
+Xiph.Org Foundation:
+https://datatracker.ietf.org/ipr/1524/
+
+Microsoft Corporation:
+https://datatracker.ietf.org/ipr/1914/
+
+Broadcom Corporation:
+https://datatracker.ietf.org/ipr/1526/
diff --git a/Externals/ffmpeg/dev/licenses/rtmpdump.txt b/Externals/ffmpeg/dev/licenses/rtmpdump.txt
new file mode 100644
index 0000000000..d511905c16
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/rtmpdump.txt
@@ -0,0 +1,339 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License along
+ with this program; if not, write to the Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/Externals/ffmpeg/dev/licenses/schroedinger.txt b/Externals/ffmpeg/dev/licenses/schroedinger.txt
new file mode 100644
index 0000000000..8a68a0d959
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/schroedinger.txt
@@ -0,0 +1,467 @@
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1991 Free Software Foundation, Inc.
+ 675 Mass Ave, Cambridge, MA 02139, USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the library GPL. It is
+ numbered 2 because it goes with version 2 of the ordinary GPL.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Library General Public License, applies to some
+specially designated Free Software Foundation software, and to any
+other libraries whose authors decide to use it. You can use it for
+your libraries, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if
+you distribute copies of the library, or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link a program with the library, you must provide
+complete object files to the recipients so that they can relink them
+with the library, after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ Our method of protecting your rights has two steps: (1) copyright
+the library, and (2) offer you this license which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ Also, for each distributor's protection, we want to make certain
+that everyone understands that there is no warranty for this free
+library. If the library is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original
+version, so that any problems introduced by others will not reflect on
+the original authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that companies distributing free
+software will individually obtain patent licenses, thus in effect
+transforming the program into proprietary software. To prevent this,
+we have made it clear that any patent must be licensed for everyone's
+free use or not licensed at all.
+
+ Most GNU software, including some libraries, is covered by the ordinary
+GNU General Public License, which was designed for utility programs. This
+license, the GNU Library General Public License, applies to certain
+designated libraries. This license is quite different from the ordinary
+one; be sure to read it in full, and don't assume that anything in it is
+the same as in the ordinary license.
+
+ The reason we have a separate public license for some libraries is that
+they blur the distinction we usually make between modifying or adding to a
+program and simply using it. Linking a program with a library, without
+changing the library, is in some sense simply using the library, and is
+analogous to running a utility program or application program. However, in
+a textual and legal sense, the linked executable is a combined work, a
+derivative of the original library, and the ordinary General Public License
+treats it as such.
+
+ Because of this blurred distinction, using the ordinary General
+Public License for libraries did not effectively promote software
+sharing, because most developers did not use the libraries. We
+concluded that weaker conditions might promote sharing better.
+
+ However, unrestricted linking of non-free programs would deprive the
+users of those programs of all benefit from the free status of the
+libraries themselves. This Library General Public License is intended to
+permit developers of non-free programs to use free libraries, while
+preserving your freedom as a user of such programs to change the free
+libraries that are incorporated in them. (We have not seen how to achieve
+this as regards changes in header files, but we have achieved it as regards
+changes in the actual functions of the Library.) The hope is that this
+will lead to faster development of free libraries.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, while the latter only
+works together with the library.
+
+ Note that it is possible for a library to be covered by the ordinary
+General Public License rather than by this special one.
+
+ GNU LIBRARY GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library which
+contains a notice placed by the copyright holder or other authorized
+party saying it may be distributed under the terms of this Library
+General Public License (also called "this License"). Each licensee is
+addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also compile or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ c) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ d) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the source code distributed need not include anything that is normally
+distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Library General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ Appendix: How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C)
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Library General Public
+ License as published by the Free Software Foundation; either
+ version 2 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Library General Public License for more details.
+
+ You should have received a copy of the GNU Library General Public
+ License along with this library; if not, write to the Free
+ Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
diff --git a/Externals/ffmpeg/dev/licenses/soxr.txt b/Externals/ffmpeg/dev/licenses/soxr.txt
new file mode 100644
index 0000000000..1c618785e9
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/soxr.txt
@@ -0,0 +1,24 @@
+SoX Resampler Library Copyright (c) 2007-13 robs@users.sourceforge.net
+
+This library is free software; you can redistribute it and/or modify it
+under the terms of the GNU Lesser General Public License as published by
+the Free Software Foundation; either version 2.1 of the License, or (at
+your option) any later version.
+
+This library is distributed in the hope that it will be useful, but
+WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser
+General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public License
+along with this library; if not, write to the Free Software Foundation,
+Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+
+Notes
+
+1. Re software in the `examples' directory: works that are not resampling
+examples but are based on the given examples -- for example, applications using
+the library -- shall not be considered to be derivative works of the examples.
+
+2. If building with pffft.c, see the licence embedded in that file.
diff --git a/Externals/ffmpeg/dev/licenses/speex.txt b/Externals/ffmpeg/dev/licenses/speex.txt
new file mode 100644
index 0000000000..de6fbe2c91
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/speex.txt
@@ -0,0 +1,35 @@
+Copyright 2002-2008 Xiph.org Foundation
+Copyright 2002-2008 Jean-Marc Valin
+Copyright 2005-2007 Analog Devices Inc.
+Copyright 2005-2008 Commonwealth Scientific and Industrial Research
+ Organisation (CSIRO)
+Copyright 1993, 2002, 2006 David Rowe
+Copyright 2003 EpicGames
+Copyright 1992-1994 Jutta Degener, Carsten Bormann
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+
+- Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+
+- Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+
+- Neither the name of the Xiph.org Foundation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
+CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Externals/ffmpeg/dev/licenses/twolame.txt b/Externals/ffmpeg/dev/licenses/twolame.txt
new file mode 100644
index 0000000000..b1e3f5a263
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/twolame.txt
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the library's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+  <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/Externals/ffmpeg/dev/licenses/vid.stab.txt b/Externals/ffmpeg/dev/licenses/vid.stab.txt
new file mode 100644
index 0000000000..a09e1dc74d
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/vid.stab.txt
@@ -0,0 +1,16 @@
+In this project is open source in the sense of the GPL.
+
+ * This program is free software; you can redistribute it and/or modify *
+ * it under the terms of the GNU General Public License as published by *
+ * the Free Software Foundation; either version 2 of the License, or *
+ * (at your option) any later version. *
+ * *
+ * You should have received a copy of the GNU General Public License *
+ * along with this program; if not, write to the *
+ * Free Software Foundation, Inc., *
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
+ * *
+ * This program is distributed in the hope that it will be useful, *
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of *
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the *
+ * GNU General Public License for more details. *
diff --git a/Externals/ffmpeg/dev/licenses/vo-aacenc.txt b/Externals/ffmpeg/dev/licenses/vo-aacenc.txt
new file mode 100644
index 0000000000..5ec4bf01e7
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/vo-aacenc.txt
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the
+copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other
+entities that control, are controlled by, or are under common control with
+that entity. For the purposes of this definition, "control" means (i) the
+power, direct or indirect, to cause the direction or management of such
+entity, whether by contract or otherwise, or (ii) ownership of fifty
+percent (50%) or more of the outstanding shares, or (iii) beneficial
+ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation source,
+and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation
+or translation of a Source form, including but not limited to compiled
+object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form,
+made available under the License, as indicated by a copyright notice that
+is included in or attached to the work (an example is provided in the
+Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form,
+that is based on (or derived from) the Work and for which the editorial
+revisions, annotations, elaborations, or other modifications represent, as
+a whole, an original work of authorship. For the purposes of this License,
+Derivative Works shall not include works that remain separable from, or
+merely link (or bind by name) to the interfaces of, the Work and Derivative
+Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original
+version of the Work and any modifications or additions to that Work or
+Derivative Works thereof, that is intentionally submitted to Licensor for
+inclusion in the Work by the copyright owner or by an individual or Legal
+Entity authorized to submit on behalf of the copyright owner. For the
+purposes of this definition, "submitted" means any form of electronic,
+verbal, or written communication sent to the Licensor or its
+representatives, including but not limited to communication on electronic
+mailing lists, source code control systems, and issue tracking systems that
+are managed by, or on behalf of, the Licensor for the purpose of discussing
+and improving the Work, but excluding communication that is conspicuously
+marked or otherwise designated in writing by the copyright owner as "Not a
+Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on
+behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable copyright license to
+reproduce, prepare Derivative Works of, publicly display, publicly perform,
+sublicense, and distribute the Work and such Derivative Works in Source or
+Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
+this section) patent license to make, have made, use, offer to sell, sell,
+import, and otherwise transfer the Work, where such license applies only to
+those patent claims licensable by such Contributor that are necessarily
+infringed by their Contribution(s) alone or by combination of their
+Contribution(s) with the Work to which such Contribution(s) was submitted.
+If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or
+contributory patent infringement, then any patent licenses granted to You
+under this License for that Work shall terminate as of the date such
+litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or
+Derivative Works thereof in any medium, with or without modifications, and
+in Source or Object form, provided that You meet the following conditions:
+
+ 1. You must give any other recipients of the Work or Derivative Works a
+copy of this License; and
+
+ 2. You must cause any modified files to carry prominent notices stating
+that You changed the files; and
+
+ 3. You must retain, in the Source form of any Derivative Works that You
+distribute, all copyright, patent, trademark, and attribution notices from
+the Source form of the Work, excluding those notices that do not pertain to
+any part of the Derivative Works; and
+
+ 4. If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must include a
+readable copy of the attribution notices contained within such NOTICE file,
+excluding those notices that do not pertain to any part of the Derivative
+Works, in at least one of the following places: within a NOTICE text file
+distributed as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or, within a
+display generated by the Derivative Works, if and wherever such third-party
+notices normally appear. The contents of the NOTICE file are for
+informational purposes only and do not modify the License. You may add Your
+own attribution notices within Derivative Works that You distribute,
+alongside or as an addendum to the NOTICE text from the Work, provided that
+such additional attribution notices cannot be construed as modifying the
+License.
+
+You may add Your own copyright statement to Your modifications and may
+provide additional or different license terms and conditions for use,
+reproduction, or distribution of Your modifications, or for any such
+Derivative Works as a whole, provided Your use, reproduction, and
+distribution of the Work otherwise complies with the conditions stated in
+this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any
+Contribution intentionally submitted for inclusion in the Work by You to
+the Licensor shall be under the terms and conditions of this License,
+without any additional terms or conditions. Notwithstanding the above,
+nothing herein shall supersede or modify the terms of any separate license
+agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor, except
+as required for reasonable and customary use in describing the origin of
+the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to
+in writing, Licensor provides the Work (and each Contributor provides its
+Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied, including, without limitation, any
+warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
+FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for
+determining the appropriateness of using or redistributing the Work and
+assume any risks associated with Your exercise of permissions under this
+License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether
+in tort (including negligence), contract, or otherwise, unless required by
+applicable law (such as deliberate and grossly negligent acts) or agreed to
+in writing, shall any Contributor be liable to You for damages, including
+any direct, indirect, special, incidental, or consequential damages of any
+character arising as a result of this License or out of the use or
+inability to use the Work (including but not limited to damages for loss of
+goodwill, work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor has been
+advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the
+Work or Derivative Works thereof, You may choose to offer, and charge a fee
+for, acceptance of support, warranty, indemnity, or other liability
+obligations and/or rights consistent with this License. However, in
+accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if
+You agree to indemnify, defend, and hold each Contributor harmless for any
+liability incurred by, or claims asserted against, such Contributor by
+reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included
+on the same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain a
+ copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable
+ law or agreed to in writing, software distributed under the License is
+ distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the specific language
+ governing permissions and limitations under the License.
diff --git a/Externals/ffmpeg/dev/licenses/vo-amrwbenc.txt b/Externals/ffmpeg/dev/licenses/vo-amrwbenc.txt
new file mode 100644
index 0000000000..5ec4bf01e7
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/vo-amrwbenc.txt
@@ -0,0 +1,191 @@
+Apache License
+Version 2.0, January 2004
+http://www.apache.org/licenses/
+
+TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+1. Definitions.
+
+"License" shall mean the terms and conditions for use, reproduction, and
+distribution as defined by Sections 1 through 9 of this document.
+
+"Licensor" shall mean the copyright owner or entity authorized by the
+copyright owner that is granting the License.
+
+"Legal Entity" shall mean the union of the acting entity and all other
+entities that control, are controlled by, or are under common control with
+that entity. For the purposes of this definition, "control" means (i) the
+power, direct or indirect, to cause the direction or management of such
+entity, whether by contract or otherwise, or (ii) ownership of fifty
+percent (50%) or more of the outstanding shares, or (iii) beneficial
+ownership of such entity.
+
+"You" (or "Your") shall mean an individual or Legal Entity exercising
+permissions granted by this License.
+
+"Source" form shall mean the preferred form for making modifications,
+including but not limited to software source code, documentation source,
+and configuration files.
+
+"Object" form shall mean any form resulting from mechanical transformation
+or translation of a Source form, including but not limited to compiled
+object code, generated documentation, and conversions to other media types.
+
+"Work" shall mean the work of authorship, whether in Source or Object form,
+made available under the License, as indicated by a copyright notice that
+is included in or attached to the work (an example is provided in the
+Appendix below).
+
+"Derivative Works" shall mean any work, whether in Source or Object form,
+that is based on (or derived from) the Work and for which the editorial
+revisions, annotations, elaborations, or other modifications represent, as
+a whole, an original work of authorship. For the purposes of this License,
+Derivative Works shall not include works that remain separable from, or
+merely link (or bind by name) to the interfaces of, the Work and Derivative
+Works thereof.
+
+"Contribution" shall mean any work of authorship, including the original
+version of the Work and any modifications or additions to that Work or
+Derivative Works thereof, that is intentionally submitted to Licensor for
+inclusion in the Work by the copyright owner or by an individual or Legal
+Entity authorized to submit on behalf of the copyright owner. For the
+purposes of this definition, "submitted" means any form of electronic,
+verbal, or written communication sent to the Licensor or its
+representatives, including but not limited to communication on electronic
+mailing lists, source code control systems, and issue tracking systems that
+are managed by, or on behalf of, the Licensor for the purpose of discussing
+and improving the Work, but excluding communication that is conspicuously
+marked or otherwise designated in writing by the copyright owner as "Not a
+Contribution."
+
+"Contributor" shall mean Licensor and any individual or Legal Entity on
+behalf of whom a Contribution has been received by Licensor and
+subsequently incorporated within the Work.
+
+2. Grant of Copyright License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable copyright license to
+reproduce, prepare Derivative Works of, publicly display, publicly perform,
+sublicense, and distribute the Work and such Derivative Works in Source or
+Object form.
+
+3. Grant of Patent License. Subject to the terms and conditions of this
+License, each Contributor hereby grants to You a perpetual, worldwide,
+non-exclusive, no-charge, royalty-free, irrevocable (except as stated in
+this section) patent license to make, have made, use, offer to sell, sell,
+import, and otherwise transfer the Work, where such license applies only to
+those patent claims licensable by such Contributor that are necessarily
+infringed by their Contribution(s) alone or by combination of their
+Contribution(s) with the Work to which such Contribution(s) was submitted.
+If You institute patent litigation against any entity (including a
+cross-claim or counterclaim in a lawsuit) alleging that the Work or a
+Contribution incorporated within the Work constitutes direct or
+contributory patent infringement, then any patent licenses granted to You
+under this License for that Work shall terminate as of the date such
+litigation is filed.
+
+4. Redistribution. You may reproduce and distribute copies of the Work or
+Derivative Works thereof in any medium, with or without modifications, and
+in Source or Object form, provided that You meet the following conditions:
+
+ 1. You must give any other recipients of the Work or Derivative Works a
+copy of this License; and
+
+ 2. You must cause any modified files to carry prominent notices stating
+that You changed the files; and
+
+ 3. You must retain, in the Source form of any Derivative Works that You
+distribute, all copyright, patent, trademark, and attribution notices from
+the Source form of the Work, excluding those notices that do not pertain to
+any part of the Derivative Works; and
+
+ 4. If the Work includes a "NOTICE" text file as part of its
+distribution, then any Derivative Works that You distribute must include a
+readable copy of the attribution notices contained within such NOTICE file,
+excluding those notices that do not pertain to any part of the Derivative
+Works, in at least one of the following places: within a NOTICE text file
+distributed as part of the Derivative Works; within the Source form or
+documentation, if provided along with the Derivative Works; or, within a
+display generated by the Derivative Works, if and wherever such third-party
+notices normally appear. The contents of the NOTICE file are for
+informational purposes only and do not modify the License. You may add Your
+own attribution notices within Derivative Works that You distribute,
+alongside or as an addendum to the NOTICE text from the Work, provided that
+such additional attribution notices cannot be construed as modifying the
+License.
+
+You may add Your own copyright statement to Your modifications and may
+provide additional or different license terms and conditions for use,
+reproduction, or distribution of Your modifications, or for any such
+Derivative Works as a whole, provided Your use, reproduction, and
+distribution of the Work otherwise complies with the conditions stated in
+this License.
+
+5. Submission of Contributions. Unless You explicitly state otherwise, any
+Contribution intentionally submitted for inclusion in the Work by You to
+the Licensor shall be under the terms and conditions of this License,
+without any additional terms or conditions. Notwithstanding the above,
+nothing herein shall supersede or modify the terms of any separate license
+agreement you may have executed with Licensor regarding such Contributions.
+
+6. Trademarks. This License does not grant permission to use the trade
+names, trademarks, service marks, or product names of the Licensor, except
+as required for reasonable and customary use in describing the origin of
+the Work and reproducing the content of the NOTICE file.
+
+7. Disclaimer of Warranty. Unless required by applicable law or agreed to
+in writing, Licensor provides the Work (and each Contributor provides its
+Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied, including, without limitation, any
+warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or
+FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for
+determining the appropriateness of using or redistributing the Work and
+assume any risks associated with Your exercise of permissions under this
+License.
+
+8. Limitation of Liability. In no event and under no legal theory, whether
+in tort (including negligence), contract, or otherwise, unless required by
+applicable law (such as deliberate and grossly negligent acts) or agreed to
+in writing, shall any Contributor be liable to You for damages, including
+any direct, indirect, special, incidental, or consequential damages of any
+character arising as a result of this License or out of the use or
+inability to use the Work (including but not limited to damages for loss of
+goodwill, work stoppage, computer failure or malfunction, or any and all
+other commercial damages or losses), even if such Contributor has been
+advised of the possibility of such damages.
+
+9. Accepting Warranty or Additional Liability. While redistributing the
+Work or Derivative Works thereof, You may choose to offer, and charge a fee
+for, acceptance of support, warranty, indemnity, or other liability
+obligations and/or rights consistent with this License. However, in
+accepting such obligations, You may act only on Your own behalf and on Your
+sole responsibility, not on behalf of any other Contributor, and only if
+You agree to indemnify, defend, and hold each Contributor harmless for any
+liability incurred by, or claims asserted against, such Contributor by
+reason of your accepting any such warranty or additional liability.
+
+END OF TERMS AND CONDITIONS
+
+APPENDIX: How to apply the Apache License to your work
+
+To apply the Apache License to your work, attach the following boilerplate
+notice, with the fields enclosed by brackets "[]" replaced with your own
+identifying information. (Don't include the brackets!) The text should be
+enclosed in the appropriate comment syntax for the file format. We also
+recommend that a file or class name and description of purpose be included
+on the same "printed page" as the copyright notice for easier
+identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License"); you may
+ not use this file except in compliance with the License. You may obtain a
+ copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable
+ law or agreed to in writing, software distributed under the License is
+ distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ KIND, either express or implied. See the License for the specific language
+ governing permissions and limitations under the License.
diff --git a/Externals/ffmpeg/dev/licenses/wavpack.txt b/Externals/ffmpeg/dev/licenses/wavpack.txt
new file mode 100644
index 0000000000..6ffc23b932
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/wavpack.txt
@@ -0,0 +1,25 @@
+ Copyright (c) 1998 - 2009 Conifer Software
+ All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+ * Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+ * Neither the name of Conifer Software nor the names of its contributors
+ may be used to endorse or promote products derived from this software
+ without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Externals/ffmpeg/dev/licenses/x264.txt b/Externals/ffmpeg/dev/licenses/x264.txt
new file mode 100644
index 0000000000..d60c31a97a
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/x264.txt
@@ -0,0 +1,340 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C)
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+ , 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
diff --git a/Externals/ffmpeg/dev/licenses/x265.txt b/Externals/ffmpeg/dev/licenses/x265.txt
new file mode 100644
index 0000000000..18c946f707
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/x265.txt
@@ -0,0 +1,343 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+ 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users. This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it. (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.) You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+ To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have. You must make sure that they, too, receive or can get the
+source code. And you must show them these terms so they know their
+rights.
+
+ We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+ Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software. If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+ Finally, any free program is threatened constantly by software
+patents. We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary. To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ GNU GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License. The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language. (Hereinafter, translation is included without limitation in
+the term "modification".) Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+ 1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+ 2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) You must cause the modified files to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ b) You must cause any work that you distribute or publish, that in
+ whole or in part contains or is derived from the Program or any
+ part thereof, to be licensed as a whole at no charge to all third
+ parties under the terms of this License.
+
+ c) If the modified program normally reads commands interactively
+ when run, you must cause it, when started running for such
+ interactive use in the most ordinary way, to print or display an
+ announcement including an appropriate copyright notice and a
+ notice that there is no warranty (or else, saying that you provide
+ a warranty) and that users may redistribute the program under
+ these conditions, and telling the user how to view a copy of this
+ License. (Exception: if the Program itself is interactive but
+ does not normally print such an announcement, your work based on
+ the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+ a) Accompany it with the complete corresponding machine-readable
+ source code, which must be distributed under the terms of Sections
+ 1 and 2 above on a medium customarily used for software interchange; or,
+
+ b) Accompany it with a written offer, valid for at least three
+ years, to give any third party, for a charge no more than your
+ cost of physically performing source distribution, a complete
+ machine-readable copy of the corresponding source code, to be
+ distributed under the terms of Sections 1 and 2 above on a medium
+ customarily used for software interchange; or,
+
+ c) Accompany it with the information you received as to the offer
+ to distribute corresponding source code. (This alternative is
+ allowed only for noncommercial distribution and only if you
+ received the program in object code or executable form with such
+ an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it. For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable. However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License. Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+ 5. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Program or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+ 6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+ 7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all. For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded. In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+ 9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation. If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+ 10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission. For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this. Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+ NO WARRANTY
+
+ 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+ 12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+    Copyright (C) <year>  <name of author>
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+ Gnomovision version 69, Copyright (C) year name of author
+ Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+ `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+ Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs. If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library. If this is what you want to do, use the GNU Library General
+Public License instead of this License.
+
+This program is also available under a commercial proprietary license.
+For more information, contact us at licensing@multicorewareinc.com.
diff --git a/Externals/ffmpeg/dev/licenses/xavs.txt b/Externals/ffmpeg/dev/licenses/xavs.txt
new file mode 100644
index 0000000000..94a9ed024d
--- /dev/null
+++ b/Externals/ffmpeg/dev/licenses/xavs.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+
+ Copyright (C)