ffmpeg-2.8.5

git-svn-id: svn://kolibrios.org@6147 a494cfbc-eb01-0410-851d-a64ba20cac60
Author: Sergey Semyonov (Serge)
Date:   2016-02-05 22:08:02 +00:00
Parent: a08f61ddb9
Commit: a4b787f4b8

5429 changed files with 1356786 additions and 0 deletions

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,184 @@
LIBRARIES-$(CONFIG_AVUTIL) += libavutil
LIBRARIES-$(CONFIG_SWSCALE) += libswscale
LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
MANPAGES1 = $(AVPROGS-yes:%=doc/%.1) $(AVPROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
MANPAGES = $(MANPAGES1) $(MANPAGES3)
PODPAGES = $(AVPROGS-yes:%=doc/%.pod) $(AVPROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
HTMLPAGES = $(AVPROGS-yes:%=doc/%.html) $(AVPROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
doc/developer.html \
doc/faq.html \
doc/fate.html \
doc/general.html \
doc/git-howto.html \
doc/nut.html \
doc/platform.html \
TXTPAGES = doc/fate.txt \
DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
DOCS = $(DOCS-yes)
DOC_EXAMPLES-$(CONFIG_AVIO_DIR_CMD_EXAMPLE) += avio_dir_cmd
DOC_EXAMPLES-$(CONFIG_AVIO_READING_EXAMPLE) += avio_reading
DOC_EXAMPLES-$(CONFIG_AVCODEC_EXAMPLE) += avcodec
DOC_EXAMPLES-$(CONFIG_DECODING_ENCODING_EXAMPLE) += decoding_encoding
DOC_EXAMPLES-$(CONFIG_DEMUXING_DECODING_EXAMPLE) += demuxing_decoding
DOC_EXAMPLES-$(CONFIG_EXTRACT_MVS_EXAMPLE) += extract_mvs
DOC_EXAMPLES-$(CONFIG_FILTER_AUDIO_EXAMPLE) += filter_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_AUDIO_EXAMPLE) += filtering_audio
DOC_EXAMPLES-$(CONFIG_FILTERING_VIDEO_EXAMPLE) += filtering_video
DOC_EXAMPLES-$(CONFIG_METADATA_EXAMPLE) += metadata
DOC_EXAMPLES-$(CONFIG_MUXING_EXAMPLE) += muxing
DOC_EXAMPLES-$(CONFIG_QSVDEC_EXAMPLE) += qsvdec
DOC_EXAMPLES-$(CONFIG_REMUXING_EXAMPLE) += remuxing
DOC_EXAMPLES-$(CONFIG_RESAMPLING_AUDIO_EXAMPLE) += resampling_audio
DOC_EXAMPLES-$(CONFIG_SCALING_VIDEO_EXAMPLE) += scaling_video
DOC_EXAMPLES-$(CONFIG_TRANSCODE_AAC_EXAMPLE) += transcode_aac
DOC_EXAMPLES-$(CONFIG_TRANSCODING_EXAMPLE) += transcoding
ALL_DOC_EXAMPLES_LIST = $(DOC_EXAMPLES-) $(DOC_EXAMPLES-yes)
DOC_EXAMPLES := $(DOC_EXAMPLES-yes:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_DOC_EXAMPLES := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)$(EXESUF))
ALL_DOC_EXAMPLES_G := $(ALL_DOC_EXAMPLES_LIST:%=doc/examples/%$(PROGSSUF)_g$(EXESUF))
PROGS += $(DOC_EXAMPLES)
all-$(CONFIG_DOC): doc
doc: documentation
apidoc: doc/doxy/html
documentation: $(DOCS)
examples: $(DOC_EXAMPLES)
TEXIDEP = perl $(SRC_PATH)/doc/texidep.pl $(SRC_PATH) $< $@ >$(@:%=%.d)
doc/%.txt: TAG = TXT
doc/%.txt: doc/%.texi
$(Q)$(TEXIDEP)
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
GENTEXI = format codec
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
$(GENTEXI): TAG = GENTEXI
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
$(M)doc/print_options $* > $@
doc/%.html: TAG = HTML
doc/%-all.html: TAG = HTML
ifdef HAVE_MAKEINFO_HTML
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)makeinfo --html -I doc --no-split -D config-not-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.pm $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)makeinfo --html -I doc --no-split -D config-all --init-file=$(SRC_PATH)/doc/t2h.pm --output $@ $<
else
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
endif
doc/%.pod: TAG = POD
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-not-all=yes -Idoc $< $@
doc/%-all.pod: TAG = POD
doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
$(Q)$(TEXIDEP)
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-all=yes -Idoc $< $@
doc/%.1 doc/%.3: TAG = MAN
doc/%.1: doc/%.pod $(GENTEXI)
$(M)pod2man --section=1 --center=" " --release=" " --date=" " $< > $@
doc/%.3: doc/%.pod $(GENTEXI)
$(M)pod2man --section=3 --center=" " --release=" " --date=" " $< > $@
$(DOCS) doc/doxy/html: | doc/
$(DOC_EXAMPLES:%$(EXESUF)=%.o): | doc/examples
OBJDIRS += doc/examples
DOXY_INPUT = $(addprefix $(SRC_PATH)/, $(INSTHEADERS) $(DOC_EXAMPLES:%$(EXESUF)=%.c) $(LIB_EXAMPLES:%$(EXESUF)=%.c))
doc/doxy/html: TAG = DOXY
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(SRC_PATH)/doc/doxy-wrapper.sh $(DOXY_INPUT)
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $< $(DOXYGEN) $(DOXY_INPUT)
install-doc: install-html install-man
install-html:
install-man:
ifdef CONFIG_HTMLPAGES
install-progs-$(CONFIG_DOC): install-html
install-html: $(HTMLPAGES)
$(Q)mkdir -p "$(DOCDIR)"
$(INSTALL) -m 644 $(HTMLPAGES) "$(DOCDIR)"
endif
ifdef CONFIG_MANPAGES
install-progs-$(CONFIG_DOC): install-man
install-man: $(MANPAGES)
$(Q)mkdir -p "$(MANDIR)/man1"
$(INSTALL) -m 644 $(MANPAGES1) "$(MANDIR)/man1"
$(Q)mkdir -p "$(MANDIR)/man3"
$(INSTALL) -m 644 $(MANPAGES3) "$(MANDIR)/man3"
endif
uninstall: uninstall-doc
uninstall-doc: uninstall-html uninstall-man
uninstall-html:
$(RM) -r "$(DOCDIR)"
uninstall-man:
$(RM) $(addprefix "$(MANDIR)/man1/",$(AVPROGS-yes:%=%.1) $(AVPROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
$(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))
clean:: docclean
distclean:: docclean
$(RM) doc/config.texi
examplesclean:
$(RM) $(ALL_DOC_EXAMPLES) $(ALL_DOC_EXAMPLES_G)
$(RM) $(CLEANSUFFIXES:%=doc/examples/%)
docclean: examplesclean
$(RM) $(CLEANSUFFIXES:%=doc/%)
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 doc/avoptions_*.texi
$(RM) -r doc/doxy/html
-include $(wildcard $(DOCS:%=%.d))
.PHONY: apidoc doc documentation
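# Usage sketch (illustrative): with a configured build tree, the targets
# defined above can be invoked as, e.g.:
#   make documentation   # build all enabled doc formats ($(DOCS))
#   make apidoc          # run Doxygen, output goes to doc/doxy/html
#   make examples        # build the doc/examples programs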


@@ -0,0 +1,11 @@
@chapter Authors
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
@command{git log} in the FFmpeg source directory, or browsing the
online repository at @url{http://source.ffmpeg.org}.
Maintainers for the specific components are listed in the file
@file{MAINTAINERS} in the source code tree.

File diff suppressed because it is too large.


@@ -0,0 +1,188 @@
@c DO NOT EDIT THIS FILE!
@c It was generated by print_options.
@section Format AVOptions
@table @option
@item -avioflags @var{flags} (@emph{input/output})
Possible values:
@table @samp
@item direct
reduce buffering
@end table
@item -probesize @var{integer} (@emph{input})
set probing size
@item -formatprobesize @var{integer} (@emph{input})
number of bytes to probe file format
@item -packetsize @var{integer} (@emph{output})
set packet size
@item -fflags @var{flags} (@emph{input/output})
Possible values:
@table @samp
@item flush_packets
reduce the latency by flushing out packets immediately
@item ignidx
ignore index
@item genpts
generate pts
@item nofillin
do not fill in missing values that can be exactly calculated
@item noparse
disable AVParsers, this needs nofillin too
@item igndts
ignore dts
@item discardcorrupt
discard corrupted frames
@item sortdts
try to interleave output packets by dts
@item keepside
don't merge side data
@item fastseek
fast but inaccurate seeks
@item latm
enable RTP MP4A-LATM payload
@item nobuffer
reduce the latency introduced by optional buffering
@item bitexact
do not write random/volatile data
@end table
@item -seek2any @var{integer} (@emph{input})
allow seeking to non-keyframes on demuxer level when supported
@item -analyzeduration @var{integer} (@emph{input})
specify how many microseconds are analyzed to probe the input
@item -cryptokey @var{hexadecimal string} (@emph{input})
decryption key
@item -indexmem @var{integer} (@emph{input})
max memory used for timestamp index (per stream)
@item -rtbufsize @var{integer} (@emph{input})
max memory used for buffering real-time frames
@item -fdebug @var{flags} (@emph{input/output})
print specific debug info
Possible values:
@table @samp
@item ts
@end table
@item -max_delay @var{integer} (@emph{input/output})
maximum muxing or demuxing delay in microseconds
@item -start_time_realtime @var{integer} (@emph{output})
wall-clock time when stream begins (PTS==0)
@item -fpsprobesize @var{integer} (@emph{input})
number of frames used to probe fps
@item -audio_preload @var{integer} (@emph{output})
microseconds by which audio packets should be interleaved earlier
@item -chunk_duration @var{integer} (@emph{output})
microseconds for each chunk
@item -chunk_size @var{integer} (@emph{output})
size in bytes for each chunk
@item -f_err_detect @var{flags} (@emph{input})
set error detection flags (deprecated; use err_detect, save via avconv)
Possible values:
@table @samp
@item crccheck
verify embedded CRCs
@item bitstream
detect bitstream specification deviations
@item buffer
detect improper bitstream length
@item explode
abort decoding on minor error detection
@item ignore_err
ignore errors
@item careful
consider things that violate the spec, are fast to check and have not been seen in the wild as errors
@item compliant
consider all spec non-compliances as errors
@item aggressive
consider things that a sane encoder shouldn't do as an error
@end table
@item -err_detect @var{flags} (@emph{input})
set error detection flags
Possible values:
@table @samp
@item crccheck
verify embedded CRCs
@item bitstream
detect bitstream specification deviations
@item buffer
detect improper bitstream length
@item explode
abort decoding on minor error detection
@item ignore_err
ignore errors
@item careful
consider things that violate the spec, are fast to check and have not been seen in the wild as errors
@item compliant
consider all spec non-compliances as errors
@item aggressive
consider things that a sane encoder shouldn't do as an error
@end table
@item -use_wallclock_as_timestamps @var{integer} (@emph{input})
use wallclock as timestamps
@item -skip_initial_bytes @var{integer} (@emph{input})
set number of bytes to skip before reading header and frames
@item -correct_ts_overflow @var{integer} (@emph{input})
correct single timestamp overflows
@item -flush_packets @var{integer} (@emph{output})
enable flushing of the I/O context after each packet
@item -metadata_header_padding @var{integer} (@emph{output})
set number of bytes to be written as padding in a metadata header
@item -output_ts_offset @var{value} (@emph{output})
set output timestamp offset
@item -max_interleave_delta @var{integer} (@emph{output})
maximum buffering duration for interleaving
@item -f_strict @var{integer} (@emph{input/output})
how strictly to follow the standards (deprecated; use strict, save via avconv)
Possible values:
@table @samp
@item strict
strictly conform to all the things in the spec no matter what the consequences
@item normal
@item unofficial
allow unofficial extensions
@item experimental
allow non-standardized experimental variants
@end table
@item -strict @var{integer} (@emph{input/output})
how strictly to follow the standards
Possible values:
@table @samp
@item strict
strictly conform to all the things in the spec no matter what the consequences
@item normal
@item unofficial
allow unofficial extensions
@item experimental
allow non-standardized experimental variants
@end table
@item -max_ts_probe @var{integer} (@emph{input})
maximum number of packets to read while waiting for the first timestamp
@item -avoid_negative_ts @var{integer} (@emph{output})
shift timestamps so they start at 0
Possible values:
@table @samp
@item auto
enabled when required by target format
@item disabled
do not change timestamps
@item make_non_negative
shift timestamps so they are non-negative
@item make_zero
shift timestamps so they start at 0
@end table
@item -dump_separator @var{string} (@emph{input/output})
set information dump field separator
@item -codec_whitelist @var{string} (@emph{input})
List of decoders that are allowed to be used
@item -format_whitelist @var{string} (@emph{input})
List of demuxers that are allowed to be used
@end table
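As an illustrative sketch (file names and values are made up, not defaults),
several of these options can be combined on the @command{ffmpeg} command line,
e.g. to raise the probing limits and generate missing PTS while stream-copying:
@example
ffmpeg -probesize 50M -analyzeduration 10M -fflags +genpts -i input.ts -c copy output.mkv
@end example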


@@ -0,0 +1,180 @@
@chapter Bitstream Filters
@c man begin BITSTREAM FILTERS
When you configure your FFmpeg build, all the supported bitstream
filters are enabled by default. You can list all available ones using
the configure option @code{--list-bsfs}.
You can disable all the bitstream filters using the configure option
@code{--disable-bsfs}, and selectively enable any bitstream filter using
the option @code{--enable-bsf=BSF}, or you can disable a particular
bitstream filter using the option @code{--disable-bsf=BSF}.
The option @code{-bsfs} of the ff* tools will display the list of
all the supported bitstream filters included in your build.
The ff* tools have a -bsf option applied per stream, taking a
comma-separated list of filters, whose parameters follow the filter
name after a '='.
@example
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
@end example
Below is a description of the currently available bitstream filters,
with their parameters, if any.
@section aac_adtstoasc
Convert MPEG-2/4 AAC with ADTS headers to an MPEG-4 Audio Specific
Configuration bitstream.
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
ADTS header and removes the ADTS header.
This is required for example when copying an AAC stream from a raw
ADTS AAC container to a FLV or a MOV/MP4 file.
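For example, a command along the following lines (file names are illustrative)
copies an ADTS AAC stream into an MP4 container:
@example
ffmpeg -i input.aac -c:a copy -bsf:a aac_adtstoasc output.mp4
@end example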
@section chomp
Remove zero padding at the end of a packet.
@section dump_extra
Add extradata to the beginning of the filtered packets.
The additional argument specifies which packets should be filtered.
It accepts the values:
@table @samp
@item a
add extradata to all key packets, but only if @var{local_header} is
set in the @option{flags2} codec context field
@item k
add extradata to all key packets
@item e
add extradata to all packets
@end table
If not specified, @samp{k} is assumed.
For example the following @command{ffmpeg} command forces a global
header (thus disabling individual packet headers) in the H.264 packets
generated by the @code{libx264} encoder, but corrects them by adding
the header stored in extradata to the key packets:
@example
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
@end example
@section h264_mp4toannexb
Convert an H.264 bitstream from length prefixed mode to start code
prefixed mode (as defined in the Annex B of the ITU-T H.264
specification).
This is required by some streaming formats, typically the MPEG-2
transport stream format ("mpegts").
For example to remux an MP4 file containing an H.264 stream to mpegts
format with @command{ffmpeg}, you can use the command:
@example
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
@end example
@section imxdump
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
Pro decoder. This filter only applies to the mpeg2video codec, and is
likely not needed for Final Cut Pro 7 and newer with the appropriate
@option{-tag:v}.
For example, to remux 30 MB/sec NTSC IMX to MOV:
@example
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
@end example
@section mjpeg2jpeg
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
MJPEG is a video codec wherein each video frame is essentially a
JPEG image. The individual frames can be extracted without loss,
e.g. by
@example
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
@end example
Unfortunately, these chunks are incomplete JPEG images, because
they lack the DHT segment required for decoding. Quoting from
@url{http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml}:
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
commented that "MJPEG, or at least the MJPEG in AVIs having the
MJPG fourcc, is restricted JPEG with a fixed -- and *omitted* --
Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
and it must use basic Huffman encoding, not arithmetic or
progressive. . . . You can indeed extract the MJPEG frames and
decode them with a regular JPEG decoder, but you have to prepend
the DHT segment to them, or else the decoder won't have any idea
how to decompress the data. The exact table necessary is given in
the OpenDML spec."
This bitstream filter patches the header of frames extracted from an MJPEG
stream (carrying the AVI1 header ID and lacking a DHT segment) to
produce fully qualified JPEG images.
@example
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
exiftran -i -9 frame*.jpg
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
@end example
@section mjpega_dump_header
@section movsub
@section mp3_header_decompress
@section mpeg4_unpack_bframes
Unpack DivX-style packed B-frames.
DivX-style packed B-frames are not valid MPEG-4 and were only a
workaround for the broken Video for Windows subsystem.
They use more space, can cause minor AV sync issues, require more
CPU power to decode (unless the player has some decoded picture queue
to compensate the 2,0,2,0 frame per packet style) and cause
trouble if copied into a standard container like mp4 or mpeg-ps/ts,
because MPEG-4 decoders may not be able to decode them, since they are
not valid MPEG-4.
For example to fix an AVI file containing an MPEG-4 stream with
DivX-style packed B-frames using @command{ffmpeg}, you can use the command:
@example
ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi
@end example
@section noise
Damages the contents of packets without damaging the container. Can be
used for fuzzing or testing error resilience/concealment.
Parameters:
A numeric string whose value determines how often output bytes are
modified. Values less than or equal to 0 are forbidden; the lower the
value, the more frequently bytes are modified, with 1 meaning every
byte is modified.
@example
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
@end example
applies the modification to every byte.
@section remove_extra
@c man end BITSTREAM FILTERS

File diff suppressed because one or more lines are too long.


@@ -0,0 +1,64 @@
FFmpeg currently uses a custom build system; this text attempts to document
some of its obscure features and options.
Makefile variables:
V
Disable the default terse mode; the full command issued by make and its
output will be shown on the screen.
DBG
Preprocess x86 external assembler files to a .dbg.asm file in the object
directory, which then gets compiled. Helps developping those assembler
files.
DESTDIR
Destination directory for the install targets, useful to prepare packages
or install FFmpeg in cross-environments.
GEN
Set to 1 to generate the missing or mismatched references.
Makefile targets:
all
Default target, builds all the libraries and the executables.
fate
Run the fate test suite; note that you must have installed it first.
fate-list
List all fate/regression test targets.
install
Install headers, libraries and programs.
examples
Build all examples located in doc/examples.
libavformat/output-example
Build the libavformat basic example.
libavcodec/api-example
Build the libavcodec basic example.
libswscale/swscale-test
Build the swscale self-test (useful also as example).
config
Reconfigure the project with current configuration.
Useful standard make commands:
make -t <target>
Touch all files that otherwise would be built; this is useful to reduce
unneeded rebuilding when changing headers, but note that you must then force
rebuilds of files that actually need them by hand.
make -j<num>
Rebuild with multiple jobs at the same time; faster on multiprocessor systems.
make -k
Continue building in case of errors; this is sometimes useful for the
regression tests, but note that it will still not run all regression tests.
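For example (illustrative invocations, assuming a configured tree):
make V=1 -j4
Verbose build with 4 parallel jobs.
make DESTDIR=/tmp/pkg install
Stage the installation into /tmp/pkg for packaging.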

File diff suppressed because it is too large.

File diff suppressed because it is too large.


@@ -0,0 +1,288 @@
@chapter Decoders
@c man begin DECODERS
Decoders are configured elements in FFmpeg which allow the decoding of
multimedia streams.
When you configure your FFmpeg build, all the supported native decoders
are enabled by default. Decoders requiring an external library must be enabled
manually via the corresponding @code{--enable-lib} option. You can list all
available decoders using the configure option @code{--list-decoders}.
You can disable all the decoders with the configure option
@code{--disable-decoders} and selectively enable / disable single decoders
with the options @code{--enable-decoder=@var{DECODER}} /
@code{--disable-decoder=@var{DECODER}}.
The option @code{-decoders} of the ff* tools will display the list of
enabled decoders.
@c man end DECODERS
@chapter Video Decoders
@c man begin VIDEO DECODERS
A description of some of the currently available video decoders
follows.
@section hevc
HEVC / H.265 decoder.
Note: the @option{skip_loop_filter} option has effect only at level
@code{all}.
@section rawvideo
Raw video decoder.
This decoder decodes rawvideo streams.
@subsection Options
@table @option
@item top @var{top_field_first}
Specify the assumed field type of the input video.
@table @option
@item -1
the video is assumed to be progressive (default)
@item 0
bottom-field-first is assumed
@item 1
top-field-first is assumed
@end table
@end table
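As a sketch (file names and parameters are illustrative), the field order can
be forced while reading raw frames through the rawvideo demuxer:
@example
ffmpeg -f rawvideo -pixel_format yuv420p -video_size 320x240 -top 1 -i input.raw output.mkv
@end example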
@c man end VIDEO DECODERS
@chapter Audio Decoders
@c man begin AUDIO DECODERS
A description of some of the currently available audio decoders
follows.
@section ac3
AC-3 audio decoder.
This decoder implements part of ATSC A/52:2010 and ETSI TS 102 366, as well as
the undocumented RealAudio 3 (a.k.a. dnet).
@subsection AC-3 Decoder Options
@table @option
@item -drc_scale @var{value}
Dynamic Range Scale Factor. The factor to apply to dynamic range values
from the AC-3 stream. This factor is applied exponentially.
There are 3 notable scale factor ranges:
@table @option
@item drc_scale == 0
DRC disabled. Produces full range audio.
@item 0 < drc_scale <= 1
DRC enabled. Applies a fraction of the stream DRC value.
Audio reproduction is between full range and full compression.
@item drc_scale > 1
DRC enabled. Applies drc_scale asymmetrically.
Loud sounds are fully compressed. Soft sounds are enhanced.
@end table
@end table
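For example, DRC can be disabled entirely while decoding (file names are
illustrative):
@example
ffmpeg -drc_scale 0 -i input.ac3 output.wav
@end example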
@section flac
FLAC audio decoder.
This decoder aims to implement the complete FLAC specification from Xiph.
@subsection FLAC Decoder options
@table @option
@item -use_buggy_lpc
The lavc FLAC encoder used to produce buggy streams with high lpc values
(like the default value). This option makes it possible to decode such streams
correctly by using lavc's old buggy lpc logic for decoding.
@end table
@section ffwavesynth
Internal wave synthesizer.
This decoder generates wave patterns according to predefined sequences. Its
use is purely internal and the format of the data it accepts is not publicly
documented.
@section libcelt
libcelt decoder wrapper.
libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
Requires the presence of the libcelt headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libcelt}.
@section libgsm
libgsm decoder wrapper.
libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
the presence of the libgsm headers and library during configuration. You need
to explicitly configure the build with @code{--enable-libgsm}.
This decoder supports both the ordinary GSM and the Microsoft variant.
@section libilbc
libilbc decoder wrapper.
libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
audio codec. Requires the presence of the libilbc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libilbc}.
@subsection Options
The following option is supported by the libilbc wrapper.
@table @option
@item enhance
Enable the enhancement of the decoded audio when set to 1. The default
value is 0 (disabled).
@end table
@section libopencore-amrnb
libopencore-amrnb decoder wrapper.
libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
Narrowband audio codec. Using it requires the presence of the
libopencore-amrnb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrnb}.
An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
without this library.
@section libopencore-amrwb
libopencore-amrwb decoder wrapper.
libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
Wideband audio codec. Using it requires the presence of the
libopencore-amrwb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrwb}.
An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
without this library.
@section libopus
libopus decoder wrapper.
libopus allows libavcodec to decode the Opus Interactive Audio Codec.
Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.
An FFmpeg native decoder for Opus exists, so users can decode Opus
without this library.
@c man end AUDIO DECODERS
@chapter Subtitles Decoders
@c man begin SUBTITLES DECODERS
@section dvbsub
@subsection Options
@table @option
@item compute_clut
@table @option
@item -1
Compute the CLUT if no matching CLUT is in the stream.
@item 0
Never compute the CLUT.
@item 1
Always compute the CLUT and override the one provided in the stream.
@end table
@item dvb_substream
Selects the DVB substream, or all substreams if set to -1, which is the default.
@end table
@section dvdsub
This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
also be found in VobSub file pairs and in some Matroska files.
@subsection Options
@table @option
@item palette
Specify the global palette used by the bitmaps. When stored in VobSub, the
palette is normally specified in the index file; in Matroska, the palette is
stored in the codec extra-data in the same format as in VobSub. In DVDs, the
palette is stored in the IFO file, and therefore not available when reading
from dumped VOB files.
The format for this option is a string containing 16 24-bit hexadecimal
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
@item ifo_palette
Specify the IFO file from which the global palette is obtained.
(experimental)
@item forced_subs_only
Only decode subtitle entries marked as forced. Some titles have forced
and non-forced subtitles in the same track. Setting this flag to @code{1}
will only keep the forced subtitles. Default value is @code{0}.
@end table
@section libzvbi-teletext
Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
subtitles. Requires the presence of the libzvbi headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libzvbi}.
@subsection Options
@table @option
@item txt_page
List of teletext page numbers to decode. You may use the special * string to
match all pages. Pages that do not match the specified list are dropped.
Default value is *.
@item txt_chop_top
Discards the top teletext line. Default value is 1.
@item txt_format
Specifies the format of the decoded subtitles. The teletext decoder is capable
of decoding the teletext pages to bitmaps or to simple text. You should use
"bitmap" for teletext pages, because certain graphics and colors cannot be
expressed in simple text. You might use "text" for teletext-based subtitles if
your application can handle simple text-based subtitles. Default value is
bitmap.
@item txt_left
X offset of generated bitmaps, default is 0.
@item txt_top
Y offset of generated bitmaps, default is 0.
@item txt_chop_spaces
Chops leading and trailing spaces and removes empty lines from the generated
text. This option is useful for teletext based subtitles where empty spaces may
be present at the start or at the end of the lines or empty lines may be
present between the subtitle lines because of double-sized teletext characters.
Default value is 1.
@item txt_duration
Sets the display duration of the decoded teletext pages or subtitles in
milliseconds. Default value is 30000, which is 30 seconds.
@item txt_transparent
Force transparent background of the generated teletext bitmaps. Default value
is 0 which means an opaque (black) background.
@end table
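As an illustration (the page number and file names are made up), teletext
subtitles could be extracted as simple text with something like:
@example
ffmpeg -txt_page 777 -txt_format text -i input.ts output.srt
@end example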
@c man end SUBTITLES DECODERS


@@ -0,0 +1,165 @@
a.summary-letter {
text-decoration: none;
}
a {
color: #2D6198;
}
a:visited {
color: #884488;
}
#banner {
background-color: white;
position: relative;
text-align: center;
}
#banner img {
margin-bottom: 1px;
margin-top: 5px;
}
#body {
margin-left: 1em;
margin-right: 1em;
}
body {
background-color: #313131;
margin: 0;
text-align: justify;
}
.center {
margin-left: auto;
margin-right: auto;
text-align: center;
}
#container {
background-color: white;
color: #202020;
margin-left: 1em;
margin-right: 1em;
}
#footer {
text-align: center;
}
h1 a, h2 a, h3 a, h4 a {
text-decoration: inherit;
color: inherit;
}
h1, h2, h3, h4 {
padding-left: 0.4em;
border-radius: 4px;
padding-bottom: 0.25em;
padding-top: 0.25em;
border: 1px solid #6A996A;
}
h1 {
background-color: #7BB37B;
color: #151515;
font-size: 1.2em;
padding-bottom: 0.3em;
padding-top: 0.3em;
}
h2 {
color: #313131;
font-size: 1.0em;
background-color: #ABE3AB;
}
h3 {
color: #313131;
font-size: 0.9em;
margin-bottom: -6px;
background-color: #BBF3BB;
}
h4 {
color: #313131;
font-size: 0.8em;
margin-bottom: -8px;
background-color: #D1FDD1;
}
img {
border: 0;
}
#navbar {
background-color: #738073;
border-bottom: 1px solid #5C665C;
border-top: 1px solid #5C665C;
margin-top: 12px;
padding: 0.3em;
position: relative;
text-align: center;
}
#navbar a, #navbar_secondary a {
color: white;
padding: 0.3em;
text-decoration: none;
}
#navbar a:hover, #navbar_secondary a:hover {
background-color: #313131;
color: white;
text-decoration: none;
}
#navbar_secondary {
background-color: #738073;
border-bottom: 1px solid #5C665C;
border-left: 1px solid #5C665C;
border-right: 1px solid #5C665C;
padding: 0.3em;
position: relative;
text-align: center;
}
p {
margin-left: 1em;
margin-right: 1em;
}
pre {
margin-left: 3em;
margin-right: 3em;
padding: 0.3em;
border: 1px solid #bbb;
background-color: #f7f7f7;
}
dl dt {
font-weight: bold;
}
#proj_desc {
font-size: 1.2em;
}
#repos {
margin-left: 1em;
margin-right: 1em;
border-collapse: collapse;
border: solid 1px #6A996A;
}
#repos th {
background-color: #7BB37B;
border: solid 1px #6A996A;
}
#repos td {
padding: 0.2em;
border: solid 1px #6A996A;
}


@@ -0,0 +1,525 @@
@chapter Demuxers
@c man begin DEMUXERS
Demuxers are configured elements in FFmpeg that can read the
multimedia streams from a particular type of file.
When you configure your FFmpeg build, all the supported demuxers
are enabled by default. You can list all available ones using the
configure option @code{--list-demuxers}.
You can disable all the demuxers using the configure option
@code{--disable-demuxers}, and selectively enable a single demuxer with
the option @code{--enable-demuxer=@var{DEMUXER}}, or disable it
with the option @code{--disable-demuxer=@var{DEMUXER}}.
The option @code{-formats} of the ff* tools will display the list of
enabled demuxers.
The description of some of the currently available demuxers follows.
@section aa
Audible Format 2, 3, and 4 demuxer.
This demuxer is used to demux Audible Format 2, 3, and 4 (.aa) files.
@section applehttp
Apple HTTP Live Streaming demuxer.
This demuxer presents all AVStreams from all variant streams.
The id field is set to the bitrate variant index number. By setting
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
the caller can decide which variant streams to actually receive.
The total bitrate of the variant that the stream belongs to is
available in a metadata key named "variant_bitrate".
@section apng
Animated Portable Network Graphics demuxer.
This demuxer is used to demux APNG files.
All headers, except the PNG signature, up to (but not including) the first
fcTL chunk are transmitted as extradata.
A frame then consists of all the chunks between two fcTL chunks, or
between the last fcTL chunk and the IEND chunk.
@table @option
@item -ignore_loop @var{bool}
Ignore the loop variable in the file if set.
@item -max_fps @var{int}
Maximum framerate in frames per second (0 for no limit).
@item -default_fps @var{int}
Default framerate in frames per second when none is specified in the file
(0 meaning as fast as possible).
@end table
@section asf
Advanced Systems Format demuxer.
This demuxer is used to demux ASF files and MMS network streams.
@table @option
@item -no_resync_search @var{bool}
Do not try to resynchronize by looking for a certain optional start code.
@end table
@anchor{concat}
@section concat
Virtual concatenation script demuxer.
This demuxer reads a list of files and other directives from a text file and
demuxes them one after the other, as if all their packets had been muxed
together.
The timestamps in the files are adjusted so that the first file starts at 0
and each next file starts where the previous one finishes. Note that it is
done globally and may cause gaps if all streams do not have exactly the same
length.
All files must have the same streams (same codecs, same time base, etc.).
The duration of each file is used to adjust the timestamps of the next file:
if the duration is incorrect (because it was computed using the bit-rate or
because the file is truncated, for example), it can cause artifacts. The
@code{duration} directive can be used to override the duration stored in
each file.
@subsection Syntax
The script is a text file in extended-ASCII, with one directive per line.
Empty lines, leading spaces and lines starting with '#' are ignored. The
following directive is recognized:
@table @option
@item @code{file @var{path}}
Path to a file to read; special characters and spaces must be escaped with
backslash or single quotes.
All subsequent file-related directives apply to that file.
@item @code{ffconcat version 1.0}
Identify the script type and version. It also sets the @option{safe} option
to 1 if it was at its default -1.
To make FFmpeg recognize the format automatically, this directive must
appear exactly as is (no extra space or byte-order-mark) on the very first
line of the script.
@item @code{duration @var{dur}}
Duration of the file. This information can be specified from the file;
specifying it here may be more efficient or help if the information from the
file is not available or accurate.
If the duration is set for all files, then it is possible to seek in the
whole concatenated video.
@item @code{inpoint @var{timestamp}}
In point of the file. When the demuxer opens the file it instantly seeks to the
specified timestamp. Seeking is done so that all streams can be presented
successfully at In point.
This directive works best with intra frame codecs, because for non-intra frame
ones you will usually get extra packets before the actual In point and the
decoded content will most likely contain frames before In point too.
For each file, packets before the file In point will have timestamps less than
the calculated start timestamp of the file (negative in case of the first
file), and the duration of the files (if not specified by the @code{duration}
directive) will be reduced based on their specified In point.
Because of potential packets before the specified In point, packet timestamps
may overlap between two concatenated files.
@item @code{outpoint @var{timestamp}}
Out point of the file. When the demuxer reaches the specified decoding
timestamp in any of the streams, it handles it as an end of file condition and
skips the current and all the remaining packets from all streams.
Out point is exclusive, which means that the demuxer will not output packets
with a decoding timestamp greater or equal to Out point.
This directive works best with intra frame codecs and formats where all streams
are tightly interleaved. For non-intra frame codecs you will usually get
additional packets with presentation timestamp after Out point, therefore the
decoded content will most likely contain frames after Out point too. If your
streams are not tightly interleaved you may not get all the packets from all
streams before Out point, and you may only be able to decode the earliest
stream until Out point.
The duration of the files (if not specified by the @code{duration}
directive) will be reduced based on their specified Out point.
@item @code{file_packet_metadata @var{key=value}}
Metadata of the packets of the file. The specified metadata will be set for
each file packet. You can specify this directive multiple times to add multiple
metadata entries.
@item @code{stream}
Introduce a stream in the virtual file.
All subsequent stream-related directives apply to the last introduced
stream.
Some stream properties must be set in order to allow identifying the
matching streams in the subfiles.
If no streams are defined in the script, the streams from the first file are
copied.
@item @code{exact_stream_id @var{id}}
Set the id of the stream.
If this directive is given, the stream with the corresponding id in the
subfiles will be used.
This is especially useful for MPEG-PS (VOB) files, where the order of the
streams is not reliable.
@end table
@subsection Options
This demuxer accepts the following option:
@table @option
@item safe
If set to 1, reject unsafe file paths. A file path is considered safe if it
does not contain a protocol specification and is relative and all components
only contain characters from the portable character set (letters, digits,
period, underscore and hyphen) and have no period at the beginning of a
component.
If set to 0, any file name is accepted.
The default is -1; it is equivalent to 1 if the format was automatically
probed and 0 otherwise.
@item auto_convert
If set to 1, try to perform automatic conversions on packet data to make the
streams concatenable.
The default is 1.
Currently, the only conversion is adding the h264_mp4toannexb bitstream
filter to H.264 streams in MP4 format. This is necessary in particular if
there are resolution changes.
@end table
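For example, a minimal script (file names are illustrative) could be:
@example
ffconcat version 1.0
file intro.mkv
file main.mkv
@end example
which can then be demuxed and stream-copied with:
@example
ffmpeg -f concat -i list.ffconcat -c copy output.mkv
@end example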
@section flv
Adobe Flash Video Format demuxer.
This demuxer is used to demux FLV files and RTMP network streams.
@table @option
@item -flv_metadata @var{bool}
Allocate the streams according to the onMetaData array content.
@end table
@section libgme
The Game Music Emu library is a collection of video game music file emulators.
See @url{http://code.google.com/p/game-music-emu/} for more information.
Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
@var{tracks} meta data entry.
For very large files, the @option{max_size} option may have to be adjusted.
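A sketch (the file name and track index are made up):
@example
ffmpeg -track_index 2 -i music.nsf track2.wav
@end example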
@section libquvi
Play media from Internet services using the quvi project.
The demuxer accepts a @option{format} option to request a specific quality. It
is by default set to @var{best}.
See @url{http://quvi.sourceforge.net/} for more information.
FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
enabled.
@section gif
Animated GIF demuxer.
It accepts the following options:
@table @option
@item min_delay
Set the minimum valid delay between frames in hundredths of seconds.
Range is 0 to 6000. Default value is 2.
@item max_gif_delay
Set the maximum valid delay between frames in hundredths of seconds.
Range is 0 to 65535. Default value is 65535 (nearly eleven minutes),
the maximum value allowed by the specification.
@item default_delay
Set the default delay between frames in hundredths of seconds.
Range is 0 to 6000. Default value is 10.
@item ignore_loop
GIF files can contain information to loop a certain number of times (or
infinitely). If @option{ignore_loop} is set to 1, then the loop setting
from the input will be ignored and looping will not occur. If set to 0,
then looping will occur and will cycle the number of times according to
the GIF. Default value is 1.
@end table
For example, with the overlay filter, place an infinitely looping GIF
over another video:
@example
ffmpeg -i input.mp4 -ignore_loop 0 -i input.gif -filter_complex overlay=shortest=1 out.mkv
@end example
Note that in the above example the @option{shortest} option for the overlay
filter is used to end the output video at the length of the shortest input file,
which in this case is @file{input.mp4} as the GIF in this example loops
infinitely.
@section image2
Image file demuxer.
This demuxer reads from a list of image files specified by a pattern.
The syntax and meaning of the pattern is specified by the
option @var{pattern_type}.
The pattern may contain a suffix which is used to automatically
determine the format of the images contained in the files.
The size, the pixel format, and the format of each image must be the
same for all the files in the sequence.
This demuxer accepts the following options:
@table @option
@item framerate
Set the frame rate for the video stream. It defaults to 25.
@item loop
If set to 1, loop over the input. Default value is 0.
@item pattern_type
Select the pattern type used to interpret the provided filename.
@var{pattern_type} accepts one of the following values.
@table @option
@item none
Disable pattern matching, therefore the video will only contain the specified
image. You should use this option if you do not want to create sequences from
multiple images and your filenames may contain special pattern characters.
@item sequence
Select a sequence pattern type, used to specify a sequence of files
indexed by sequential numbers.
A sequence pattern may contain the string "%d" or "%0@var{N}d", which
specifies the position of the characters representing a sequential
number in each filename matched by the pattern. If the form
"%0@var{N}d" is used, the string representing the number in each
filename is 0-padded and @var{N} is the total number of 0-padded
digits representing the number. The literal character '%' can be
specified in the pattern with the string "%%".
If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of
the file list specified by the pattern must contain a number
inclusively contained between @var{start_number} and
@var{start_number}+@var{start_number_range}-1, and all the following
numbers must be sequential.
For example the pattern "img-%03d.bmp" will match a sequence of
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
@file{img-010.bmp}, etc.; the pattern "i%%m%%g-%d.jpg" will match a
sequence of filenames of the form @file{i%m%g-1.jpg},
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.
Note that the pattern does not necessarily have to contain "%d" or
"%0@var{N}d"; for example, to convert a single image file
@file{img.jpeg} you can employ the command:
@example
ffmpeg -i img.jpeg img.png
@end example
@item glob
Select a glob wildcard pattern type.
The pattern is interpreted like a @code{glob()} pattern. This is only
selectable if libavformat was compiled with globbing support.
@item glob_sequence @emph{(deprecated, will be removed)}
Select a mixed glob wildcard/sequence pattern.
If your version of libavformat was compiled with globbing support, and
the provided pattern contains at least one glob meta character among
@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
interpreted like a @code{glob()} pattern, otherwise it is interpreted
like a sequence pattern.
All glob special characters @code{%*?[]@{@}} must be prefixed
with "%". To escape a literal "%" you shall use "%%".
For example the pattern @code{foo-%*.jpeg} will match all the
filenames prefixed by "foo-" and terminating with ".jpeg", and
@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
"foo-", followed by a sequence of three characters, and terminating
with ".jpeg".
This pattern type is deprecated in favor of @var{glob} and
@var{sequence}.
@end table
Default value is @var{glob_sequence}.
@item pixel_format
Set the pixel format of the images to read. If not specified the pixel
format is guessed from the first image file in the sequence.
@item start_number
Set the index of the file matched by the image file pattern to start
to read from. Default value is 0.
@item start_number_range
Set the index interval range to check when looking for the first image
file in the sequence, starting from @var{start_number}. Default value
is 5.
@item ts_from_file
If set to 1, the frame timestamp is set to the modification time of the image
file. Note that monotonicity of timestamps is not guaranteed: images go in the
same order as without this option. Default value is 0.
If set to 2, the frame timestamp is set to the modification time of the image
file with nanosecond precision.
@item video_size
Set the video size of the images to read. If not specified the video
size is guessed from the first image file in the sequence.
@end table
@subsection Examples
@itemize
@item
Use @command{ffmpeg} for creating a video from the images in the file
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
input frame rate of 10 frames per second:
@example
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
@end example
@item
As above, but start by reading from a file with index 100 in the sequence:
@example
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
@end example
@item
Read images matching the "*.png" glob pattern, that is, all the files
terminating with the ".png" suffix:
@example
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
@end example
@end itemize
@section mpegts
MPEG-2 transport stream demuxer.
This demuxer accepts the following options:
@table @option
@item resync_size
Set size limit for looking up a new synchronization. Default value is
65536.
@item fix_teletext_pts
Override teletext packet PTS and DTS values with the timestamps calculated
from the PCR of the first program which the teletext stream is part of and is
not discarded. Default value is 1, set this option to 0 if you want your
teletext packet PTS and DTS values untouched.
@item ts_packetsize
Output option carrying the raw packet size in bytes.
Shows the detected raw packet size; it cannot be set by the user.
@item scan_all_pmts
Scan and combine all PMTs. The value is an integer with value from -1
to 1 (-1 means automatic setting, 1 means enabled, 0 means
disabled). Default value is -1.
@end table
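For instance (an illustrative sketch), teletext timestamps can be left
untouched while still scanning all PMTs:
@example
ffmpeg -fix_teletext_pts 0 -scan_all_pmts 1 -i input.ts -c copy output.ts
@end example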
@section rawvideo
Raw video demuxer.
This demuxer allows one to read raw video data. Since there is no header
specifying the assumed video parameters, the user must specify them
in order to be able to decode the data correctly.
This demuxer accepts the following options:
@table @option
@item framerate
Set input video frame rate. Default value is 25.
@item pixel_format
Set the input video pixel format. Default value is @code{yuv420p}.
@item video_size
Set the input video size. This value must be specified explicitly.
@end table
For example to read a rawvideo file @file{input.raw} with
@command{ffplay}, assuming a pixel format of @code{rgb24}, a video
size of @code{320x240}, and a frame rate of 10 images per second, use
the command:
@example
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
@end example
@section sbg
SBaGen script demuxer.
This demuxer reads the script language used by SBaGen
@url{http://uazu.net/sbagen/} to generate binaural beats sessions. An SBG
script looks like this:
@example
-SE
a: 300-2.5/3 440+4.5/0
b: 300-2.5/0 440+4.5/3
off: -
NOW == a
+0:07:00 == b
+0:14:00 == a
+0:21:00 == b
+0:30:00 off
@end example
An SBG script can mix absolute and relative timestamps. If the script uses
either only absolute timestamps (including the script start time) or only
relative ones, then its layout is fixed, and the conversion is
straightforward. On the other hand, if the script mixes both kinds of
timestamps, then the @var{NOW} reference for relative timestamps will be
taken from the current time of day at the time the script is read, and the
script layout will be frozen according to that reference. That means that if
the script is directly played, the actual times will match the absolute
timestamps up to the sound controller's clock accuracy, but if the user
somehow pauses the playback or seeks, all times will be shifted accordingly.
@section tedcaptions
JSON captions used for @url{http://www.ted.com/, TED Talks}.
TED does not provide links to the captions, but they can be guessed from the
page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree
contains a bookmarklet to expose them.
This demuxer accepts the following option:
@table @option
@item start_time
Set the start time of the TED talk, in milliseconds. The default is 15000
(15s). It is used to sync the captions with the downloadable videos, because
they include a 15s intro.
@end table
Example: convert the captions to a format most players understand:
@example
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
@end example
@c man end DEMUXERS


@@ -0,0 +1,809 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle Developer Documentation
@titlepage
@center @titlefont{Developer Documentation}
@end titlepage
@top
@contents
@chapter Developers Guide
@section Notes for external developers
This document is mostly useful for internal FFmpeg developers.
External developers who need to use the API in their application should
refer to the API doxygen documentation in the public headers, and
check the examples in @file{doc/examples} and in the source code to
see how the public API is employed.
You can use the FFmpeg libraries in your commercial program, but you
are encouraged to @emph{publish any patch you make}. In this case the
best way to proceed is to send your patches to the ffmpeg-devel
mailing list following the guidelines illustrated in the remainder of
this document.
For more detailed legal information about the use of FFmpeg in
external programs read the @file{LICENSE} file in the source tree and
consult @url{http://ffmpeg.org/legal.html}.
@section Contributing
There are 3 ways by which code gets into FFmpeg.
@itemize @bullet
@item Submitting patches to the main developer mailing list;
see @ref{Submitting patches} for details.
@item Directly committing changes to the main tree.
@item Committing changes to a git clone, for example on github.com or
gitorious.org, and asking us to merge these changes.
@end itemize
Whichever way, changes should be reviewed by the maintainer of the code
before they are committed, and they should follow the @ref{Coding Rules}.
The developer making the commit and the author are responsible for their changes
and should try to fix issues their commit causes.
@anchor{Coding Rules}
@section Coding Rules
@subsection Code formatting conventions
There are the following guidelines regarding the indentation in files:
@itemize @bullet
@item
Indent size is 4.
@item
The TAB character is forbidden outside of Makefiles as is any
form of trailing whitespace. Commits containing either will be
rejected by the git repository.
@item
You should try to limit your code lines to 80 characters; however, do so if
and only if this improves readability.
@end itemize
The presentation is inspired by 'indent -i4 -kr -nut'.
The main priority in FFmpeg is simplicity and small code size in order to
minimize the bug count.
@subsection Comments
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
can be generated automatically. All nontrivial functions should have a comment
above them explaining what the function does, even if it is just one sentence.
All structures and their member variables should be documented, too.
Avoid Qt-style and similar Doxygen syntax with @code{!} in it, i.e. replace
@code{//!} with @code{///} and similar. Also @@ syntax should be employed
for markup commands, i.e. use @code{@@param} and not @code{\param}.
@example
/**
* @@file
* MPEG codec.
* @@author ...
*/
/**
* Summary sentence.
* more text ...
* ...
*/
typedef struct Foobar @{
int var1; /**< var1 description */
int var2; ///< var2 description
/** var3 description */
int var3;
@} Foobar;
/**
* Summary sentence.
* more text ...
* ...
* @@param my_parameter description of my_parameter
* @@return return value description
*/
int myfunc(int my_parameter)
...
@end example
@subsection C language features
FFmpeg is programmed in the ISO C90 language with a few additional
features from ISO C99, namely:
@itemize @bullet
@item
the @samp{inline} keyword;
@item
@samp{//} comments;
@item
designated struct initializers (@samp{struct s x = @{ .i = 17 @};})
@item
compound literals (@samp{x = (struct s) @{ 17, 23 @};})
@end itemize
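As a short illustrative sketch (not taken from the FFmpeg sources) of these
accepted constructs:
@example
static inline int twice(int x)      // 'inline' keyword, '//' comment
@{
    struct s @{ int i, j; @};
    struct s a = @{ .j = x @};      /* designated struct initializer */
    a = (struct s) @{ x, 2 * x @};  /* compound literal */
    return a.j;
@}
@end example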
These features are supported by all compilers we care about, so we will not
accept patches to remove their use unless they absolutely do not impair
clarity and performance.
All code must compile with recent versions of GCC and a number of other
currently supported compilers. To ensure compatibility, please do not use
additional C99 features or GCC extensions. Especially watch out for:
@itemize @bullet
@item
mixing statements and declarations;
@item
@samp{long long} (use @samp{int64_t} instead);
@item
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;
@item
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @}))}).
@end itemize
@subsection Naming conventions
All names should be composed with underscores (_), not CamelCase. For example,
@samp{avfilter_get_video_buffer} is an acceptable function name and
@samp{AVFilterGetVideo} is not. The exceptions to this are type names, such
as structs and enums; they should always be in CamelCase.
There are the following conventions for naming variables and functions:
@itemize @bullet
@item
For local variables no prefix is required.
@item
For file-scope variables and functions declared as @code{static}, no prefix
is required.
@item
For variables and functions visible outside of file scope, but only used
internally by a library, an @code{ff_} prefix should be used,
e.g. @samp{ff_w64_demuxer}.
@item
For variables and functions visible outside of file scope, used internally
across multiple libraries, use @code{avpriv_} as prefix, for example,
@samp{avpriv_aac_parse_header}.
@item
Each library has its own prefix for public symbols, in addition to the
commonly used @code{av_} (@code{avformat_} for libavformat,
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
Check the existing code and choose names accordingly.
Note that some symbols without these prefixes are also exported for
backward-compatibility reasons. These exceptions are declared in the
@code{lib<name>/lib<name>.v} files.
@end itemize
Furthermore, name space reserved for the system should not be invaded.
Identifiers ending in @code{_t} are reserved by
@url{http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02, POSIX}.
Also avoid names starting with @code{__} or @code{_} followed by an uppercase
letter as they are reserved by the C standard. Names starting with @code{_}
are reserved at the file level and may not be used for externally visible
symbols. If in doubt, just avoid names starting with @code{_} altogether.
@subsection Miscellaneous conventions
@itemize @bullet
@item
fprintf and printf are forbidden in libavformat and libavcodec;
please use av_log() instead.
@item
Casts should be used only when necessary. Unneeded parentheses
should also be avoided if they don't make the code easier to understand.
@end itemize
@subsection Editor configuration
In order to configure Vim to follow FFmpeg formatting conventions, paste
the following snippet into your @file{.vimrc}:
@example
" indentation rules for FFmpeg: 4 spaces, no tabs
set expandtab
set shiftwidth=4
set softtabstop=4
set cindent
set cinoptions=(0
" Allow tabs in Makefiles.
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
" Trailing whitespace and tabs are forbidden, so highlight them.
highlight ForbiddenWhitespace ctermbg=red guibg=red
match ForbiddenWhitespace /\s\+$\|\t/
" Do not highlight spaces at the end of line while typing on that line.
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
@end example
For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
@lisp
(c-add-style "ffmpeg"
'("k&r"
(c-basic-offset . 4)
(indent-tabs-mode . nil)
(show-trailing-whitespace . t)
(c-offsets-alist
(statement-cont . (c-lineup-assignments +)))
)
)
(setq c-default-style "ffmpeg")
@end lisp
@section Development Policy
@enumerate
@item
Contributions should be licensed under the
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
including an "or any later version" clause, or, if you prefer
a gift-style license, the
@uref{http://opensource.org/licenses/isc-license.txt, ISC} or
@uref{http://mit-license.org/, MIT} license.
@uref{http://www.gnu.org/licenses/gpl-2.0.html, GPL 2} including
an "or any later version" clause is also acceptable, but LGPL is
preferred.
If you add a new file, give it a proper license header. Do not copy and
paste it from a random place; use an existing file as a template.
@item
You must not commit code which breaks FFmpeg! This means unfinished but
enabled code which breaks compilation, or code which compiles but does not
work or breaks the regression tests.
You can commit unfinished stuff (for testing etc.), but it must be disabled
(#ifdef etc.) by default so it does not interfere with other developers'
work.
@item
The commit message should have a short first line in the form of
a @samp{topic: short description} as a header, separated by a newline
from the body consisting of an explanation of why the change is necessary.
If the commit fixes a known bug on the bug tracker, the commit message
should include its bug ID. Referring to the issue on the bug tracker does
not exempt you from writing an excerpt of the bug in the commit message.
@item
You do not have to over-test things. If it works for you, and you think it
should work for others, then commit. If your code has problems
(portability, triggers compiler bugs, unusual environment etc) they will be
reported and eventually fixed.
@item
Do not commit unrelated changes together, split them into self-contained
pieces. Also do not forget that if part B depends on part A, but A does not
depend on B, then A can and should be committed first and separate from B.
Keeping changes well split into self-contained parts makes reviewing and
understanding them on the commit log mailing list easier. This also helps
in case of debugging later on.
Also, if you have doubts about splitting or not splitting, do not hesitate to
ask/discuss it on the developer mailing list.
@item
Do not change behavior of the programs (renaming options etc) or public
API or ABI without first discussing it on the ffmpeg-devel mailing list.
Do not remove functionality from the code. Just improve!
Note: Redundant code can be removed.
@item
Do not commit changes to the build system (Makefiles, configure script)
which change behavior, defaults etc, without asking first. The same
applies to compiler warning fixes, trivial looking fixes and to code
maintained by other developers. We usually have a reason for doing things
the way we do. Send your changes as patches to the ffmpeg-devel mailing
list, and if the code maintainers say OK, you may commit. This does not
apply to files you wrote and/or maintain.
@item
We refuse source indentation and other cosmetic changes if they are mixed
with functional changes; such commits will be rejected and removed. Every
developer has their own indentation style; you should not change it. Of
course, if you (re)write something, you can use your own style, even though
we would prefer if the indentation throughout FFmpeg were consistent (many
projects force a given indentation style - we do not). If you really need
to make indentation changes (try to avoid this), separate them strictly
from real changes.
NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
then either do NOT change the indentation of the inner part (do not move
it to the right), or do so in a separate commit.
@item
Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
Recommended format:
@example
area changed: Short 1 line description
details describing what and why and giving references.
@end example
@item
Make sure the author of the commit is set correctly (see @code{git commit
--author}). If you apply a patch, send an
If you apply a patch, send an
answer to ffmpeg-devel (or wherever you got the patch from) saying that
you applied the patch.
@item
When applying patches that have been discussed (at length) on the mailing
list, reference the thread in the log message.
@item
Do NOT commit to code actively maintained by others without permission.
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
timeframe (12h for build failures and security fixes, 3 days for small changes,
1 week for big patches) then commit your patch if you think it is OK.
Also note, the maintainer can simply ask for more time to review!
@item
Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
are sent there and reviewed by all the other developers. Bugs and possible
improvements or general questions regarding commits are discussed there. We
expect you to react if problems with your code are uncovered.
@item
Update the documentation if you change behavior or add features. If you are
unsure how best to do this, send a patch to ffmpeg-devel; the documentation
maintainer(s) will review and commit your stuff.
@item
Try to keep important discussions and requests (also) on the public
developer mailing list, so that all developers can benefit from them.
@item
Never write to unallocated memory, never write over the end of arrays, and
always check values read from some untrusted source before using them as
an array index or for other risky operations.
@item
Remember to check if you need to bump versions for the specific libav*
parts (libavutil, libavcodec, libavformat) you are changing. You need
to change the version integer.
Incrementing the first component means no backward compatibility to
previous versions (e.g. removal of a function from the public API).
Incrementing the second component means backward compatible change
(e.g. addition of a function to the public API or extension of an
existing data structure).
Incrementing the third component means a noteworthy binary compatible
change (e.g. encoder bug fix that matters for the decoder). The third
component always starts at 100 to distinguish FFmpeg from Libav.
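As a hypothetical illustration, a backward compatible API addition to
libavcodec would bump the minor version and reset the micro version in
@file{libavcodec/version.h} (the numbers shown are made up):
@example
 #define LIBAVCODEC_VERSION_MAJOR  56
-#define LIBAVCODEC_VERSION_MINOR  41
-#define LIBAVCODEC_VERSION_MICRO 102
+#define LIBAVCODEC_VERSION_MINOR  42
+#define LIBAVCODEC_VERSION_MICRO 100
@end example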
@item
Compiler warnings indicate potential bugs or code with bad style. If a type of
warning always points to correct and clean code, that warning should
be disabled, not the code changed.
Thus the remaining warnings can either be bugs or correct code.
If it is a bug, the bug has to be fixed. If it is not, the code should
be changed to not generate a warning unless that causes a slowdown
or obfuscates the code.
@item
Make sure that no parts of the codebase that you maintain are missing from the
@file{MAINTAINERS} file. If something that you want to maintain is missing, add it with
your name after it.
If at some point you no longer want to maintain some code, then please help
find a new maintainer, and do not forget to update the @file{MAINTAINERS} file.
@end enumerate
We think our rules are not too hard. If you have comments, contact us.
@anchor{Submitting patches}
@section Submitting patches
First, read the @ref{Coding Rules} above if you have not yet done so, in
particular the rules regarding patch submission.
When you submit your patch, please use @code{git format-patch} or
@code{git send-email}. We cannot read other diffs :-)
Also please do not submit a patch which contains several unrelated changes.
Split it into separate, self-contained pieces. This does not mean splitting
file by file. Instead, make the patch as small as possible while still
keeping it as a logical unit that contains an individual change, even
if it spans multiple files. This makes reviewing your patches much easier
for us and greatly increases your chances of getting your patch applied.
Use the patcheck tool of FFmpeg to check your patch.
The tool is located in the tools directory.
Run the @ref{Regression tests} before submitting a patch in order to verify
it does not cause unexpected problems.
It also helps quite a bit if you tell us what the patch does (for example
'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
and has no lrint()').
Also, if you send several patches, please send each patch in a separate mail;
do not attach several unrelated patches to the same mail.
Patches should be posted to the
@uref{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
mailing list. Use @code{git send-email} when possible since it will properly
send patches without requiring extra care. If you cannot, then send patches
as base64-encoded attachments, so your patch is not trashed during
transmission.
Your patch will be reviewed on the mailing list. You will likely be asked
to make some changes and are expected to send in an improved version that
incorporates the requests from the review. This process may go through
several iterations. Once your patch is deemed good enough, some developer
will pick it up and commit it to the official FFmpeg tree.
Give us a few days to react. But if some time passes without reaction,
send a reminder by email. Your patch should eventually be dealt with.
@section New codecs or formats checklist
@enumerate
@item
Did you use av_cold for codec initialization and close functions?
@item
Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
AVInputFormat/AVOutputFormat struct?
@item
Did you bump the minor version number (and reset the micro version
number) in @file{libavcodec/version.h} or @file{libavformat/version.h}?
@item
Did you register it in @file{allcodecs.c} or @file{allformats.c}?
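For instance, a decoder named @code{foo} (hypothetical) would typically be
registered there with a line like:
@example
REGISTER_DECODER(FOO, foo);
@end example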
@item
Did you add the AVCodecID to @file{avcodec.h}?
When adding new codec IDs, also add an entry to the codec descriptor
list in @file{libavcodec/codec_desc.c}.
@item
If it has a FourCC, did you add it to @file{libavformat/riff.c},
even if it is only a decoder?
@item
Did you add a rule to compile the appropriate files in the Makefile?
Remember to do this even if you're just adding a format to a file that is
already being compiled by some other rule, like a raw demuxer.
@item
Did you add an entry to the table of supported formats or codecs in
@file{doc/general.texi}?
@item
Did you add an entry in the Changelog?
@item
If it depends on a parser or a library, did you add that dependency in
configure?
@item
Did you @code{git add} the appropriate files before committing?
@item
Did you make sure it compiles standalone, i.e. with
@code{configure --disable-everything --enable-decoder=foo}
(or @code{--enable-demuxer} or whatever your component is)?
@end enumerate
@section Patch submission checklist
@enumerate
@item
Does @code{make fate} pass with the patch applied?
@item
Was the patch generated with git format-patch or send-email?
@item
Did you sign off your patch? (git commit -s)
See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
of sign off.
@item
Did you provide a clear git commit log message?
@item
Is the patch against latest FFmpeg git master branch?
@item
Are you subscribed to ffmpeg-devel?
(the list is subscribers only due to spam)
@item
Have you checked that the changes are minimal, so that the same cannot be
achieved with a smaller patch and/or simpler final code?
@item
If the change is to speed-critical code, did you benchmark it?
@item
If you did any benchmarks, did you provide them in the mail?
@item
Have you checked that the patch does not introduce buffer overflows or
other security issues?
@item
Did you test your decoder or demuxer against damaged data? If not, see
tools/trasher, the noise bitstream filter, and
@uref{http://caca.zoy.org/wiki/zzuf, zzuf}. Your decoder or demuxer
should not crash, end in a (near) infinite loop, or allocate ridiculous
amounts of memory when fed damaged data.
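For example, a damaged sample might be produced with zzuf used as a filter
(the flags below are illustrative):
@example
zzuf -s 42 -r 0.001 < sample.mkv > fuzzed.mkv
@end example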
@item
Did you test your decoder or demuxer against sample files?
Samples may be obtained at @url{http://samples.ffmpeg.org}.
@item
Does the patch not mix functional and cosmetic changes?
@item
Did you add tabs or trailing whitespace to the code? Both are forbidden.
@item
Is the patch attached to the email you send?
@item
Is the mime type of the patch correct? It should be text/x-diff or
text/x-patch or at least text/plain and not application/octet-stream.
@item
If the patch fixes a bug, did you provide a verbose analysis of the bug?
@item
If the patch fixes a bug, did you provide enough information, including
a sample, so the bug can be reproduced and the fix can be verified?
Note: please do not attach samples >100k to mails; rather, provide a
URL. You can upload to ftp://upload.ffmpeg.org
@item
Did you provide a verbose summary of what the patch changes?
@item
Did you provide a verbose explanation of why it changes things the way it does?
@item
Did you provide a verbose summary of the user visible advantages and
disadvantages if the patch is applied?
@item
Did you provide an example so we can verify the new feature added by the
patch easily?
@item
If you added a new file, did you insert a license header? It should be
taken from FFmpeg, not randomly copied and pasted from somewhere else.
@item
You should maintain alphabetical order in alphabetically ordered lists as
long as doing so does not break API/ABI compatibility.
@item
Lines with similar content should be aligned vertically when doing so
improves readability.
@item
Consider adding a regression test for your code.
@item
If you added YASM code, please check that things still work with @code{--disable-yasm}.
@item
Make sure you check the return values of functions and return appropriate
error codes. Especially memory allocation functions like @code{av_malloc()}
are notoriously left unchecked, which is a serious problem.
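A minimal sketch of the expected pattern:
@example
buf = av_malloc(size);
if (!buf)
    return AVERROR(ENOMEM);
@end example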
@item
Test your code with valgrind and/or Address Sanitizer to ensure it is free
of leaks, out-of-array accesses, etc.
@end enumerate
@section Patch review process
All patches posted to ffmpeg-devel will be reviewed, unless they contain a
clear note that the patch is not for the git master branch.
Reviews and comments will be posted as replies to the patch on the
mailing list. The patch submitter then has to take care of every comment;
that can be done by resubmitting a changed patch or through discussion.
Resubmitted patches will themselves be reviewed like any other patch. If at
some point a patch passes review with no comments, then it is approved; for
simple and small patches that can happen immediately, while large patches
will generally have to be changed and reviewed many times before they are
approved.
After a patch is approved it will be committed to the repository.
We will review all submitted patches, but sometimes we are quite busy so
especially for large patches this can take several weeks.
If you feel that the review process is too slow and you are willing to try to
take over maintainership of the area of code you change, then just clone
git master and maintain the area of code there. We will merge each area from
where it is best maintained.
When resubmitting patches, please do not make any significant changes
not related to the comments received during review. Such patches will
be rejected. Instead, submit significant changes or new features as
separate patches.
Everyone is welcome to review patches. Also, if you are waiting for your patch
to be reviewed, please consider helping to review other patches; that is a great
way to get everyone's patches reviewed sooner.
@anchor{Regression tests}
@section Regression tests
Before submitting a patch (or committing to the repository), you should at least
test that you did not break anything.
Running @samp{make fate} accomplishes this; please see @url{fate.html} for details.
[Of course, some patches may change the results of the regression tests. In
this case, the reference results of the regression tests shall be modified
accordingly.]
@subsection Adding files to the fate-suite dataset
When there is no muxer or encoder available to generate test media for a
specific test, then the media has to be included in the fate-suite.
First, please make sure that the sample file is as small as possible to test the
respective decoder or demuxer sufficiently. Large files increase network
bandwidth and disk space requirements.
Once you have a working fate test and fate sample, provide, in the commit
message or the introductory message for the patch series that you post to
the ffmpeg-devel mailing list, a direct link for downloading the sample media.
@subsection Visualizing Test Coverage
The FFmpeg build system allows visualizing the test coverage in an easy
manner with the coverage tools @code{gcov}/@code{lcov}. This involves
the following steps:
@enumerate
@item
Configure to compile with instrumentation enabled:
@code{configure --toolchain=gcov}.
@item
Run your test case, either manually or via FATE. This can be either
the full FATE regression suite, or any arbitrary invocation of any
front-end tool provided by FFmpeg, in any combination.
@item
Run @code{make lcov} to generate coverage data in HTML format.
@item
View @code{lcov/index.html} in your preferred HTML viewer.
@end enumerate
You can use the command @code{make lcov-reset} to reset the coverage
measurements. You will need to rerun @code{make lcov} after running a
new test.
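Put together, a typical coverage session might look like this:
@example
./configure --toolchain=gcov
make fate
make lcov        # writes the HTML report below lcov/
make lcov-reset  # clear counters before the next measurement
@end example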
@subsection Using Valgrind
The configure script provides a shortcut for using valgrind to spot bugs
related to memory handling. Just add the option
@code{--toolchain=valgrind-memcheck} or @code{--toolchain=valgrind-massif}
to your configure line, and reasonable defaults will be set for running
FATE under the supervision of either the @strong{memcheck} or the
@strong{massif} tool of the valgrind suite.
In case you need finer control over how valgrind is invoked, use the
@code{--target-exec='valgrind <your_custom_valgrind_options>'} option in
your configure line instead.
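For example (the extra valgrind flags are illustrative):
@example
./configure --toolchain=valgrind-memcheck
# or, with custom options:
./configure --target-exec='valgrind --leak-check=full --track-origins=yes'
@end example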
@anchor{Release process}
@section Release process
FFmpeg maintains a set of @strong{release branches}, which are the
recommended deliverable for system integrators and distributors (such as
Linux distributions, etc.). At regular times, a @strong{release
manager} prepares, tests and publishes tarballs on the
@url{http://ffmpeg.org} website.
There are two kinds of releases:
@enumerate
@item
@strong{Major releases} always include the latest and greatest
features and functionality.
@item
@strong{Point releases} are cut from @strong{release} branches,
which are named @code{release/X}, with @code{X} being the release
version number.
@end enumerate
Note that we promise our users that shared libraries from any FFmpeg
release will never break programs that have been @strong{compiled} against
previous versions of @strong{the same release series}!
However, from time to time, we do make API changes that require adaptations
in applications. Such changes are only allowed in (new) major releases and
require further steps such as bumping library version numbers and/or
adjustments to the symbol versioning file. Please discuss such changes
on the @strong{ffmpeg-devel} mailing list in time to allow forward planning.
@anchor{Criteria for Point Releases}
@subsection Criteria for Point Releases
Changes that match the following criteria are valid candidates for
inclusion into a point release:
@enumerate
@item
Fixes a security issue, preferably identified by a @strong{CVE
number} issued by @url{http://cve.mitre.org/}.
@item
Fixes a documented bug in @url{https://trac.ffmpeg.org}.
@item
Improves the included documentation.
@item
Retains both source code and binary compatibility with previous
point releases of the same release branch.
@end enumerate
The order for checking the rules is (1 OR 2 OR 3) AND 4.
@subsection Release Checklist
The release process involves the following steps:
@enumerate
@item
Ensure that the @file{RELEASE} file contains the version number for
the upcoming release.
@item
Add the release at @url{https://trac.ffmpeg.org/admin/ticket/versions}.
@item
Announce the intent to do a release to the mailing list.
@item
Make sure all relevant security fixes have been backported. See
@url{https://ffmpeg.org/security.html}.
@item
Ensure that the FATE regression suite still passes in the release
branch on at least @strong{i386} and @strong{amd64}
(cf. @ref{Regression tests}).
@item
Prepare the release tarballs in @code{bz2} and @code{gz} formats, and
supplementary files that contain @code{gpg} signatures.
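For instance, a detached signature for a tarball could be produced with
(file name hypothetical):
@example
gpg --armor --detach-sign ffmpeg-2.8.5.tar.bz2
@end example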
@item
Publish the tarballs at @url{http://ffmpeg.org/releases}. Create and
push an annotated tag in the form @code{nX}, with @code{X}
containing the version number.
@item
Propose and send a patch to the @strong{ffmpeg-devel} mailing list
with a news entry for the website.
@item
Publish the news entry.
@item
Send announcement to the mailing list.
@end enumerate
@bye

View File

@@ -0,0 +1,25 @@
@chapter Device Options
@c man begin DEVICE OPTIONS
The libavdevice library provides the same interface as
libavformat. Namely, an input device is treated like a demuxer and an
output device like a muxer, and the interface and generic device
options are the same as those provided by libavformat (see the
ffmpeg-formats manual).
In addition, each input or output device may support so-called private
options, which are specific to that component.
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, or, for programmatic use, by setting the value explicitly in
the device's @code{AVFormatContext} options or using the
@file{libavutil/opt.h} API.
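As a minimal programmatic sketch (the device path, input format name and
option below are illustrative):
@example
AVFormatContext *fmt_ctx = NULL;
AVDictionary *opts = NULL;
av_dict_set(&opts, "framerate", "25", 0);   /* private device option */
avformat_open_input(&fmt_ctx, "/dev/video0",
                    av_find_input_format("v4l2"), &opts);
av_dict_free(&opts);
@end example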
@c man end DEVICE OPTIONS
@ifclear config-writeonly
@include indevs.texi
@end ifclear
@ifclear config-readonly
@include outdevs.texi
@end ifclear

View File

@@ -0,0 +1,21 @@
#!/bin/sh
SRC_PATH="${1}"
DOXYFILE="${2}"
DOXYGEN="${3}"
shift 3
if [ -e "$SRC_PATH/VERSION" ]; then
VERSION=`cat "$SRC_PATH/VERSION"`
else
VERSION=`cd "$SRC_PATH"; git describe`
fi
$DOXYGEN - <<EOF
@INCLUDE = ${DOXYFILE}
INPUT = $@
EXAMPLE_PATH = ${SRC_PATH}/doc/examples
HTML_TIMESTAMP = NO
PROJECT_NUMBER = $VERSION
EOF

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,174 @@
The following table lists most error codes found in various operating
systems supported by FFmpeg.
Code            Std   F  OS(LBMWwb)  Text (YMMV)
E2BIG POSIX ++++++ Argument list too long
EACCES POSIX ++++++ Permission denied
EADDRINUSE POSIX +++..+ Address in use
EADDRNOTAVAIL POSIX +++..+ Cannot assign requested address
EADV +..... Advertise error
EAFNOSUPPORT POSIX +++..+ Address family not supported
EAGAIN POSIX + ++++++ Resource temporarily unavailable
EALREADY POSIX +++..+ Operation already in progress
EAUTH .++... Authentication error
EBADARCH ..+... Bad CPU type in executable
EBADE +..... Invalid exchange
EBADEXEC ..+... Bad executable
EBADF POSIX ++++++ Bad file descriptor
EBADFD +..... File descriptor in bad state
EBADMACHO ..+... Malformed Macho file
EBADMSG POSIX ++4... Bad message
EBADR +..... Invalid request descriptor
EBADRPC .++... RPC struct is bad
EBADRQC +..... Invalid request code
EBADSLT +..... Invalid slot
EBFONT +..... Bad font file format
EBUSY POSIX - ++++++ Device or resource busy
ECANCELED POSIX +++... Operation canceled
ECHILD POSIX ++++++ No child processes
ECHRNG +..... Channel number out of range
ECOMM +..... Communication error on send
ECONNABORTED POSIX +++..+ Software caused connection abort
ECONNREFUSED POSIX - +++ss+ Connection refused
ECONNRESET POSIX +++..+ Connection reset
EDEADLK POSIX ++++++ Resource deadlock avoided
EDEADLOCK +..++. File locking deadlock error
EDESTADDRREQ POSIX +++... Destination address required
EDEVERR ..+... Device error
EDOM C89 - ++++++ Numerical argument out of domain
EDOOFUS .F.... Programming error
EDOTDOT +..... RFS specific error
EDQUOT POSIX +++... Disc quota exceeded
EEXIST POSIX ++++++ File exists
EFAULT POSIX - ++++++ Bad address
EFBIG POSIX - ++++++ File too large
EFTYPE .++... Inappropriate file type or format
EHOSTDOWN +++... Host is down
EHOSTUNREACH POSIX +++..+ No route to host
EHWPOISON +..... Memory page has hardware error
EIDRM POSIX +++... Identifier removed
EILSEQ C99 ++++++ Illegal byte sequence
EINPROGRESS POSIX - +++ss+ Operation in progress
EINTR POSIX - ++++++ Interrupted system call
EINVAL POSIX + ++++++ Invalid argument
EIO POSIX + ++++++ I/O error
EISCONN POSIX +++..+ Socket is already connected
EISDIR POSIX ++++++ Is a directory
EISNAM +..... Is a named type file
EKEYEXPIRED +..... Key has expired
EKEYREJECTED +..... Key was rejected by service
EKEYREVOKED +..... Key has been revoked
EL2HLT +..... Level 2 halted
EL2NSYNC +..... Level 2 not synchronized
EL3HLT +..... Level 3 halted
EL3RST +..... Level 3 reset
ELIBACC +..... Can not access a needed shared library
ELIBBAD +..... Accessing a corrupted shared library
ELIBEXEC +..... Cannot exec a shared library directly
ELIBMAX +..... Too many shared libraries
ELIBSCN +..... .lib section in a.out corrupted
ELNRNG +..... Link number out of range
ELOOP POSIX +++..+ Too many levels of symbolic links
EMEDIUMTYPE +..... Wrong medium type
EMFILE POSIX ++++++ Too many open files
EMLINK POSIX ++++++ Too many links
EMSGSIZE POSIX +++..+ Message too long
EMULTIHOP POSIX ++4... Multihop attempted
ENAMETOOLONG POSIX - ++++++ File name too long
ENAVAIL +..... No XENIX semaphores available
ENEEDAUTH .++... Need authenticator
ENETDOWN POSIX +++..+ Network is down
ENETRESET SUSv3 +++..+ Network dropped connection on reset
ENETUNREACH POSIX +++..+ Network unreachable
ENFILE POSIX ++++++ Too many open files in system
ENOANO +..... No anode
ENOATTR .++... Attribute not found
ENOBUFS POSIX - +++..+ No buffer space available
ENOCSI +..... No CSI structure available
ENODATA XSR +N4... No message available
ENODEV POSIX - ++++++ No such device
ENOENT POSIX - ++++++ No such file or directory
ENOEXEC POSIX ++++++ Exec format error
ENOFILE ...++. No such file or directory
ENOKEY +..... Required key not available
ENOLCK POSIX ++++++ No locks available
ENOLINK POSIX ++4... Link has been severed
ENOMEDIUM +..... No medium found
ENOMEM POSIX ++++++ Not enough space
ENOMSG POSIX +++..+ No message of desired type
ENONET +..... Machine is not on the network
ENOPKG +..... Package not installed
ENOPROTOOPT POSIX +++..+ Protocol not available
ENOSPC POSIX ++++++ No space left on device
ENOSR XSR +N4... No STREAM resources
ENOSTR XSR +N4... Not a STREAM
ENOSYS POSIX + ++++++ Function not implemented
ENOTBLK +++... Block device required
ENOTCONN POSIX +++..+ Socket is not connected
ENOTDIR POSIX ++++++ Not a directory
ENOTEMPTY POSIX ++++++ Directory not empty
ENOTNAM +..... Not a XENIX named type file
ENOTRECOVERABLE SUSv4 - +..... State not recoverable
ENOTSOCK POSIX +++..+ Socket operation on non-socket
ENOTSUP POSIX +++... Operation not supported
ENOTTY POSIX ++++++ Inappropriate I/O control operation
ENOTUNIQ +..... Name not unique on network
ENXIO POSIX ++++++ No such device or address
EOPNOTSUPP POSIX +++..+ Operation not supported (on socket)
EOVERFLOW POSIX +++..+ Value too large to be stored in data type
EOWNERDEAD SUSv4 +..... Owner died
EPERM POSIX - ++++++ Operation not permitted
EPFNOSUPPORT +++..+ Protocol family not supported
EPIPE POSIX - ++++++ Broken pipe
EPROCLIM .++... Too many processes
EPROCUNAVAIL .++... Bad procedure for program
EPROGMISMATCH .++... Program version wrong
EPROGUNAVAIL .++... RPC prog. not avail
EPROTO POSIX ++4... Protocol error
EPROTONOSUPPORT POSIX - +++ss+ Protocol not supported
EPROTOTYPE POSIX +++..+ Protocol wrong type for socket
EPWROFF ..+... Device power is off
ERANGE C89 - ++++++ Result too large
EREMCHG +..... Remote address changed
EREMOTE +++... Object is remote
EREMOTEIO +..... Remote I/O error
ERESTART +..... Interrupted system call should be restarted
ERFKILL +..... Operation not possible due to RF-kill
EROFS POSIX ++++++ Read-only file system
ERPCMISMATCH .++... RPC version wrong
ESHLIBVERS ..+... Shared library version mismatch
ESHUTDOWN +++..+ Cannot send after socket shutdown
ESOCKTNOSUPPORT +++... Socket type not supported
ESPIPE POSIX ++++++ Illegal seek
ESRCH POSIX ++++++ No such process
ESRMNT +..... Srmount error
ESTALE POSIX +++..+ Stale NFS file handle
ESTRPIPE +..... Streams pipe error
ETIME XSR +N4... Stream ioctl timeout
ETIMEDOUT POSIX - +++ss+ Connection timed out
ETOOMANYREFS +++... Too many references: cannot splice
ETXTBSY POSIX +++... Text file busy
EUCLEAN +..... Structure needs cleaning
EUNATCH +..... Protocol driver not attached
EUSERS +++... Too many users
EWOULDBLOCK POSIX +++..+ Operation would block
EXDEV POSIX ++++++ Cross-device link
EXFULL +..... Exchange full
Notations:
F: used in FFmpeg (-: a few times, +: a lot)
SUSv3: Single Unix Specification, version 3
SUSv4: Single Unix Specification, version 4
XSR: XSI STREAMS (obsolete)
OS: availability on some supported operating systems
L: GNU/Linux
B: BSD (F: FreeBSD, N: NetBSD)
M: MacOS X
W: Microsoft Windows (s: emulated with winsock, see libavformat/network.h)
w: Mingw32 (3.17) and Mingw64 (2.0.1)
b: BeOS

View File

@@ -0,0 +1,46 @@
# use pkg-config for getting CFLAGS and LDLIBS
FFMPEG_LIBS= libavdevice \
libavformat \
libavfilter \
libavcodec \
libswresample \
libswscale \
libavutil \
CFLAGS += -Wall -g
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
EXAMPLES= avio_dir_cmd \
avio_reading \
decoding_encoding \
demuxing_decoding \
extract_mvs \
filtering_video \
filtering_audio \
http_multiclient \
metadata \
muxing \
remuxing \
resampling_audio \
scaling_video \
transcode_aac \
transcoding \
OBJS=$(addsuffix .o,$(EXAMPLES))
# the following examples make explicit use of the math library
avcodec: LDLIBS += -lm
decoding_encoding: LDLIBS += -lm
muxing: LDLIBS += -lm
resampling_audio: LDLIBS += -lm
.PHONY: all clean-test clean
all: $(OBJS) $(EXAMPLES)
clean-test:
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
clean: clean-test
$(RM) $(EXAMPLES) $(OBJS)

View File

@@ -0,0 +1,23 @@
FFmpeg examples README
----------------------
Both of the following use cases rely on pkg-config and make, so make sure
that you have them installed and working on your system.
Method 1: build the installed examples in a generic read/write user directory
Copy to a read/write user directory and just use "make", it will link
to the libraries on your system, assuming the PKG_CONFIG_PATH is
correctly configured.
Method 2: build the examples in-tree
Assuming you are in the source FFmpeg checkout directory, you need to build
FFmpeg (no need to make install in any prefix). Then just run "make examples".
This will build the examples using the FFmpeg build system. You can clean those
examples using "make examplesclean".
If you want to try the dedicated Makefile examples (to emulate the first
method), go into doc/examples and run a command such as
PKG_CONFIG_PATH=pc-uninstalled make.

View File

@@ -0,0 +1,180 @@
/*
* Copyright (c) 2014 Lukasz Marek
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
static const char *type_string(int type)
{
switch (type) {
case AVIO_ENTRY_DIRECTORY:
return "<DIR>";
case AVIO_ENTRY_FILE:
return "<FILE>";
case AVIO_ENTRY_BLOCK_DEVICE:
return "<BLOCK DEVICE>";
case AVIO_ENTRY_CHARACTER_DEVICE:
return "<CHARACTER DEVICE>";
case AVIO_ENTRY_NAMED_PIPE:
return "<PIPE>";
case AVIO_ENTRY_SYMBOLIC_LINK:
return "<LINK>";
case AVIO_ENTRY_SOCKET:
return "<SOCKET>";
case AVIO_ENTRY_SERVER:
return "<SERVER>";
case AVIO_ENTRY_SHARE:
return "<SHARE>";
case AVIO_ENTRY_WORKGROUP:
return "<WORKGROUP>";
case AVIO_ENTRY_UNKNOWN:
default:
break;
}
return "<UNKNOWN>";
}
static int list_op(const char *input_dir)
{
AVIODirEntry *entry = NULL;
AVIODirContext *ctx = NULL;
int cnt, ret;
char filemode[4], uid_and_gid[20];
if ((ret = avio_open_dir(&ctx, input_dir, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open directory: %s.\n", av_err2str(ret));
goto fail;
}
cnt = 0;
for (;;) {
if ((ret = avio_read_dir(ctx, &entry)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot list directory: %s.\n", av_err2str(ret));
goto fail;
}
if (!entry)
break;
if (entry->filemode == -1) {
snprintf(filemode, 4, "???");
} else {
snprintf(filemode, 4, "%3"PRIo64, entry->filemode);
}
snprintf(uid_and_gid, 20, "%"PRId64"(%"PRId64")", entry->user_id, entry->group_id);
if (cnt == 0)
av_log(NULL, AV_LOG_INFO, "%-9s %12s %30s %10s %s %16s %16s %16s\n",
"TYPE", "SIZE", "NAME", "UID(GID)", "UGO", "MODIFIED",
"ACCESSED", "STATUS_CHANGED");
av_log(NULL, AV_LOG_INFO, "%-9s %12"PRId64" %30s %10s %s %16"PRId64" %16"PRId64" %16"PRId64"\n",
type_string(entry->type),
entry->size,
entry->name,
uid_and_gid,
filemode,
entry->modification_timestamp,
entry->access_timestamp,
entry->status_change_timestamp);
avio_free_directory_entry(&entry);
cnt++;
}
fail:
avio_close_dir(&ctx);
return ret;
}
static int del_op(const char *url)
{
int ret = avpriv_io_delete(url);
if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
return ret;
}
static int move_op(const char *src, const char *dst)
{
int ret = avpriv_io_move(src, dst);
if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
return ret;
}
static void usage(const char *program_name)
{
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
"API example program to show how to manipulate resources "
"accessed through AVIOContext.\n"
"OPERATIONS:\n"
"list list content of the directory\n"
"move rename content in directory\n"
"del delete content in directory\n",
program_name);
}
int main(int argc, char *argv[])
{
const char *op = NULL;
int ret;
av_log_set_level(AV_LOG_DEBUG);
if (argc < 2) {
usage(argv[0]);
return 1;
}
/* register codecs and formats and other lavf/lavc components*/
av_register_all();
avformat_network_init();
op = argv[1];
if (strcmp(op, "list") == 0) {
if (argc < 3) {
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
ret = AVERROR(EINVAL);
} else {
ret = list_op(argv[2]);
}
} else if (strcmp(op, "del") == 0) {
if (argc < 3) {
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
ret = AVERROR(EINVAL);
} else {
ret = del_op(argv[2]);
}
} else if (strcmp(op, "move") == 0) {
if (argc < 4) {
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
ret = AVERROR(EINVAL);
} else {
ret = move_op(argv[2], argv[3]);
}
} else {
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
ret = AVERROR(EINVAL);
}
avformat_network_deinit();
return ret < 0 ? 1 : 0;
}

View File

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2014 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat AVIOContext API example.
*
* Make libavformat demuxer access media content through a custom
* AVIOContext read callback.
* @example avio_reading.c
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavformat/avio.h>
#include <libavutil/file.h>
struct buffer_data {
uint8_t *ptr;
size_t size; ///< size left in the buffer
};
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
{
struct buffer_data *bd = (struct buffer_data *)opaque;
buf_size = FFMIN(buf_size, bd->size);
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
/* copy internal buffer data to buf */
memcpy(buf, bd->ptr, buf_size);
bd->ptr += buf_size;
bd->size -= buf_size;
return buf_size;
}
int main(int argc, char *argv[])
{
AVFormatContext *fmt_ctx = NULL;
AVIOContext *avio_ctx = NULL;
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
size_t buffer_size, avio_ctx_buffer_size = 4096;
char *input_filename = NULL;
int ret = 0;
struct buffer_data bd = { 0 };
if (argc != 2) {
fprintf(stderr, "usage: %s input_file\n"
"API example program to show how to read from a custom buffer "
"accessed through AVIOContext.\n", argv[0]);
return 1;
}
input_filename = argv[1];
/* register codecs and formats and other lavf/lavc components*/
av_register_all();
/* slurp file content into buffer */
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
if (ret < 0)
goto end;
/* fill opaque structure used by the AVIOContext read callback */
bd.ptr = buffer;
bd.size = buffer_size;
if (!(fmt_ctx = avformat_alloc_context())) {
ret = AVERROR(ENOMEM);
goto end;
}
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
if (!avio_ctx_buffer) {
ret = AVERROR(ENOMEM);
goto end;
}
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
0, &bd, &read_packet, NULL, NULL);
if (!avio_ctx) {
ret = AVERROR(ENOMEM);
goto end;
}
fmt_ctx->pb = avio_ctx;
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "Could not open input\n");
goto end;
}
ret = avformat_find_stream_info(fmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Could not find stream information\n");
goto end;
}
av_dump_format(fmt_ctx, 0, input_filename, 0);
end:
avformat_close_input(&fmt_ctx);
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
if (avio_ctx) {
av_freep(&avio_ctx->buffer);
av_freep(&avio_ctx);
}
av_file_unmap(buffer, buffer_size);
if (ret < 0) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}

View File

@@ -0,0 +1,665 @@
/*
* Copyright (c) 2001 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavcodec API use example.
*
* @example decoding_encoding.c
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
* format handling
*/
#include <math.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
#define INBUF_SIZE 4096
#define AUDIO_INBUF_SIZE 20480
#define AUDIO_REFILL_THRESH 4096
/* check that a given sample format is supported by the encoder */
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
{
const enum AVSampleFormat *p = codec->sample_fmts;
while (*p != AV_SAMPLE_FMT_NONE) {
if (*p == sample_fmt)
return 1;
p++;
}
return 0;
}
/* just pick the highest supported samplerate */
static int select_sample_rate(AVCodec *codec)
{
const int *p;
int best_samplerate = 0;
if (!codec->supported_samplerates)
return 44100;
p = codec->supported_samplerates;
while (*p) {
best_samplerate = FFMAX(*p, best_samplerate);
p++;
}
return best_samplerate;
}
/* select layout with the highest channel count */
static uint64_t select_channel_layout(AVCodec *codec)
{
const uint64_t *p;
uint64_t best_ch_layout = 0;
int best_nb_channels = 0;
if (!codec->channel_layouts)
return AV_CH_LAYOUT_STEREO;
p = codec->channel_layouts;
while (*p) {
int nb_channels = av_get_channel_layout_nb_channels(*p);
if (nb_channels > best_nb_channels) {
best_ch_layout = *p;
best_nb_channels = nb_channels;
}
p++;
}
return best_ch_layout;
}
/*
* Audio encoding example
*/
static void audio_encode_example(const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
AVFrame *frame;
AVPacket pkt;
int i, j, k, ret, got_output;
int buffer_size;
FILE *f;
uint16_t *samples;
float t, tincr;
printf("Encode audio file %s\n", filename);
/* find the MP2 encoder */
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
/* put sample parameters */
c->bit_rate = 64000;
/* check that the encoder supports s16 pcm input */
c->sample_fmt = AV_SAMPLE_FMT_S16;
if (!check_sample_fmt(codec, c->sample_fmt)) {
fprintf(stderr, "Encoder does not support sample format %s",
av_get_sample_fmt_name(c->sample_fmt));
exit(1);
}
/* select other audio parameters supported by the encoder */
c->sample_rate = select_sample_rate(codec);
c->channel_layout = select_channel_layout(codec);
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
/* frame containing input raw audio */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
frame->nb_samples = c->frame_size;
frame->format = c->sample_fmt;
frame->channel_layout = c->channel_layout;
/* the codec gives us the frame size, in samples,
* we calculate the size of the samples buffer in bytes */
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
c->sample_fmt, 0);
if (buffer_size < 0) {
fprintf(stderr, "Could not get sample buffer size\n");
exit(1);
}
samples = av_malloc(buffer_size);
if (!samples) {
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
buffer_size);
exit(1);
}
/* setup the data pointers in the AVFrame */
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
(const uint8_t*)samples, buffer_size, 0);
if (ret < 0) {
fprintf(stderr, "Could not setup audio frame\n");
exit(1);
}
/* encode a single tone sound */
t = 0;
tincr = 2 * M_PI * 440.0 / c->sample_rate;
for (i = 0; i < 200; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
for (j = 0; j < c->frame_size; j++) {
samples[2*j] = (int)(sin(t) * 10000);
for (k = 1; k < c->channels; k++)
samples[2*j + k] = samples[2*j];
t += tincr;
}
/* encode the samples */
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
fclose(f);
av_freep(&samples);
av_frame_free(&frame);
avcodec_close(c);
av_free(c);
}
/*
* Audio decoding.
*/
static void audio_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int len;
FILE *f, *outfile;
uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
AVFrame *decoded_frame = NULL;
av_init_packet(&avpkt);
printf("Decode audio file %s to %s\n", filename, outfilename);
/* find the mpeg audio decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate audio codec context\n");
exit(1);
}
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
outfile = fopen(outfilename, "wb");
if (!outfile) {
av_free(c);
exit(1);
}
/* decode until eof */
avpkt.data = inbuf;
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
while (avpkt.size > 0) {
int i, ch;
int got_frame = 0;
if (!decoded_frame) {
if (!(decoded_frame = av_frame_alloc())) {
fprintf(stderr, "Could not allocate audio frame\n");
exit(1);
}
}
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
if (len < 0) {
fprintf(stderr, "Error while decoding\n");
exit(1);
}
if (got_frame) {
/* if a frame has been decoded, output it */
int data_size = av_get_bytes_per_sample(c->sample_fmt);
if (data_size < 0) {
/* This should not occur, checking just for paranoia */
fprintf(stderr, "Failed to calculate data size\n");
exit(1);
}
for (i=0; i<decoded_frame->nb_samples; i++)
for (ch=0; ch<c->channels; ch++)
fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
}
avpkt.size -= len;
avpkt.data += len;
avpkt.dts =
avpkt.pts = AV_NOPTS_VALUE;
if (avpkt.size < AUDIO_REFILL_THRESH) {
/* Refill the input buffer, to avoid trying to decode
* incomplete frames. Instead of this, one could also use
* a parser, or use a proper container format through
* libavformat. */
memmove(inbuf, avpkt.data, avpkt.size);
avpkt.data = inbuf;
len = fread(avpkt.data + avpkt.size, 1,
AUDIO_INBUF_SIZE - avpkt.size, f);
if (len > 0)
avpkt.size += len;
}
}
fclose(outfile);
fclose(f);
avcodec_close(c);
av_free(c);
av_frame_free(&decoded_frame);
}
/*
* Video encoding example
*/
static void video_encode_example(const char *filename, int codec_id)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int i, ret, x, y, got_output;
FILE *f;
AVFrame *frame;
AVPacket pkt;
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
printf("Encode video file %s\n", filename);
/* find the mpeg1 video encoder */
codec = avcodec_find_encoder(codec_id);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
/* put sample parameters */
c->bit_rate = 400000;
/* resolution must be a multiple of two */
c->width = 352;
c->height = 288;
/* frames per second */
c->time_base = (AVRational){1,25};
/* emit one intra frame every ten frames
* check frame pict_type before passing frame
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
* then gop_size is ignored and the output of encoder
* will always be I frame irrespective to gop_size
*/
c->gop_size = 10;
c->max_b_frames = 1;
c->pix_fmt = AV_PIX_FMT_YUV420P;
if (codec_id == AV_CODEC_ID_H264)
av_opt_set(c->priv_data, "preset", "slow", 0);
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "wb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame->format = c->pix_fmt;
frame->width = c->width;
frame->height = c->height;
/* the image can be allocated by any means and av_image_alloc() is
* just the most convenient way if av_malloc() is to be used */
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
c->pix_fmt, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
exit(1);
}
/* encode 1 second of video */
for (i = 0; i < 25; i++) {
av_init_packet(&pkt);
pkt.data = NULL; // packet data will be allocated by the encoder
pkt.size = 0;
fflush(stdout);
/* prepare a dummy image */
/* Y */
for (y = 0; y < c->height; y++) {
for (x = 0; x < c->width; x++) {
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
}
}
/* Cb and Cr */
for (y = 0; y < c->height/2; y++) {
for (x = 0; x < c->width/2; x++) {
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
}
}
frame->pts = i;
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* get the delayed frames */
for (got_output = 1; got_output; i++) {
fflush(stdout);
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
if (ret < 0) {
fprintf(stderr, "Error encoding frame\n");
exit(1);
}
if (got_output) {
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
fwrite(pkt.data, 1, pkt.size, f);
av_free_packet(&pkt);
}
}
/* add sequence end code to have a real mpeg file */
fwrite(endcode, 1, sizeof(endcode), f);
fclose(f);
avcodec_close(c);
av_free(c);
av_freep(&frame->data[0]);
av_frame_free(&frame);
printf("\n");
}
/*
* Video decoding example
*/
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
char *filename)
{
FILE *f;
int i;
f = fopen(filename,"w");
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
for (i = 0; i < ysize; i++)
fwrite(buf + i * wrap, 1, xsize, f);
fclose(f);
}
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
{
int len, got_frame;
char buf[1024];
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
if (len < 0) {
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
return len;
}
if (got_frame) {
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
fflush(stdout);
/* the picture is allocated by the decoder, no need to free it */
snprintf(buf, sizeof(buf), outfilename, *frame_count);
pgm_save(frame->data[0], frame->linesize[0],
frame->width, frame->height, buf);
(*frame_count)++;
}
if (pkt->data) {
pkt->size -= len;
pkt->data += len;
}
return 0;
}
static void video_decode_example(const char *outfilename, const char *filename)
{
AVCodec *codec;
AVCodecContext *c= NULL;
int frame_count;
FILE *f;
AVFrame *frame;
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
AVPacket avpkt;
av_init_packet(&avpkt);
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
printf("Decode video file %s to %s\n", filename, outfilename);
/* find the mpeg1 video decoder */
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
if (!codec) {
fprintf(stderr, "Codec not found\n");
exit(1);
}
c = avcodec_alloc_context3(codec);
if (!c) {
fprintf(stderr, "Could not allocate video codec context\n");
exit(1);
}
if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
/* For some codecs, such as msmpeg4 and mpeg4, width and height
MUST be initialized there because this information is not
available in the bitstream. */
/* open it */
if (avcodec_open2(c, codec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
exit(1);
}
f = fopen(filename, "rb");
if (!f) {
fprintf(stderr, "Could not open %s\n", filename);
exit(1);
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
frame_count = 0;
for (;;) {
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
if (avpkt.size == 0)
break;
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
and this is the only method to use them because you cannot
know the compressed data size before analysing it.
BUT some other codecs (msmpeg4, mpeg4) are inherently frame
based, so you must call them with all the data for one
frame exactly. You must also initialize 'width' and
'height' before initializing them. */
/* NOTE2: some codecs allow the raw parameters (frame size,
sample rate) to be changed at any frame. We handle this, so
you should also take care of it */
/* here, we use a stream based decoder (mpeg1video), so we
feed decoder and see if it could decode a frame */
avpkt.data = inbuf;
while (avpkt.size > 0)
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
exit(1);
}
/* some codecs, such as MPEG, transmit the I and P frame with a
latency of one frame. You must do the following to have a
chance to get the last frame of the video */
avpkt.data = NULL;
avpkt.size = 0;
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
fclose(f);
avcodec_close(c);
av_free(c);
av_frame_free(&frame);
printf("\n");
}
int main(int argc, char **argv)
{
const char *output_type;
/* register all the codecs */
avcodec_register_all();
if (argc < 2) {
printf("usage: %s output_type\n"
"API example program to decode/encode a media stream with libavcodec.\n"
"This program generates a synthetic stream and encodes it to a file\n"
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
"The encoded stream is then decoded and written to a raw data output.\n"
"output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
argv[0]);
return 1;
}
output_type = argv[1];
if (!strcmp(output_type, "h264")) {
video_encode_example("test.h264", AV_CODEC_ID_H264);
} else if (!strcmp(output_type, "mp2")) {
audio_encode_example("test.mp2");
audio_decode_example("test.pcm", "test.mp2");
} else if (!strcmp(output_type, "mpg")) {
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
video_decode_example("test%02d.pgm", "test.mpg");
} else {
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
output_type);
return 1;
}
return 0;
}

View File

@@ -0,0 +1,407 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* Demuxing and decoding example.
*
* Show how to use the libavformat and libavcodec API to demux and
* decode audio and video data.
* @example demuxing_decoding.c
*/
#include <libavutil/imgutils.h>
#include <libavutil/samplefmt.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
static int width, height;
static enum AVPixelFormat pix_fmt;
static AVStream *video_stream = NULL, *audio_stream = NULL;
static const char *src_filename = NULL;
static const char *video_dst_filename = NULL;
static const char *audio_dst_filename = NULL;
static FILE *video_dst_file = NULL;
static FILE *audio_dst_file = NULL;
static uint8_t *video_dst_data[4] = {NULL};
static int video_dst_linesize[4];
static int video_dst_bufsize;
static int video_stream_idx = -1, audio_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int audio_frame_count = 0;
/* The different ways of decoding and managing data memory. You are not
 * supposed to support all the modes in your application but pick the one
 * most appropriate to your needs. Look for the use of api_mode in this
 * example to see how the API usage differs between them */
enum {
API_MODE_OLD = 0, /* old method, deprecated */
API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
};
static int api_mode = API_MODE_OLD;
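/* A minimal sketch (not from the original example) of the lifecycle that
 * reference counting gives the caller, assuming "refcounted_frames" was set
 * to 1 before avcodec_open2() as open_codec_context() below does:
 *
 *     avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
 *     if (got_frame) {
 *         ... frame->data now belongs to us and stays valid across
 *             later decode calls ...
 *         av_frame_unref(frame);   (release our reference when done)
 *     }
 *
 * Without reference counting, frame->data is only valid until the next
 * decode call and must not be unreferenced. */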
static int decode_packet(int *got_frame, int cached)
{
int ret = 0;
int decoded = pkt.size;
*got_frame = 0;
if (pkt.stream_index == video_stream_idx) {
/* decode video frame */
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
return ret;
}
if (*got_frame) {
if (frame->width != width || frame->height != height ||
frame->format != pix_fmt) {
/* To handle this change, one could call av_image_alloc again and
* decode the following frames into another rawvideo file. */
fprintf(stderr, "Error: Width, height and pixel format have to be "
"constant in a rawvideo file, but the width, height or "
"pixel format of the input video changed:\n"
"old: width = %d, height = %d, format = %s\n"
"new: width = %d, height = %d, format = %s\n",
width, height, av_get_pix_fmt_name(pix_fmt),
frame->width, frame->height,
av_get_pix_fmt_name(frame->format));
return -1;
}
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
cached ? "(cached)" : "",
video_frame_count++, frame->coded_picture_number,
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
/* copy decoded frame to destination buffer:
 * this is required since rawvideo expects non-aligned data */
av_image_copy(video_dst_data, video_dst_linesize,
(const uint8_t **)(frame->data), frame->linesize,
pix_fmt, width, height);
/* write to rawvideo file */
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
}
} else if (pkt.stream_index == audio_stream_idx) {
/* decode audio frame */
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
return ret;
}
/* Some audio decoders decode only part of the packet, and have to be
* called again with the remainder of the packet data.
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
* Also, some decoders might over-read the packet. */
decoded = FFMIN(ret, pkt.size);
if (*got_frame) {
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
cached ? "(cached)" : "",
audio_frame_count++, frame->nb_samples,
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
/* Write the raw audio data samples of the first plane. This works
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
* most audio decoders output planar audio, which uses a separate
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
* In other words, this code will write only the first audio channel
* in these cases.
* You should use libswresample or libavfilter to convert the frame
* to packed data. */
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
}
}
/* If we use the new API with reference counting, we own the data and need
* to de-reference it when we don't use it anymore */
if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
av_frame_unref(frame);
return decoded;
}
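/* A minimal sketch (not part of this example, and assuming libswresample is
 * available) of how decode_packet() could interleave planar audio to packed
 * s16 instead of writing only the first plane:
 *
 *     #include <libswresample/swresample.h>
 *
 *     SwrContext *swr = swr_alloc_set_opts(NULL,
 *             frame->channel_layout, AV_SAMPLE_FMT_S16, frame->sample_rate,
 *             frame->channel_layout, frame->format,     frame->sample_rate,
 *             0, NULL);
 *     uint8_t *out = NULL;
 *     if (swr && swr_init(swr) >= 0 &&
 *         av_samples_alloc(&out, NULL, av_frame_get_channels(frame),
 *                          frame->nb_samples, AV_SAMPLE_FMT_S16, 0) >= 0) {
 *         int n = swr_convert(swr, &out, frame->nb_samples,
 *                             (const uint8_t **)frame->extended_data,
 *                             frame->nb_samples);
 *         fwrite(out, av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) *
 *                av_frame_get_channels(frame), n, audio_dst_file);
 *     }
 *     av_freep(&out);
 *     swr_free(&swr);
 */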
static int open_codec_context(int *stream_idx,
AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret, stream_index;
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
stream_index = ret;
st = fmt_ctx->streams[stream_index];
/* find decoder for the stream */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Init the decoders, with or without reference counting */
if (api_mode == API_MODE_NEW_API_REF_COUNT)
av_dict_set(&opts, "refcounted_frames", "1", 0);
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
*stream_idx = stream_index;
}
return 0;
}
static int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"sample format %s is not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return -1;
}
int main (int argc, char **argv)
{
int ret = 0, got_frame;
if (argc != 4 && argc != 5) {
fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
"input_file video_output_file audio_output_file\n"
"API example program to show how to read frames from an input file.\n"
"This program reads frames from a file, decodes them, and writes decoded\n"
"video frames to a rawvideo file named video_output_file, and decoded\n"
"audio frames to a rawaudio file named audio_output_file.\n\n"
"If the -refcount option is specified, the program use the\n"
"reference counting frame system which allows keeping a copy of\n"
"the data for longer than one decode call. If unset, it's using\n"
"the classic old method.\n"
"\n", argv[0]);
exit(1);
}
if (argc == 5) {
const char *mode = argv[1] + strlen("-refcount=");
if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
else {
fprintf(stderr, "unknow mode '%s'\n", mode);
exit(1);
}
argv++;
}
src_filename = argv[1];
video_dst_filename = argv[2];
audio_dst_filename = argv[3];
/* register all formats and codecs */
av_register_all();
/* open input file, and allocate format context */
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
/* retrieve stream information */
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dec_ctx = video_stream->codec;
video_dst_file = fopen(video_dst_filename, "wb");
if (!video_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
ret = 1;
goto end;
}
/* allocate image where the decoded image will be put */
width = video_dec_ctx->width;
height = video_dec_ctx->height;
pix_fmt = video_dec_ctx->pix_fmt;
ret = av_image_alloc(video_dst_data, video_dst_linesize,
width, height, pix_fmt, 1);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw video buffer\n");
goto end;
}
video_dst_bufsize = ret;
}
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
audio_stream = fmt_ctx->streams[audio_stream_idx];
audio_dec_ctx = audio_stream->codec;
audio_dst_file = fopen(audio_dst_filename, "wb");
if (!audio_dst_file) {
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
ret = 1;
goto end;
}
}
/* dump input information to stderr */
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!audio_stream && !video_stream) {
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
ret = 1;
goto end;
}
/* When using the new API, you need to use the libavutil/frame.h API, while
* the classic frame management is available in libavcodec */
if (api_mode == API_MODE_OLD)
frame = avcodec_alloc_frame();
else
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
goto end;
}
/* initialize packet, set data to NULL, let the demuxer fill it */
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
if (video_stream)
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
if (audio_stream)
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
/* read frames from the file */
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
AVPacket orig_pkt = pkt;
do {
ret = decode_packet(&got_frame, 0);
if (ret < 0)
break;
pkt.data += ret;
pkt.size -= ret;
} while (pkt.size > 0);
av_free_packet(&orig_pkt);
}
/* flush cached frames */
pkt.data = NULL;
pkt.size = 0;
do {
decode_packet(&got_frame, 1);
} while (got_frame);
printf("Demuxing succeeded.\n");
if (video_stream) {
printf("Play the output video file with the command:\n"
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
av_get_pix_fmt_name(pix_fmt), width, height,
video_dst_filename);
}
if (audio_stream) {
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
int n_channels = audio_dec_ctx->channels;
const char *fmt;
if (av_sample_fmt_is_planar(sfmt)) {
const char *packed = av_get_sample_fmt_name(sfmt);
printf("Warning: the sample format the decoder produced is planar "
"(%s). This example will output the first channel only.\n",
packed ? packed : "?");
sfmt = av_get_packed_sample_fmt(sfmt);
n_channels = 1;
}
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
goto end;
printf("Play the output audio file with the command:\n"
"ffplay -f %s -ac %d -ar %d %s\n",
fmt, n_channels, audio_dec_ctx->sample_rate,
audio_dst_filename);
}
end:
avcodec_close(video_dec_ctx);
avcodec_close(audio_dec_ctx);
avformat_close_input(&fmt_ctx);
if (video_dst_file)
fclose(video_dst_file);
if (audio_dst_file)
fclose(audio_dst_file);
if (api_mode == API_MODE_OLD)
avcodec_free_frame(&frame);
else
av_frame_free(&frame);
av_free(video_dst_data[0]);
return ret < 0;
}


@@ -0,0 +1,185 @@
/*
* Copyright (c) 2012 Stefano Sabatini
* Copyright (c) 2014 Clément Bœsch
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
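/* Motion-vector extraction example: the decoder is opened with the
 * "+export_mvs" flags2 option (see open_codec_context() below), and every
 * exported AVMotionVector is printed as one CSV row. A hypothetical run:
 *
 *     ./extract_mvs input.mp4 > mvs.csv
 */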
#include <libavutil/motion_vector.h>
#include <libavformat/avformat.h>
static AVFormatContext *fmt_ctx = NULL;
static AVCodecContext *video_dec_ctx = NULL;
static AVStream *video_stream = NULL;
static const char *src_filename = NULL;
static int video_stream_idx = -1;
static AVFrame *frame = NULL;
static AVPacket pkt;
static int video_frame_count = 0;
static int decode_packet(int *got_frame, int cached)
{
int decoded = pkt.size;
*got_frame = 0;
if (pkt.stream_index == video_stream_idx) {
int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
if (ret < 0) {
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
return ret;
}
if (*got_frame) {
int i;
AVFrameSideData *sd;
video_frame_count++;
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
if (sd) {
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
const AVMotionVector *mv = &mvs[i];
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
video_frame_count, mv->source,
mv->w, mv->h, mv->src_x, mv->src_y,
mv->dst_x, mv->dst_y, mv->flags);
}
}
}
}
return decoded;
}
static int open_codec_context(int *stream_idx,
AVFormatContext *fmt_ctx, enum AVMediaType type)
{
int ret;
AVStream *st;
AVCodecContext *dec_ctx = NULL;
AVCodec *dec = NULL;
AVDictionary *opts = NULL;
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
if (ret < 0) {
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
av_get_media_type_string(type), src_filename);
return ret;
} else {
*stream_idx = ret;
st = fmt_ctx->streams[*stream_idx];
/* find decoder for the stream */
dec_ctx = st->codec;
dec = avcodec_find_decoder(dec_ctx->codec_id);
if (!dec) {
fprintf(stderr, "Failed to find %s codec\n",
av_get_media_type_string(type));
return AVERROR(EINVAL);
}
/* Init the video decoder */
av_dict_set(&opts, "flags2", "+export_mvs", 0);
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
fprintf(stderr, "Failed to open %s codec\n",
av_get_media_type_string(type));
return ret;
}
}
return 0;
}
int main(int argc, char **argv)
{
int ret = 0, got_frame;
if (argc != 2) {
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
exit(1);
}
src_filename = argv[1];
av_register_all();
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
fprintf(stderr, "Could not open source file %s\n", src_filename);
exit(1);
}
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
fprintf(stderr, "Could not find stream information\n");
exit(1);
}
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
video_stream = fmt_ctx->streams[video_stream_idx];
video_dec_ctx = video_stream->codec;
}
av_dump_format(fmt_ctx, 0, src_filename, 0);
if (!video_stream) {
fprintf(stderr, "Could not find video stream in the input, aborting\n");
ret = 1;
goto end;
}
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Could not allocate frame\n");
ret = AVERROR(ENOMEM);
goto end;
}
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
/* initialize packet, set data to NULL, let the demuxer fill it */
av_init_packet(&pkt);
pkt.data = NULL;
pkt.size = 0;
/* read frames from the file */
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
AVPacket orig_pkt = pkt;
do {
ret = decode_packet(&got_frame, 0);
if (ret < 0)
break;
pkt.data += ret;
pkt.size -= ret;
} while (pkt.size > 0);
av_free_packet(&orig_pkt);
}
/* flush cached frames */
pkt.data = NULL;
pkt.size = 0;
do {
decode_packet(&got_frame, 1);
} while (got_frame);
end:
avcodec_close(video_dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
return ret < 0;
}


@@ -0,0 +1,365 @@
/*
* copyright (c) 2013 Andrew Kelley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libavfilter API usage example.
*
* @example filter_audio.c
* This example will generate a sine wave audio,
* pass it through a simple filter chain, and then compute the MD5 checksum of
* the output data.
*
* The filter chain it uses is:
* (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
*
* abuffer: This provides the endpoint where you can feed the decoded samples.
* volume: In this example we hardcode it to 0.90.
* aformat: This converts the samples to the samplefreq, channel layout,
* and sample format required by the audio device.
* abuffersink: This provides the endpoint where you can read the samples after
* they have passed through the filter chain.
*/
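/* An aside (not from the original example): the same linear chain could also
 * be built from a single description string with avfilter_graph_parse_ptr(),
 * e.g. "volume=0.90,aformat=sample_fmts=s16:sample_rates=44100:channel_layouts=stereo"
 * fed between an abuffer source and an abuffersink, as the filtering_audio.c
 * example below does; this file instead wires the filters up one by one to
 * demonstrate the lower-level API. */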
#include <inttypes.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include "libavutil/channel_layout.h"
#include "libavutil/md5.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavfilter/buffersrc.h"
#define INPUT_SAMPLERATE 48000
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
#define VOLUME_VAL 0.90
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
AVFilterContext **sink)
{
AVFilterGraph *filter_graph;
AVFilterContext *abuffer_ctx;
AVFilter *abuffer;
AVFilterContext *volume_ctx;
AVFilter *volume;
AVFilterContext *aformat_ctx;
AVFilter *aformat;
AVFilterContext *abuffersink_ctx;
AVFilter *abuffersink;
AVDictionary *options_dict = NULL;
char options_str[1024];
char ch_layout[64];
int err;
/* Create a new filtergraph, which will contain all the filters. */
filter_graph = avfilter_graph_alloc();
if (!filter_graph) {
fprintf(stderr, "Unable to create filter graph.\n");
return AVERROR(ENOMEM);
}
/* Create the abuffer filter;
* it will be used for feeding the data into the graph. */
abuffer = avfilter_get_by_name("abuffer");
if (!abuffer) {
fprintf(stderr, "Could not find the abuffer filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
if (!abuffer_ctx) {
fprintf(stderr, "Could not allocate the abuffer instance.\n");
return AVERROR(ENOMEM);
}
/* Set the filter options through the AVOptions API. */
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
/* Now initialize the filter; we pass NULL options, since we have already
* set all the options above. */
err = avfilter_init_str(abuffer_ctx, NULL);
if (err < 0) {
fprintf(stderr, "Could not initialize the abuffer filter.\n");
return err;
}
/* Create volume filter. */
volume = avfilter_get_by_name("volume");
if (!volume) {
fprintf(stderr, "Could not find the volume filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
if (!volume_ctx) {
fprintf(stderr, "Could not allocate the volume instance.\n");
return AVERROR(ENOMEM);
}
/* A different way of passing the options is as key/value pairs in a
* dictionary. */
av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
err = avfilter_init_dict(volume_ctx, &options_dict);
av_dict_free(&options_dict);
if (err < 0) {
fprintf(stderr, "Could not initialize the volume filter.\n");
return err;
}
/* Create the aformat filter;
* it ensures that the output is of the format we want. */
aformat = avfilter_get_by_name("aformat");
if (!aformat) {
fprintf(stderr, "Could not find the aformat filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
if (!aformat_ctx) {
fprintf(stderr, "Could not allocate the aformat instance.\n");
return AVERROR(ENOMEM);
}
/* A third way of passing the options is in a string of the form
* key1=value1:key2=value2.... */
snprintf(options_str, sizeof(options_str),
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
(uint64_t)AV_CH_LAYOUT_STEREO);
err = avfilter_init_str(aformat_ctx, options_str);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
return err;
}
/* Finally create the abuffersink filter;
* it will be used to get the filtered data out of the graph. */
abuffersink = avfilter_get_by_name("abuffersink");
if (!abuffersink) {
fprintf(stderr, "Could not find the abuffersink filter.\n");
return AVERROR_FILTER_NOT_FOUND;
}
abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
if (!abuffersink_ctx) {
fprintf(stderr, "Could not allocate the abuffersink instance.\n");
return AVERROR(ENOMEM);
}
/* This filter takes no options. */
err = avfilter_init_str(abuffersink_ctx, NULL);
if (err < 0) {
fprintf(stderr, "Could not initialize the abuffersink instance.\n");
return err;
}
/* Connect the filters;
* in this simple case the filters just form a linear chain. */
err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
if (err >= 0)
err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
if (err >= 0)
err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
if (err < 0) {
fprintf(stderr, "Error connecting filters\n");
return err;
}
/* Configure the graph. */
err = avfilter_graph_config(filter_graph, NULL);
if (err < 0) {
av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
return err;
}
*graph = filter_graph;
*src = abuffer_ctx;
*sink = abuffersink_ctx;
return 0;
}
/* Do something useful with the filtered data: this simple
* example just prints the MD5 checksum of each plane to stdout. */
static int process_output(struct AVMD5 *md5, AVFrame *frame)
{
int planar = av_sample_fmt_is_planar(frame->format);
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
int planes = planar ? channels : 1;
int bps = av_get_bytes_per_sample(frame->format);
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
int i, j;
for (i = 0; i < planes; i++) {
uint8_t checksum[16];
av_md5_init(md5);
av_md5_sum(checksum, frame->extended_data[i], plane_size);
fprintf(stdout, "plane %d: 0x", i);
for (j = 0; j < sizeof(checksum); j++)
fprintf(stdout, "%02X", checksum[j]);
fprintf(stdout, "\n");
}
fprintf(stdout, "\n");
return 0;
}
/* Construct a frame of audio data to be filtered;
* this simple example just synthesizes a sine wave. */
static int get_input(AVFrame *frame, int frame_num)
{
int err, i, j;
#define FRAME_SIZE 1024
/* Set up the frame properties and allocate the buffer for the data. */
frame->sample_rate = INPUT_SAMPLERATE;
frame->format = INPUT_FORMAT;
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
frame->nb_samples = FRAME_SIZE;
frame->pts = frame_num * FRAME_SIZE;
err = av_frame_get_buffer(frame, 0);
if (err < 0)
return err;
/* Fill the data for each channel. */
for (i = 0; i < 5; i++) {
float *data = (float*)frame->extended_data[i];
for (j = 0; j < frame->nb_samples; j++)
data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
}
return 0;
}
int main(int argc, char *argv[])
{
struct AVMD5 *md5;
AVFilterGraph *graph;
AVFilterContext *src, *sink;
AVFrame *frame;
char errstr[1024];
float duration;
int err, nb_frames, i;
if (argc < 2) {
fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
return 1;
}
duration = atof(argv[1]);
nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
if (nb_frames <= 0) {
fprintf(stderr, "Invalid duration: %s\n", argv[1]);
return 1;
}
avfilter_register_all();
/* Allocate the frame we will be using to store the data. */
frame = av_frame_alloc();
if (!frame) {
fprintf(stderr, "Error allocating the frame\n");
return 1;
}
md5 = av_md5_alloc();
if (!md5) {
fprintf(stderr, "Error allocating the MD5 context\n");
return 1;
}
/* Set up the filtergraph. */
err = init_filter_graph(&graph, &src, &sink);
if (err < 0) {
fprintf(stderr, "Unable to init filter graph:");
goto fail;
}
/* the main filtering loop */
for (i = 0; i < nb_frames; i++) {
/* get an input frame to be filtered */
err = get_input(frame, i);
if (err < 0) {
fprintf(stderr, "Error generating input frame:");
goto fail;
}
/* Send the frame to the input of the filtergraph. */
err = av_buffersrc_add_frame(src, frame);
if (err < 0) {
av_frame_unref(frame);
fprintf(stderr, "Error submitting the frame to the filtergraph:");
goto fail;
}
/* Get all the filtered output that is available. */
while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
/* now do something with our filtered frame */
err = process_output(md5, frame);
if (err < 0) {
fprintf(stderr, "Error processing the filtered frame:");
goto fail;
}
av_frame_unref(frame);
}
if (err == AVERROR(EAGAIN)) {
/* Need to feed more frames in. */
continue;
} else if (err == AVERROR_EOF) {
/* Nothing more to do, finish. */
break;
} else if (err < 0) {
/* An error occurred. */
fprintf(stderr, "Error filtering the data:");
goto fail;
}
}
avfilter_graph_free(&graph);
av_frame_free(&frame);
av_freep(&md5);
return 0;
fail:
av_strerror(err, errstr, sizeof(errstr));
fprintf(stderr, "%s\n", errstr);
return 1;
}


@@ -0,0 +1,296 @@
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Clément Bœsch
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for audio decoding and filtering
* @example filtering_audio.c
*/
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
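/* A hypothetical invocation (input file name assumed) that feeds the raw
 * s16le output of this program into the player command above:
 *
 *     ./filtering_audio input.mp3 | ffplay -f s16le -ar 8000 -ac 1 -
 */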
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int audio_stream_index = -1;
static int open_input_file(const char *filename)
{
int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* select the audio stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
return ret;
}
audio_stream_index = ret;
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the audio decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
return ret;
}
return 0;
}
static int init_filters(const char *filters_descr)
{
char args[512];
int ret = 0;
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
static const int out_sample_rates[] = { 8000, -1 };
const AVFilterLink *outlink;
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
time_base.num, time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
goto end;
}
/* buffer audio sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
}
/*
* Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr.
*/
/*
* The buffer source output must be connected to the input pad of
* the first filter described by filters_descr; since the first
* filter input label is not specified, it is set to "in" by
* default.
*/
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
/*
* The buffer sink input must be connected to the output pad of
* the last filter described by filters_descr; since the last
* filter output label is not specified, it is set to "out" by
* default.
*/
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
/* Print summary of the sink buffer
* Note: args buffer is reused to store channel layout string */
outlink = buffersink_ctx->inputs[0];
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
(int)outlink->sample_rate,
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
args);
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static void print_frame(const AVFrame *frame)
{
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
const uint16_t *p = (uint16_t*)frame->data[0];
const uint16_t *p_end = p + n;
while (p < p_end) {
fputc(*p & 0xff, stdout);
fputc(*p>>8 & 0xff, stdout);
p++;
}
fflush(stdout);
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet0, packet;
AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
int got_frame;
if (!frame || !filt_frame) {
perror("Could not allocate frame");
exit(1);
}
if (argc != 2) {
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
exit(1);
}
av_register_all();
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)
goto end;
/* read all packets */
packet0.data = NULL;
packet.data = NULL;
while (1) {
if (!packet0.data) {
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break;
packet0 = packet;
}
if (packet.stream_index == audio_stream_index) {
got_frame = 0;
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
continue;
}
packet.size -= ret;
packet.data += ret;
if (got_frame) {
/* push the audio data from decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
break;
}
/* pull filtered audio from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
print_frame(filt_frame);
av_frame_unref(filt_frame);
}
}
if (packet.size <= 0)
av_free_packet(&packet0);
} else {
/* discard non-wanted packets */
av_free_packet(&packet0);
}
}
end:
avfilter_graph_free(&filter_graph);
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
exit(1);
}
exit(0);
}


@@ -0,0 +1,281 @@
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for decoding and filtering
* @example filtering_video.c
*/
#define _XOPEN_SOURCE 600 /* for usleep */
#include <unistd.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
const char *filter_descr = "scale=78:24,transpose=cclock";
/* other way:
   scale=78:24 [scl]; [scl] transpose=cclock // assumes "[in]" and "[out]" to be the input and output pads, respectively
 */
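/* A hypothetical run (file name assumed): ./filtering_video input.mp4
 * scales each decoded frame to 78x24, rotates it counterclockwise, and
 * renders it as ASCII art in the terminal (see display_frame() below). */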
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
static int video_stream_index = -1;
static int64_t last_pts = AV_NOPTS_VALUE;
static int open_input_file(const char *filename)
{
int ret;
AVCodec *dec;
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
/* select the video stream */
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
return ret;
}
video_stream_index = ret;
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
/* init the video decoder */
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
return ret;
}
return 0;
}
static int init_filters(const char *filters_descr)
{
char args[512];
int ret = 0;
AVFilter *buffersrc = avfilter_get_by_name("buffer");
AVFilter *buffersink = avfilter_get_by_name("buffersink");
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
/* buffer video source: the decoded frames from the decoder will be inserted here. */
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
time_base.num, time_base.den,
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
/* buffer video sink: to terminate the filter chain. */
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
/*
* Set the endpoints for the filter graph. The filter_graph will
* be linked to the graph described by filters_descr.
*/
/*
* The buffer source output must be connected to the input pad of
* the first filter described by filters_descr; since the first
* filter input label is not specified, it is set to "in" by
* default.
*/
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
/*
* The buffer sink input must be connected to the output pad of
* the last filter described by filters_descr; since the last
* filter output label is not specified, it is set to "out" by
* default.
*/
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static void display_frame(const AVFrame *frame, AVRational time_base)
{
int x, y;
uint8_t *p0, *p;
int64_t delay;
if (frame->pts != AV_NOPTS_VALUE) {
if (last_pts != AV_NOPTS_VALUE) {
/* sleep roughly the right amount of time;
* usleep is in microseconds, just like AV_TIME_BASE. */
delay = av_rescale_q(frame->pts - last_pts,
time_base, AV_TIME_BASE_Q);
if (delay > 0 && delay < 1000000)
usleep(delay);
}
last_pts = frame->pts;
}
/* Trivial ASCII grayscale display. */
p0 = frame->data[0];
puts("\033c");
for (y = 0; y < frame->height; y++) {
p = p0;
for (x = 0; x < frame->width; x++)
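/* map each luma sample (0..255) onto one of five ASCII shades: " .-+#" */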
putchar(" .-+#"[*(p++) / 52]);
putchar('\n');
p0 += frame->linesize[0];
}
fflush(stdout);
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet;
AVFrame *frame = av_frame_alloc();
AVFrame *filt_frame = av_frame_alloc();
int got_frame;
if (!frame || !filt_frame) {
perror("Could not allocate frame");
exit(1);
}
if (argc != 2) {
fprintf(stderr, "Usage: %s file\n", argv[0]);
exit(1);
}
av_register_all();
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = init_filters(filter_descr)) < 0)
goto end;
/* read all packets */
while (1) {
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
break;
if (packet.stream_index == video_stream_index) {
got_frame = 0;
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
break;
}
if (got_frame) {
frame->pts = av_frame_get_best_effort_timestamp(frame);
/* push the decoded frame into the filtergraph */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
break;
}
/* pull filtered frames from the filtergraph */
while (1) {
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
break;
if (ret < 0)
goto end;
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
av_frame_unref(filt_frame);
}
av_frame_unref(frame);
}
}
av_free_packet(&packet);
}
end:
avfilter_graph_free(&filter_graph);
avcodec_close(dec_ctx);
avformat_close_input(&fmt_ctx);
av_frame_free(&frame);
av_frame_free(&filt_frame);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
exit(1);
}
exit(0);
}


@@ -0,0 +1,155 @@
/*
* Copyright (c) 2015 Stephan Holljes
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat multi-client network API usage example.
*
* @example http_multiclient.c
* This example will serve a file without decoding or demuxing it over http.
* Multiple clients can connect and will receive the same file.
*/
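/* A hypothetical invocation (names assumed): serve a file on port 8000 with
 *
 *     ./http_multiclient input.mkv http://0.0.0.0:8000
 *
 * then each client fetches it under the same resource name, e.g.
 *
 *     ffplay http://server:8000/input.mkv
 *
 * (process_client() below matches the requested resource against the input
 * file name and replies 404 otherwise). */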
#include <libavformat/avformat.h>
#include <libavutil/opt.h>
#include <unistd.h>
void process_client(AVIOContext *client, const char *in_uri)
{
AVIOContext *input = NULL;
uint8_t buf[1024];
int ret, n, reply_code;
char *resource = NULL;
while ((ret = avio_handshake(client)) > 0) {
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
// checking strlen(resource) is necessary, because av_opt_get()
// may return an empty string.
if (resource && strlen(resource))
break;
}
if (ret < 0)
goto end;
av_log(client, AV_LOG_TRACE, "resource=%p\n", resource);
if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) {
reply_code = 200;
} else {
reply_code = AVERROR_HTTP_NOT_FOUND;
}
if ((ret = av_opt_set_int(client, "reply_code", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) {
av_log(client, AV_LOG_ERROR, "Failed to set reply_code: %s.\n", av_err2str(ret));
goto end;
}
av_log(client, AV_LOG_TRACE, "Set reply code to %d\n", reply_code);
while ((ret = avio_handshake(client)) > 0);
if (ret < 0)
goto end;
fprintf(stderr, "Handshake performed.\n");
if (reply_code != 200)
goto end;
fprintf(stderr, "Opening input file.\n");
if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) {
av_log(input, AV_LOG_ERROR, "Failed to open input: %s: %s.\n", in_uri,
av_err2str(ret));
goto end;
}
for(;;) {
n = avio_read(input, buf, sizeof(buf));
if (n < 0) {
if (n == AVERROR_EOF)
break;
av_log(input, AV_LOG_ERROR, "Error reading from input: %s.\n",
av_err2str(n));
break;
}
avio_write(client, buf, n);
avio_flush(client);
}
end:
fprintf(stderr, "Flushing client\n");
avio_flush(client);
fprintf(stderr, "Closing client\n");
avio_close(client);
fprintf(stderr, "Closing input\n");
avio_close(input);
}
int main(int argc, char **argv)
{
av_log_set_level(AV_LOG_TRACE);
AVDictionary *options = NULL;
AVIOContext *client = NULL, *server = NULL;
const char *in_uri, *out_uri;
int ret, pid;
if (argc < 3) {
printf("usage: %s input http://hostname[:port]\n"
"API example program to serve http to multiple clients.\n"
"\n", argv[0]);
return 1;
}
in_uri = argv[1];
out_uri = argv[2];
av_register_all();
avformat_network_init();
if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
return ret;
}
if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
return ret;
}
fprintf(stderr, "Entering main loop.\n");
for(;;) {
if ((ret = avio_accept(server, &client)) < 0)
goto end;
fprintf(stderr, "Accepted client, forking process.\n");
// XXX: Since we don't reap our children and don't ignore signals
// this produces zombie processes.
pid = fork();
if (pid < 0) {
perror("Fork failed");
ret = AVERROR(errno);
goto end;
}
if (pid == 0) {
fprintf(stderr, "In child.\n");
process_client(client, in_uri);
avio_close(server);
exit(0);
}
if (pid > 0)
avio_close(client);
}
end:
avio_close(server);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}


@@ -0,0 +1,56 @@
/*
* Copyright (c) 2011 Reinhard Tartler
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* Shows how the metadata API can be used in application programs.
* @example metadata.c
*/
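/* A hypothetical run (file and tag values assumed): ./metadata song.mp3
 * might print
 *
 *     title=Some Song
 *     artist=Some Artist
 *
 * one "key=value" line per entry in the container-level metadata dictionary. */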
#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>
int main (int argc, char **argv)
{
AVFormatContext *fmt_ctx = NULL;
AVDictionaryEntry *tag = NULL;
int ret;
if (argc != 2) {
printf("usage: %s <input_file>\n"
"example program to demonstrate the use of the libavformat metadata API.\n"
"\n", argv[0]);
return 1;
}
av_register_all();
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
return ret;
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
printf("%s=%s\n", tag->key, tag->value);
avformat_close_input(&fmt_ctx);
return 0;
}


@@ -0,0 +1,670 @@
/*
* Copyright (c) 2003 Fabrice Bellard
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat API example.
*
* Output a media file in any supported libavformat format. The default
* codecs are used.
* @example muxing.c
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <libavutil/avassert.h>
#include <libavutil/channel_layout.h>
#include <libavutil/opt.h>
#include <libavutil/mathematics.h>
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libswresample/swresample.h>
#define STREAM_DURATION 10.0
#define STREAM_FRAME_RATE 25 /* 25 images/s */
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
#define SCALE_FLAGS SWS_BICUBIC
// a wrapper around a single output AVStream
typedef struct OutputStream {
AVStream *st;
/* pts of the next frame that will be generated */
int64_t next_pts;
int samples_count;
AVFrame *frame;
AVFrame *tmp_frame;
float t, tincr, tincr2;
struct SwsContext *sws_ctx;
struct SwrContext *swr_ctx;
} OutputStream;
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
/* rescale output packet timestamp values from codec to stream timebase */
av_packet_rescale_ts(pkt, *time_base, st->time_base);
pkt->stream_index = st->index;
/* Write the compressed frame to the media file. */
log_packet(fmt_ctx, pkt);
return av_interleaved_write_frame(fmt_ctx, pkt);
}
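/* A worked example (illustrative numbers, not from this program) of the
 * rescale above: a packet with pts 3 in a 1/25 codec time base becomes
 * pts 3 * (1/25) / (1/90000) = 10800 in a 1/90000 stream time base. */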
/* Add an output stream. */
static void add_stream(OutputStream *ost, AVFormatContext *oc,
AVCodec **codec,
enum AVCodecID codec_id)
{
AVCodecContext *c;
int i;
/* find the encoder */
*codec = avcodec_find_encoder(codec_id);
if (!(*codec)) {
fprintf(stderr, "Could not find encoder for '%s'\n",
avcodec_get_name(codec_id));
exit(1);
}
ost->st = avformat_new_stream(oc, *codec);
if (!ost->st) {
fprintf(stderr, "Could not allocate stream\n");
exit(1);
}
ost->st->id = oc->nb_streams-1;
c = ost->st->codec;
switch ((*codec)->type) {
case AVMEDIA_TYPE_AUDIO:
c->sample_fmt = (*codec)->sample_fmts ?
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
c->bit_rate = 64000;
c->sample_rate = 44100;
if ((*codec)->supported_samplerates) {
c->sample_rate = (*codec)->supported_samplerates[0];
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
if ((*codec)->supported_samplerates[i] == 44100)
c->sample_rate = 44100;
}
}
c->channel_layout = AV_CH_LAYOUT_STEREO;
if ((*codec)->channel_layouts) {
c->channel_layout = (*codec)->channel_layouts[0];
for (i = 0; (*codec)->channel_layouts[i]; i++) {
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
c->channel_layout = AV_CH_LAYOUT_STEREO;
}
}
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
ost->st->time_base = (AVRational){ 1, c->sample_rate };
break;
case AVMEDIA_TYPE_VIDEO:
c->codec_id = codec_id;
c->bit_rate = 400000;
/* Resolution must be a multiple of two. */
c->width = 352;
c->height = 288;
/* timebase: This is the fundamental unit of time (in seconds) in terms
* of which frame timestamps are represented. For fixed-fps content,
* timebase should be 1/framerate and timestamp increments should be
* identical to 1. */
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
c->time_base = ost->st->time_base;
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
c->pix_fmt = STREAM_PIX_FMT;
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
/* just for testing, we also add B frames */
c->max_b_frames = 2;
}
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
/* Needed to avoid using macroblocks in which some coeffs overflow.
* This does not happen with normal video, it just happens here as
* the motion of the chroma plane does not match the luma plane. */
c->mb_decision = 2;
}
break;
default:
break;
}
/* Some formats want stream headers to be separate. */
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
/**************************************************************/
/* audio output */
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
uint64_t channel_layout,
int sample_rate, int nb_samples)
{
AVFrame *frame = av_frame_alloc();
int ret;
if (!frame) {
fprintf(stderr, "Error allocating an audio frame\n");
exit(1);
}
frame->format = sample_fmt;
frame->channel_layout = channel_layout;
frame->sample_rate = sample_rate;
frame->nb_samples = nb_samples;
if (nb_samples) {
ret = av_frame_get_buffer(frame, 0);
if (ret < 0) {
fprintf(stderr, "Error allocating an audio buffer\n");
exit(1);
}
}
return frame;
}
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
AVCodecContext *c;
int nb_samples;
int ret;
AVDictionary *opt = NULL;
c = ost->st->codec;
/* open it */
av_dict_copy(&opt, opt_arg, 0);
ret = avcodec_open2(c, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
exit(1);
}
/* init signal generator */
ost->t = 0;
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
/* increment frequency by 110 Hz per second */
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
nb_samples = 10000;
else
nb_samples = c->frame_size;
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
c->sample_rate, nb_samples);
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
c->sample_rate, nb_samples);
/* create resampler context */
ost->swr_ctx = swr_alloc();
if (!ost->swr_ctx) {
fprintf(stderr, "Could not allocate resampler context\n");
exit(1);
}
/* set options */
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
/* initialize the resampling context */
if ((ret = swr_init(ost->swr_ctx)) < 0) {
fprintf(stderr, "Failed to initialize the resampling context\n");
exit(1);
}
}
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
* 'nb_channels' channels. */
static AVFrame *get_audio_frame(OutputStream *ost)
{
AVFrame *frame = ost->tmp_frame;
int j, i, v;
int16_t *q = (int16_t*)frame->data[0];
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
for (j = 0; j <frame->nb_samples; j++) {
v = (int)(sin(ost->t) * 10000);
for (i = 0; i < ost->st->codec->channels; i++)
*q++ = v;
ost->t += ost->tincr;
ost->tincr += ost->tincr2;
}
frame->pts = ost->next_pts;
ost->next_pts += frame->nb_samples;
return frame;
}
/*
* encode one audio frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
{
AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0
AVFrame *frame;
int ret;
int got_packet;
int dst_nb_samples;
av_init_packet(&pkt);
c = ost->st->codec;
frame = get_audio_frame(ost);
if (frame) {
/* convert samples from native format to destination codec format, using the resampler */
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
c->sample_rate, c->sample_rate, AV_ROUND_UP);
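        /* av_rescale_rnd(a, b, c, rnd) computes a*b/c with the chosen
         * rounding; with identical input and output rates this is an
         * identity, so the assertion below only checks that the resampler
         * has no buffered delay. */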
av_assert0(dst_nb_samples == frame->nb_samples);
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
* make sure we do not overwrite it here
*/
ret = av_frame_make_writable(ost->frame);
if (ret < 0)
exit(1);
/* convert to destination format */
ret = swr_convert(ost->swr_ctx,
ost->frame->data, dst_nb_samples,
(const uint8_t **)frame->data, frame->nb_samples);
if (ret < 0) {
fprintf(stderr, "Error while converting\n");
exit(1);
}
frame = ost->frame;
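        /* ost->samples_count indexes the first sample of this frame at the
         * codec sample rate; av_rescale_q() converts that index from the
         * 1/sample_rate time base to the encoder's time base to get the pts. */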
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
ost->samples_count += dst_nb_samples;
}
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
if (ret < 0) {
fprintf(stderr, "Error while writing audio frame: %s\n",
av_err2str(ret));
exit(1);
}
}
return (frame || got_packet) ? 0 : 1;
}
/**************************************************************/
/* video output */
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
{
AVFrame *picture;
int ret;
picture = av_frame_alloc();
if (!picture)
return NULL;
picture->format = pix_fmt;
picture->width = width;
picture->height = height;
/* allocate the buffers for the frame data */
ret = av_frame_get_buffer(picture, 32);
if (ret < 0) {
fprintf(stderr, "Could not allocate frame data.\n");
exit(1);
}
return picture;
}
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
int ret;
AVCodecContext *c = ost->st->codec;
AVDictionary *opt = NULL;
av_dict_copy(&opt, opt_arg, 0);
/* open the codec */
ret = avcodec_open2(c, codec, &opt);
av_dict_free(&opt);
if (ret < 0) {
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
exit(1);
}
/* allocate and init a re-usable frame */
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
if (!ost->frame) {
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
/* If the output format is not YUV420P, then a temporary YUV420P
* picture is needed too. It is then converted to the required
* output format. */
ost->tmp_frame = NULL;
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
if (!ost->tmp_frame) {
fprintf(stderr, "Could not allocate temporary picture\n");
exit(1);
}
}
}
/* Prepare a dummy image. */
static void fill_yuv_image(AVFrame *pict, int frame_index,
int width, int height)
{
int x, y, i, ret;
/* when we pass a frame to the encoder, it may keep a reference to it
* internally;
* make sure we do not overwrite it here
*/
ret = av_frame_make_writable(pict);
if (ret < 0)
exit(1);
i = frame_index;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
}
}
}
static AVFrame *get_video_frame(OutputStream *ost)
{
AVCodecContext *c = ost->st->codec;
/* check if we want to generate more frames */
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
return NULL;
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
/* as we only generate a YUV420P picture, we must convert it
* to the codec pixel format if needed */
if (!ost->sws_ctx) {
ost->sws_ctx = sws_getContext(c->width, c->height,
AV_PIX_FMT_YUV420P,
c->width, c->height,
c->pix_fmt,
SCALE_FLAGS, NULL, NULL, NULL);
if (!ost->sws_ctx) {
fprintf(stderr,
"Could not initialize the conversion context\n");
exit(1);
}
}
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
sws_scale(ost->sws_ctx,
(const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
0, c->height, ost->frame->data, ost->frame->linesize);
} else {
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
}
ost->frame->pts = ost->next_pts++;
return ost->frame;
}
/*
* encode one video frame and send it to the muxer
* return 1 when encoding is finished, 0 otherwise
*/
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
int ret;
AVCodecContext *c;
AVFrame *frame;
int got_packet = 0;
c = ost->st->codec;
frame = get_video_frame(ost);
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
/* a hack to avoid data copy with some raw video muxers */
AVPacket pkt;
av_init_packet(&pkt);
if (!frame)
return 1;
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = ost->st->index;
pkt.data = (uint8_t *)frame;
pkt.size = sizeof(AVPicture);
pkt.pts = pkt.dts = frame->pts;
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
ret = av_interleaved_write_frame(oc, &pkt);
} else {
AVPacket pkt = { 0 };
av_init_packet(&pkt);
/* encode the image */
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
if (ret < 0) {
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
exit(1);
}
if (got_packet) {
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
} else {
ret = 0;
}
}
if (ret < 0) {
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
exit(1);
}
return (frame || got_packet) ? 0 : 1;
}
static void close_stream(AVFormatContext *oc, OutputStream *ost)
{
avcodec_close(ost->st->codec);
av_frame_free(&ost->frame);
av_frame_free(&ost->tmp_frame);
sws_freeContext(ost->sws_ctx);
swr_free(&ost->swr_ctx);
}
/**************************************************************/
/* media file output */
int main(int argc, char **argv)
{
OutputStream video_st = { 0 }, audio_st = { 0 };
const char *filename;
AVOutputFormat *fmt;
AVFormatContext *oc;
AVCodec *audio_codec, *video_codec;
int ret;
int have_video = 0, have_audio = 0;
int encode_video = 0, encode_audio = 0;
AVDictionary *opt = NULL;
/* Initialize libavcodec, and register all codecs and formats. */
av_register_all();
if (argc < 2) {
printf("usage: %s output_file\n"
"API example program to output a media file with libavformat.\n"
"This program generates a synthetic audio and video stream, encodes and\n"
"muxes them into a file named output_file.\n"
"The output format is automatically guessed according to the file extension.\n"
"Raw images can also be output by using '%%d' in the filename.\n"
"\n", argv[0]);
return 1;
}
filename = argv[1];
if (argc > 3 && !strcmp(argv[2], "-flags")) {
av_dict_set(&opt, argv[2]+1, argv[3], 0);
}
/* allocate the output media context */
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
if (!oc) {
printf("Could not deduce output format from file extension: using MPEG.\n");
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
}
if (!oc)
return 1;
fmt = oc->oformat;
/* Add the audio and video streams using the default format codecs
* and initialize the codecs. */
if (fmt->video_codec != AV_CODEC_ID_NONE) {
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
have_video = 1;
encode_video = 1;
}
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
have_audio = 1;
encode_audio = 1;
}
/* Now that all the parameters are set, we can open the audio and
* video codecs and allocate the necessary encode buffers. */
if (have_video)
open_video(oc, video_codec, &video_st, opt);
if (have_audio)
open_audio(oc, audio_codec, &audio_st, opt);
av_dump_format(oc, 0, filename, 1);
/* open the output file, if needed */
if (!(fmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open '%s': %s\n", filename,
av_err2str(ret));
return 1;
}
}
/* Write the stream header, if any. */
ret = avformat_write_header(oc, &opt);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file: %s\n",
av_err2str(ret));
return 1;
}
while (encode_video || encode_audio) {
/* select the stream to encode */
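        /* av_compare_ts() compares the two next_pts values across their
         * different time bases; always encoding the stream that lags behind
         * keeps audio and video packets roughly interleaved in the output. */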
if (encode_video &&
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
encode_video = !write_video_frame(oc, &video_st);
} else {
encode_audio = !write_audio_frame(oc, &audio_st);
}
}
/* Write the trailer, if any. The trailer must be written before you
* close the CodecContexts open when you wrote the header; otherwise
* av_write_trailer() may try to use memory that was freed on
* av_codec_close(). */
av_write_trailer(oc);
/* Close each codec. */
if (have_video)
close_stream(oc, &video_st);
if (have_audio)
close_stream(oc, &audio_st);
if (!(fmt->flags & AVFMT_NOFILE))
/* Close the output file. */
avio_closep(&oc->pb);
/* free the stream */
avformat_free_context(oc);
return 0;
}

View File

@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavcodec
includedir=${pcfiledir}/../../..
Name: libavcodec
Description: FFmpeg codec library
Version: 56.60.100
Requires: libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavcodec
Cflags: -I${includedir}

View File

@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavdevice
includedir=${pcfiledir}/../../..
Name: libavdevice
Description: FFmpeg device handling library
Version: 56.4.100
Requires: libavformat >= 56.40.101, libavcodec >= 56.60.100, libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavdevice
Cflags: -I${includedir}

View File

@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavformat
includedir=${pcfiledir}/../../..
Name: libavformat
Description: FFmpeg container format library
Version: 56.40.101
Requires: libavcodec >= 56.60.100, libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavformat
Cflags: -I${includedir}

View File

@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavutil
includedir=${pcfiledir}/../../..
Name: libavutil
Description: FFmpeg utility library
Version: 54.31.100
Requires:
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavutil
Cflags: -I${includedir}

View File

@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libswresample
includedir=${pcfiledir}/../../..
Name: libswresample
Description: FFmpeg audio resampling library
Version: 1.2.101
Requires: libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lswresample
Cflags: -I${includedir}

View File

@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libswscale
includedir=${pcfiledir}/../../..
Name: libswscale
Description: FFmpeg image rescaling library
Version: 3.1.101
Requires: libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lswscale
Cflags: -I${includedir}
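# Note: prefix is left empty on purpose; every path resolves relative to
# ${pcfiledir}, so these in-tree libraries can be used without installation.
# A minimal usage sketch (assuming this directory is on PKG_CONFIG_PATH):
#   pkg-config --cflags --libs libswscale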

View File

@@ -0,0 +1,484 @@
/*
* Copyright (c) 2015 Anton Khirnov
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* Intel QSV-accelerated H.264 decoding example.
*
* @example qsvdec.c
* This example shows how to do QSV-accelerated H.264 decoding with output
* frames in the VA-API video surfaces.
*/
#include "config.h"
#include <stdio.h>
#include <mfx/mfxvideo.h>
#include <va/va.h>
#include <va/va_x11.h>
#include <X11/Xlib.h>
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libavcodec/qsv.h"
#include "libavutil/error.h"
#include "libavutil/mem.h"
typedef struct DecodeContext {
mfxSession mfx_session;
VADisplay va_dpy;
VASurfaceID *surfaces;
mfxMemId *surface_ids;
int *surface_used;
int nb_surfaces;
mfxFrameInfo frame_info;
} DecodeContext;
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
mfxFrameAllocResponse *resp)
{
DecodeContext *decode = pthis;
int err, i;
if (decode->surfaces) {
fprintf(stderr, "Multiple allocation requests.\n");
return MFX_ERR_MEMORY_ALLOC;
}
if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
return MFX_ERR_UNSUPPORTED;
}
if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
fprintf(stderr, "Unsupported surface properties.\n");
return MFX_ERR_UNSUPPORTED;
}
decode->surfaces = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
decode->surface_ids = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
goto fail;
err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
req->Info.Width, req->Info.Height,
decode->surfaces, req->NumFrameSuggested,
NULL, 0);
if (err != VA_STATUS_SUCCESS) {
fprintf(stderr, "Error allocating VA surfaces\n");
goto fail;
}
decode->nb_surfaces = req->NumFrameSuggested;
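    /* Each mfxMemId handed to libmfx is just a pointer to the matching
     * VASurfaceID; frame_get_hdl() later returns it as the raw hardware
     * handle. */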
for (i = 0; i < decode->nb_surfaces; i++)
decode->surface_ids[i] = &decode->surfaces[i];
resp->mids = decode->surface_ids;
resp->NumFrameActual = decode->nb_surfaces;
decode->frame_info = req->Info;
return MFX_ERR_NONE;
fail:
av_freep(&decode->surfaces);
av_freep(&decode->surface_ids);
av_freep(&decode->surface_used);
return MFX_ERR_MEMORY_ALLOC;
}
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
{
DecodeContext *decode = pthis;
if (decode->surfaces)
vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
av_freep(&decode->surfaces);
av_freep(&decode->surface_ids);
av_freep(&decode->surface_used);
decode->nb_surfaces = 0;
return MFX_ERR_NONE;
}
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
return MFX_ERR_UNSUPPORTED;
}
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
{
return MFX_ERR_UNSUPPORTED;
}
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
{
*hdl = mid;
return MFX_ERR_NONE;
}
static void free_buffer(void *opaque, uint8_t *data)
{
int *used = opaque;
*used = 0;
av_freep(&data);
}
static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
{
DecodeContext *decode = avctx->opaque;
mfxFrameSurface1 *surf;
AVBufferRef *surf_buf;
int idx;
for (idx = 0; idx < decode->nb_surfaces; idx++) {
if (!decode->surface_used[idx])
break;
}
if (idx == decode->nb_surfaces) {
fprintf(stderr, "No free surfaces\n");
return AVERROR(ENOMEM);
}
surf = av_mallocz(sizeof(*surf));
if (!surf)
return AVERROR(ENOMEM);
surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
&decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
if (!surf_buf) {
av_freep(&surf);
return AVERROR(ENOMEM);
}
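    /* free_buffer() clears surface_used[idx] once the last reference to the
     * frame is dropped, returning the VA surface to the pool for reuse. */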
surf->Info = decode->frame_info;
surf->Data.MemId = &decode->surfaces[idx];
frame->buf[0] = surf_buf;
frame->data[3] = (uint8_t*)surf;
decode->surface_used[idx] = 1;
return 0;
}
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
{
while (*pix_fmts != AV_PIX_FMT_NONE) {
if (*pix_fmts == AV_PIX_FMT_QSV) {
if (!avctx->hwaccel_context) {
DecodeContext *decode = avctx->opaque;
AVQSVContext *qsv = av_qsv_alloc_context();
if (!qsv)
return AV_PIX_FMT_NONE;
qsv->session = decode->mfx_session;
qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
avctx->hwaccel_context = qsv;
}
return AV_PIX_FMT_QSV;
}
pix_fmts++;
}
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
return AV_PIX_FMT_NONE;
}
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
AVFrame *frame, AVPacket *pkt,
AVIOContext *output_ctx)
{
int ret = 0;
int got_frame = 1;
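    /* Old-style decode loop: consume the packet in chunks and, once a
     * NULL-data packet is passed in, keep calling the decoder to drain its
     * internally buffered frames until got_frame stays 0. */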
while (pkt->size > 0 || (!pkt->data && got_frame)) {
ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
if (ret < 0) {
fprintf(stderr, "Error during decoding\n");
return ret;
}
pkt->data += ret;
pkt->size -= ret;
/* A real program would do something useful with the decoded frame here.
* We just retrieve the raw data and write it to a file, which is rather
* useless but pedagogic. */
if (got_frame) {
mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;
VAImageFormat img_fmt = {
.fourcc = VA_FOURCC_NV12,
.byte_order = VA_LSB_FIRST,
.bits_per_pixel = 8,
.depth = 8,
};
VAImage img;
VAStatus err;
uint8_t *data;
int i, j;
img.buf = VA_INVALID_ID;
img.image_id = VA_INVALID_ID;
err = vaCreateImage(decode->va_dpy, &img_fmt,
frame->width, frame->height, &img);
if (err != VA_STATUS_SUCCESS) {
fprintf(stderr, "Error creating an image: %s\n",
vaErrorStr(err));
ret = AVERROR_UNKNOWN;
goto fail;
}
err = vaGetImage(decode->va_dpy, surface, 0, 0,
frame->width, frame->height,
img.image_id);
if (err != VA_STATUS_SUCCESS) {
fprintf(stderr, "Error getting an image: %s\n",
vaErrorStr(err));
ret = AVERROR_UNKNOWN;
goto fail;
}
err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
if (err != VA_STATUS_SUCCESS) {
fprintf(stderr, "Error mapping the image buffer: %s\n",
vaErrorStr(err));
ret = AVERROR_UNKNOWN;
goto fail;
}
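            /* NV12 layout: plane 0 is full-height luma, plane 1 is
             * half-height interleaved CbCr, so img.height >> (i > 0) halves
             * the row count for the chroma plane. */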
for (i = 0; i < img.num_planes; i++)
for (j = 0; j < (img.height >> (i > 0)); j++)
avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);
fail:
if (img.buf != VA_INVALID_ID)
vaUnmapBuffer(decode->va_dpy, img.buf);
if (img.image_id != VA_INVALID_ID)
vaDestroyImage(decode->va_dpy, img.image_id);
av_frame_unref(frame);
if (ret < 0)
return ret;
}
}
return 0;
}
int main(int argc, char **argv)
{
AVFormatContext *input_ctx = NULL;
AVStream *video_st = NULL;
AVCodecContext *decoder_ctx = NULL;
const AVCodec *decoder;
AVPacket pkt = { 0 };
AVFrame *frame = NULL;
DecodeContext decode = { NULL };
Display *dpy = NULL;
int va_ver_major, va_ver_minor;
mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
mfxVersion mfx_ver = { { 1, 1 } };
mfxFrameAllocator frame_allocator = {
.pthis = &decode,
.Alloc = frame_alloc,
.Lock = frame_lock,
.Unlock = frame_unlock,
.GetHDL = frame_get_hdl,
.Free = frame_free,
};
AVIOContext *output_ctx = NULL;
int ret, i, err;
av_register_all();
if (argc < 3) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
}
/* open the input file */
ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
if (ret < 0) {
fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
goto finish;
}
/* find the first H.264 video stream */
for (i = 0; i < input_ctx->nb_streams; i++) {
AVStream *st = input_ctx->streams[i];
if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
video_st = st;
else
st->discard = AVDISCARD_ALL;
}
if (!video_st) {
fprintf(stderr, "No H.264 video stream in the input file\n");
goto finish;
}
/* initialize VA-API */
dpy = XOpenDisplay(NULL);
if (!dpy) {
fprintf(stderr, "Cannot open the X display\n");
goto finish;
}
decode.va_dpy = vaGetDisplay(dpy);
if (!decode.va_dpy) {
fprintf(stderr, "Cannot open the VA display\n");
goto finish;
}
err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
if (err != VA_STATUS_SUCCESS) {
fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
goto finish;
}
fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);
/* initialize an MFX session */
err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
if (err != MFX_ERR_NONE) {
fprintf(stderr, "Error initializing an MFX session\n");
goto finish;
}
MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);
/* initialize the decoder */
decoder = avcodec_find_decoder_by_name("h264_qsv");
if (!decoder) {
fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
goto finish;
}
decoder_ctx = avcodec_alloc_context3(decoder);
if (!decoder_ctx) {
ret = AVERROR(ENOMEM);
goto finish;
}
decoder_ctx->codec_id = AV_CODEC_ID_H264;
if (video_st->codec->extradata_size) {
decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
AV_INPUT_BUFFER_PADDING_SIZE);
if (!decoder_ctx->extradata) {
ret = AVERROR(ENOMEM);
goto finish;
}
memcpy(decoder_ctx->extradata, video_st->codec->extradata,
video_st->codec->extradata_size);
decoder_ctx->extradata_size = video_st->codec->extradata_size;
}
decoder_ctx->refcounted_frames = 1;
decoder_ctx->opaque = &decode;
decoder_ctx->get_buffer2 = get_buffer;
decoder_ctx->get_format = get_format;
ret = avcodec_open2(decoder_ctx, NULL, NULL);
if (ret < 0) {
fprintf(stderr, "Error opening the decoder: ");
goto finish;
}
/* open the output stream */
ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Error opening the output context: ");
goto finish;
}
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
goto finish;
}
/* actual decoding */
while (ret >= 0) {
ret = av_read_frame(input_ctx, &pkt);
if (ret < 0)
break;
if (pkt.stream_index == video_st->index)
ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
av_packet_unref(&pkt);
}
/* flush the decoder */
pkt.data = NULL;
pkt.size = 0;
ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
finish:
if (ret < 0) {
char buf[1024];
av_strerror(ret, buf, sizeof(buf));
fprintf(stderr, "%s\n", buf);
}
avformat_close_input(&input_ctx);
av_frame_free(&frame);
if (decode.mfx_session)
MFXClose(decode.mfx_session);
if (decode.va_dpy)
vaTerminate(decode.va_dpy);
if (dpy)
XCloseDisplay(dpy);
if (decoder_ctx)
av_freep(&decoder_ctx->hwaccel_context);
avcodec_free_context(&decoder_ctx);
avio_close(output_ctx);
return ret;
}

View File

@@ -0,0 +1,165 @@
/*
* Copyright (c) 2013 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libavformat/libavcodec demuxing and muxing API example.
*
* Remux streams from one container format to another.
* @example remuxing.c
*/
#include <libavutil/timestamp.h>
#include <libavformat/avformat.h>
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
tag,
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
pkt->stream_index);
}
int main(int argc, char **argv)
{
AVOutputFormat *ofmt = NULL;
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
AVPacket pkt;
const char *in_filename, *out_filename;
int ret, i;
if (argc < 3) {
printf("usage: %s input output\n"
"API example program to remux a media file with libavformat and libavcodec.\n"
"The output format is guessed according to the file extension.\n"
"\n", argv[0]);
return 1;
}
in_filename = argv[1];
out_filename = argv[2];
av_register_all();
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
fprintf(stderr, "Could not open input file '%s'", in_filename);
goto end;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
fprintf(stderr, "Failed to retrieve input stream information");
goto end;
}
av_dump_format(ifmt_ctx, 0, in_filename, 0);
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
if (!ofmt_ctx) {
fprintf(stderr, "Could not create output context\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ofmt = ofmt_ctx->oformat;
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *in_stream = ifmt_ctx->streams[i];
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
if (!out_stream) {
fprintf(stderr, "Failed allocating output stream\n");
ret = AVERROR_UNKNOWN;
goto end;
}
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
if (ret < 0) {
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
goto end;
}
out_stream->codec->codec_tag = 0;
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, out_filename, 1);
if (!(ofmt->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
if (ret < 0) {
fprintf(stderr, "Could not open output file '%s'", out_filename);
goto end;
}
}
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
fprintf(stderr, "Error occurred when opening output file\n");
goto end;
}
while (1) {
AVStream *in_stream, *out_stream;
ret = av_read_frame(ifmt_ctx, &pkt);
if (ret < 0)
break;
in_stream = ifmt_ctx->streams[pkt.stream_index];
out_stream = ofmt_ctx->streams[pkt.stream_index];
log_packet(ifmt_ctx, &pkt, "in");
/* copy packet */
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;
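        /* av_rescale_q_rnd() converts the timestamps between the two stream
         * time bases; AV_ROUND_PASS_MINMAX lets AV_NOPTS_VALUE pass through
         * unchanged instead of being rescaled into a bogus value. */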
log_packet(ofmt_ctx, &pkt, "out");
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
if (ret < 0) {
fprintf(stderr, "Error muxing packet\n");
break;
}
av_free_packet(&pkt);
}
av_write_trailer(ofmt_ctx);
end:
avformat_close_input(&ifmt_ctx);
/* close output */
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0 && ret != AVERROR_EOF) {
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
return 1;
}
return 0;
}

View File

@@ -0,0 +1,214 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @example resampling_audio.c
* libswresample API use example.
*/
#include <libavutil/opt.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
static int get_format_from_sample_fmt(const char **fmt,
enum AVSampleFormat sample_fmt)
{
int i;
struct sample_fmt_entry {
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
} sample_fmt_entries[] = {
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
};
*fmt = NULL;
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
if (sample_fmt == entry->sample_fmt) {
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
return 0;
}
}
fprintf(stderr,
"Sample format %s not supported as output format\n",
av_get_sample_fmt_name(sample_fmt));
return AVERROR(EINVAL);
}
/**
* Fill dst buffer with nb_samples, generated starting from t.
*/
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
{
int i, j;
double tincr = 1.0 / sample_rate, *dstp = dst;
const double c = 2 * M_PI * 440.0;
/* generate sin tone with 440Hz frequency and duplicated channels */
for (i = 0; i < nb_samples; i++) {
*dstp = sin(c * *t);
for (j = 1; j < nb_channels; j++)
dstp[j] = dstp[0];
dstp += nb_channels;
*t += tincr;
}
}
int main(int argc, char **argv)
{
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
int src_rate = 48000, dst_rate = 44100;
uint8_t **src_data = NULL, **dst_data = NULL;
int src_nb_channels = 0, dst_nb_channels = 0;
int src_linesize, dst_linesize;
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
const char *dst_filename = NULL;
FILE *dst_file;
int dst_bufsize;
const char *fmt;
struct SwrContext *swr_ctx;
double t;
int ret;
if (argc != 2) {
fprintf(stderr, "Usage: %s output_file\n"
"API example program to show how to resample an audio stream with libswresample.\n"
"This program generates a series of audio frames, resamples them to a specified "
"output format and rate and saves them to an output file named output_file.\n",
argv[0]);
exit(1);
}
dst_filename = argv[1];
dst_file = fopen(dst_filename, "wb");
if (!dst_file) {
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
exit(1);
}
/* create resampler context */
swr_ctx = swr_alloc();
if (!swr_ctx) {
fprintf(stderr, "Could not allocate resampler context\n");
ret = AVERROR(ENOMEM);
goto end;
}
/* set options */
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
/* initialize the resampling context */
if ((ret = swr_init(swr_ctx)) < 0) {
fprintf(stderr, "Failed to initialize the resampling context\n");
goto end;
}
/* allocate source and destination samples buffers */
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
src_nb_samples, src_sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate source samples\n");
goto end;
}
    /* compute the number of converted samples: buffering is avoided
     * by ensuring that the output buffer will contain at least all the
     * converted input samples */
max_dst_nb_samples = dst_nb_samples =
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
/* buffer is going to be directly written to a rawaudio file, no alignment */
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
dst_nb_samples, dst_sample_fmt, 0);
if (ret < 0) {
fprintf(stderr, "Could not allocate destination samples\n");
goto end;
}
t = 0;
do {
/* generate synthetic audio */
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
/* compute destination number of samples */
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
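        /* swr_get_delay() reports how many samples (expressed here at the
         * source rate) are still buffered inside the resampler; adding them
         * sizes the output for both the new input and the carryover. */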
if (dst_nb_samples > max_dst_nb_samples) {
av_freep(&dst_data[0]);
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
dst_nb_samples, dst_sample_fmt, 1);
if (ret < 0)
break;
max_dst_nb_samples = dst_nb_samples;
}
/* convert to destination format */
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
if (ret < 0) {
fprintf(stderr, "Error while converting\n");
goto end;
}
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
ret, dst_sample_fmt, 1);
if (dst_bufsize < 0) {
fprintf(stderr, "Could not get sample buffer size\n");
goto end;
}
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
} while (t < 10);
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
goto end;
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
end:
fclose(dst_file);
if (src_data)
av_freep(&src_data[0]);
av_freep(&src_data);
if (dst_data)
av_freep(&dst_data[0]);
av_freep(&dst_data);
swr_free(&swr_ctx);
return ret < 0;
}

View File

@@ -0,0 +1,140 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* libswscale API use example.
* @example scaling_video.c
*/
#include <libavutil/imgutils.h>
#include <libavutil/parseutils.h>
#include <libswscale/swscale.h>
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
int width, int height, int frame_index)
{
int x, y;
/* Y */
for (y = 0; y < height; y++)
for (x = 0; x < width; x++)
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
/* Cb and Cr */
for (y = 0; y < height / 2; y++) {
for (x = 0; x < width / 2; x++) {
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
}
}
}
int main(int argc, char **argv)
{
uint8_t *src_data[4], *dst_data[4];
int src_linesize[4], dst_linesize[4];
int src_w = 320, src_h = 240, dst_w, dst_h;
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
const char *dst_size = NULL;
const char *dst_filename = NULL;
FILE *dst_file;
int dst_bufsize;
struct SwsContext *sws_ctx;
int i, ret;
if (argc != 3) {
fprintf(stderr, "Usage: %s output_file output_size\n"
"API example program to show how to scale an image with libswscale.\n"
"This program generates a series of pictures, rescales them to the given "
"output_size and saves them to an output file named output_file\n."
"\n", argv[0]);
exit(1);
}
dst_filename = argv[1];
dst_size = argv[2];
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
fprintf(stderr,
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
dst_size);
exit(1);
}
dst_file = fopen(dst_filename, "wb");
if (!dst_file) {
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
exit(1);
}
/* create scaling context */
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
dst_w, dst_h, dst_pix_fmt,
SWS_BILINEAR, NULL, NULL, NULL);
if (!sws_ctx) {
fprintf(stderr,
"Impossible to create scale context for the conversion "
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
ret = AVERROR(EINVAL);
goto end;
}
/* allocate source and destination image buffers */
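    /* The source image is requested with 16-byte alignment so libswscale can
     * use SIMD-friendly reads on the input planes. */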
if ((ret = av_image_alloc(src_data, src_linesize,
src_w, src_h, src_pix_fmt, 16)) < 0) {
fprintf(stderr, "Could not allocate source image\n");
goto end;
}
/* buffer is going to be written to rawvideo file, no alignment */
if ((ret = av_image_alloc(dst_data, dst_linesize,
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
fprintf(stderr, "Could not allocate destination image\n");
goto end;
}
dst_bufsize = ret;
for (i = 0; i < 100; i++) {
/* generate synthetic video */
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
/* convert to destination format */
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
src_linesize, 0, src_h, dst_data, dst_linesize);
/* write scaled image to file */
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
}
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
end:
fclose(dst_file);
av_freep(&src_data[0]);
av_freep(&dst_data[0]);
sws_freeContext(sws_ctx);
return ret < 0;
}

View File

@@ -0,0 +1,770 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* simple audio converter
*
* @example transcode_aac.c
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
* @author Andreas Unterweger (dustsigns@gmail.com)
*/
#include <stdio.h>
#include "libavformat/avformat.h"
#include "libavformat/avio.h"
#include "libavcodec/avcodec.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/frame.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h"
/** The output bit rate in kbit/s */
#define OUTPUT_BIT_RATE 96000
/** The number of output channels */
#define OUTPUT_CHANNELS 2
/**
* Convert an error code into a text message.
* @param error Error code to be converted
* @return Corresponding error text (not thread-safe)
*/
static const char *get_error_text(const int error)
{
static char error_buffer[255];
av_strerror(error, error_buffer, sizeof(error_buffer));
return error_buffer;
}
/** Open an input file and the required decoder. */
static int open_input_file(const char *filename,
AVFormatContext **input_format_context,
AVCodecContext **input_codec_context)
{
AVCodec *input_codec;
int error;
/** Open the input file to read from it. */
if ((error = avformat_open_input(input_format_context, filename, NULL,
NULL)) < 0) {
fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
filename, get_error_text(error));
*input_format_context = NULL;
return error;
}
/** Get information on the input file (number of streams etc.). */
if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
fprintf(stderr, "Could not open find stream info (error '%s')\n",
get_error_text(error));
avformat_close_input(input_format_context);
return error;
}
/** Make sure that there is only one stream in the input file. */
if ((*input_format_context)->nb_streams != 1) {
fprintf(stderr, "Expected one audio input stream, but found %d\n",
(*input_format_context)->nb_streams);
avformat_close_input(input_format_context);
return AVERROR_EXIT;
}
/** Find a decoder for the audio stream. */
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
fprintf(stderr, "Could not find input codec\n");
avformat_close_input(input_format_context);
return AVERROR_EXIT;
}
/** Open the decoder for the audio stream to use it later. */
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
input_codec, NULL)) < 0) {
fprintf(stderr, "Could not open input codec (error '%s')\n",
get_error_text(error));
avformat_close_input(input_format_context);
return error;
}
/** Save the decoder context for easier access later. */
*input_codec_context = (*input_format_context)->streams[0]->codec;
return 0;
}
/**
* Open an output file and the required encoder.
* Also set some basic encoder parameters.
* Some of these parameters are based on the input file's parameters.
*/
static int open_output_file(const char *filename,
AVCodecContext *input_codec_context,
AVFormatContext **output_format_context,
AVCodecContext **output_codec_context)
{
AVIOContext *output_io_context = NULL;
AVStream *stream = NULL;
AVCodec *output_codec = NULL;
int error;
/** Open the output file to write to it. */
if ((error = avio_open(&output_io_context, filename,
AVIO_FLAG_WRITE)) < 0) {
fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
filename, get_error_text(error));
return error;
}
/** Create a new format context for the output container format. */
if (!(*output_format_context = avformat_alloc_context())) {
fprintf(stderr, "Could not allocate output format context\n");
return AVERROR(ENOMEM);
}
/** Associate the output file (pointer) with the container format context. */
(*output_format_context)->pb = output_io_context;
/** Guess the desired container format based on the file extension. */
if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
NULL))) {
fprintf(stderr, "Could not find output file format\n");
goto cleanup;
}
av_strlcpy((*output_format_context)->filename, filename,
sizeof((*output_format_context)->filename));
/** Find the encoder to be used by its name. */
if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
fprintf(stderr, "Could not find an AAC encoder.\n");
goto cleanup;
}
/** Create a new audio stream in the output file container. */
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
fprintf(stderr, "Could not create new stream\n");
error = AVERROR(ENOMEM);
goto cleanup;
}
/** Save the encoder context for easier access later. */
*output_codec_context = stream->codec;
/**
* Set the basic encoder parameters.
* The input file's sample rate is used to avoid a sample rate conversion.
*/
(*output_codec_context)->channels = OUTPUT_CHANNELS;
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
(*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
/** Allow the use of the experimental AAC encoder */
(*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
/** Set the sample rate for the container. */
stream->time_base.den = input_codec_context->sample_rate;
stream->time_base.num = 1;
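    /**
     * With a time base of 1/sample_rate, a frame's pts is simply the index
     * of its first audio sample, which is why encode_audio_frame() can
     * advance the global pts counter by nb_samples per frame.
     */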
/**
* Some container formats (like MP4) require global headers to be present
* Mark the encoder so that it behaves accordingly.
*/
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
(*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
/** Open the encoder for the audio stream to use it later. */
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
fprintf(stderr, "Could not open output codec (error '%s')\n",
get_error_text(error));
goto cleanup;
}
return 0;
cleanup:
avio_closep(&(*output_format_context)->pb);
avformat_free_context(*output_format_context);
*output_format_context = NULL;
return error < 0 ? error : AVERROR_EXIT;
}
/** Initialize one data packet for reading or writing. */
static void init_packet(AVPacket *packet)
{
av_init_packet(packet);
/** Set the packet data and size so that it is recognized as being empty. */
packet->data = NULL;
packet->size = 0;
}
/** Initialize one audio frame for reading from the input file */
static int init_input_frame(AVFrame **frame)
{
if (!(*frame = av_frame_alloc())) {
fprintf(stderr, "Could not allocate input frame\n");
return AVERROR(ENOMEM);
}
return 0;
}
/**
* Initialize the audio resampler based on the input and output codec settings.
 * If the input and output sample formats differ, a conversion is required;
 * libswresample takes care of this, but requires initialization.
*/
static int init_resampler(AVCodecContext *input_codec_context,
AVCodecContext *output_codec_context,
SwrContext **resample_context)
{
int error;
/**
* Create a resampler context for the conversion.
* Set the conversion parameters.
* Default channel layouts based on the number of channels
* are assumed for simplicity (they are sometimes not detected
* properly by the demuxer and/or decoder).
*/
*resample_context = swr_alloc_set_opts(NULL,
av_get_default_channel_layout(output_codec_context->channels),
output_codec_context->sample_fmt,
output_codec_context->sample_rate,
av_get_default_channel_layout(input_codec_context->channels),
input_codec_context->sample_fmt,
input_codec_context->sample_rate,
0, NULL);
if (!*resample_context) {
fprintf(stderr, "Could not allocate resample context\n");
return AVERROR(ENOMEM);
}
/**
* Perform a sanity check so that the number of converted samples is
* not greater than the number of samples to be converted.
 * If the sample rates differ, this case has to be handled differently.
*/
av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
/** Open the resampler with the specified parameters. */
if ((error = swr_init(*resample_context)) < 0) {
fprintf(stderr, "Could not open resample context\n");
swr_free(resample_context);
return error;
}
return 0;
}
/** Initialize a FIFO buffer for the audio samples to be encoded. */
static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
{
/** Create the FIFO buffer based on the specified output sample format. */
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
output_codec_context->channels, 1))) {
fprintf(stderr, "Could not allocate FIFO\n");
return AVERROR(ENOMEM);
}
return 0;
}
/** Write the header of the output file container. */
static int write_output_file_header(AVFormatContext *output_format_context)
{
int error;
if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
fprintf(stderr, "Could not write output file header (error '%s')\n",
get_error_text(error));
return error;
}
return 0;
}
/** Decode one audio frame from the input file. */
static int decode_audio_frame(AVFrame *frame,
AVFormatContext *input_format_context,
AVCodecContext *input_codec_context,
int *data_present, int *finished)
{
/** Packet used for temporary storage. */
AVPacket input_packet;
int error;
init_packet(&input_packet);
/** Read one audio frame from the input file into a temporary packet. */
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
/** If we are at the end of the file, flush the decoder below. */
if (error == AVERROR_EOF)
*finished = 1;
else {
fprintf(stderr, "Could not read frame (error '%s')\n",
get_error_text(error));
return error;
}
}
/**
* Decode the audio frame stored in the temporary packet.
* The input audio stream decoder is used to do this.
* If we are at the end of the file, pass an empty packet to the decoder
* to flush it.
*/
if ((error = avcodec_decode_audio4(input_codec_context, frame,
data_present, &input_packet)) < 0) {
fprintf(stderr, "Could not decode frame (error '%s')\n",
get_error_text(error));
av_free_packet(&input_packet);
return error;
}
/**
 * If the decoder has not been flushed completely, we are not finished,
 * so this function has to be called again.
*/
if (*finished && *data_present)
*finished = 0;
av_free_packet(&input_packet);
return 0;
}
/**
* Initialize a temporary storage for the specified number of audio samples.
* The conversion requires temporary storage due to the different format.
* The number of audio samples to be allocated is specified in frame_size.
*/
static int init_converted_samples(uint8_t ***converted_input_samples,
AVCodecContext *output_codec_context,
int frame_size)
{
int error;
/**
* Allocate as many pointers as there are audio channels.
* Each pointer will later point to the audio samples of the corresponding
* channels (although it may be NULL for interleaved formats).
*/
if (!(*converted_input_samples = calloc(output_codec_context->channels,
sizeof(**converted_input_samples)))) {
fprintf(stderr, "Could not allocate converted input sample pointers\n");
return AVERROR(ENOMEM);
}
/**
* Allocate memory for the samples of all channels in one consecutive
* block for convenience.
*/
if ((error = av_samples_alloc(*converted_input_samples, NULL,
output_codec_context->channels,
frame_size,
output_codec_context->sample_fmt, 0)) < 0) {
fprintf(stderr,
"Could not allocate converted input samples (error '%s')\n",
get_error_text(error));
av_freep(&(*converted_input_samples)[0]);
free(*converted_input_samples);
return error;
}
return 0;
}
/**
* Convert the input audio samples into the output sample format.
* The conversion happens on a per-frame basis, the size of which is specified
* by frame_size.
*/
static int convert_samples(const uint8_t **input_data,
uint8_t **converted_data, const int frame_size,
SwrContext *resample_context)
{
int error;
/** Convert the samples using the resampler. */
if ((error = swr_convert(resample_context,
converted_data, frame_size,
input_data , frame_size)) < 0) {
fprintf(stderr, "Could not convert input samples (error '%s')\n",
get_error_text(error));
return error;
}
return 0;
}
/** Add converted input audio samples to the FIFO buffer for later processing. */
static int add_samples_to_fifo(AVAudioFifo *fifo,
uint8_t **converted_input_samples,
const int frame_size)
{
int error;
/**
 * Make the FIFO as large as it needs to be to hold both
 * the old and the new samples.
*/
if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
fprintf(stderr, "Could not reallocate FIFO\n");
return error;
}
/** Store the new samples in the FIFO buffer. */
if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
frame_size) < frame_size) {
fprintf(stderr, "Could not write data to FIFO\n");
return AVERROR_EXIT;
}
return 0;
}
/**
 * Read one audio frame from the input file, then decode, convert and store
 * it in the FIFO buffer.
*/
static int read_decode_convert_and_store(AVAudioFifo *fifo,
AVFormatContext *input_format_context,
AVCodecContext *input_codec_context,
AVCodecContext *output_codec_context,
SwrContext *resampler_context,
int *finished)
{
/** Temporary storage of the input samples of the frame read from the file. */
AVFrame *input_frame = NULL;
/** Temporary storage for the converted input samples. */
uint8_t **converted_input_samples = NULL;
int data_present;
int ret = AVERROR_EXIT;
/** Initialize temporary storage for one input frame. */
if (init_input_frame(&input_frame))
goto cleanup;
/** Decode one frame worth of audio samples. */
if (decode_audio_frame(input_frame, input_format_context,
input_codec_context, &data_present, finished))
goto cleanup;
/**
* If we are at the end of the file and there are no more samples
* in the decoder which are delayed, we are actually finished.
* This must not be treated as an error.
*/
if (*finished && !data_present) {
ret = 0;
goto cleanup;
}
/** If there is decoded data, convert and store it */
if (data_present) {
/** Initialize the temporary storage for the converted input samples. */
if (init_converted_samples(&converted_input_samples, output_codec_context,
input_frame->nb_samples))
goto cleanup;
/**
* Convert the input samples to the desired output sample format.
* This requires a temporary storage provided by converted_input_samples.
*/
if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
input_frame->nb_samples, resampler_context))
goto cleanup;
/** Add the converted input samples to the FIFO buffer for later processing. */
if (add_samples_to_fifo(fifo, converted_input_samples,
input_frame->nb_samples))
goto cleanup;
    }
    ret = 0;
cleanup:
if (converted_input_samples) {
av_freep(&converted_input_samples[0]);
free(converted_input_samples);
}
av_frame_free(&input_frame);
return ret;
}
/**
* Initialize one input frame for writing to the output file.
* The frame will be exactly frame_size samples large.
*/
static int init_output_frame(AVFrame **frame,
AVCodecContext *output_codec_context,
int frame_size)
{
int error;
/** Create a new frame to store the audio samples. */
if (!(*frame = av_frame_alloc())) {
fprintf(stderr, "Could not allocate output frame\n");
return AVERROR_EXIT;
}
/**
* Set the frame's parameters, especially its size and format.
* av_frame_get_buffer needs this to allocate memory for the
* audio samples of the frame.
* Default channel layouts based on the number of channels
* are assumed for simplicity.
*/
(*frame)->nb_samples = frame_size;
(*frame)->channel_layout = output_codec_context->channel_layout;
(*frame)->format = output_codec_context->sample_fmt;
(*frame)->sample_rate = output_codec_context->sample_rate;
/**
* Allocate the samples of the created frame. This call will make
* sure that the audio frame can hold as many samples as specified.
*/
if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
fprintf(stderr, "Could allocate output frame samples (error '%s')\n",
get_error_text(error));
av_frame_free(frame);
return error;
}
return 0;
}
/** Global timestamp for the audio frames */
static int64_t pts = 0;
/** Encode one frame worth of audio to the output file. */
static int encode_audio_frame(AVFrame *frame,
AVFormatContext *output_format_context,
AVCodecContext *output_codec_context,
int *data_present)
{
/** Packet used for temporary storage. */
AVPacket output_packet;
int error;
init_packet(&output_packet);
/** Set a timestamp based on the sample rate for the container. */
if (frame) {
frame->pts = pts;
pts += frame->nb_samples;
}
/**
* Encode the audio frame and store it in the temporary packet.
* The output audio stream encoder is used to do this.
*/
if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
frame, data_present)) < 0) {
fprintf(stderr, "Could not encode frame (error '%s')\n",
get_error_text(error));
av_free_packet(&output_packet);
return error;
}
/** Write one audio frame from the temporary packet to the output file. */
if (*data_present) {
if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
fprintf(stderr, "Could not write frame (error '%s')\n",
get_error_text(error));
av_free_packet(&output_packet);
return error;
}
av_free_packet(&output_packet);
}
return 0;
}
/**
* Load one audio frame from the FIFO buffer, encode and write it to the
* output file.
*/
static int load_encode_and_write(AVAudioFifo *fifo,
AVFormatContext *output_format_context,
AVCodecContext *output_codec_context)
{
/** Temporary storage of the output samples of the frame written to the file. */
AVFrame *output_frame;
    /**
     * Use the maximum number of possible samples per frame.
     * If there are fewer samples in the FIFO buffer than the maximum
     * possible frame size, use that smaller number; otherwise, use the
     * maximum possible frame size.
     */
const int frame_size = FFMIN(av_audio_fifo_size(fifo),
output_codec_context->frame_size);
int data_written;
/** Initialize temporary storage for one output frame. */
if (init_output_frame(&output_frame, output_codec_context, frame_size))
return AVERROR_EXIT;
/**
* Read as many samples from the FIFO buffer as required to fill the frame.
* The samples are stored in the frame temporarily.
*/
if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
fprintf(stderr, "Could not read data from FIFO\n");
av_frame_free(&output_frame);
return AVERROR_EXIT;
}
/** Encode one frame worth of audio samples. */
if (encode_audio_frame(output_frame, output_format_context,
output_codec_context, &data_written)) {
av_frame_free(&output_frame);
return AVERROR_EXIT;
}
av_frame_free(&output_frame);
return 0;
}
/** Write the trailer of the output file container. */
static int write_output_file_trailer(AVFormatContext *output_format_context)
{
int error;
if ((error = av_write_trailer(output_format_context)) < 0) {
fprintf(stderr, "Could not write output file trailer (error '%s')\n",
get_error_text(error));
return error;
}
return 0;
}
/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
SwrContext *resample_context = NULL;
AVAudioFifo *fifo = NULL;
int ret = AVERROR_EXIT;
if (argc < 3) {
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
exit(1);
}
/** Register all codecs and formats so that they can be used. */
av_register_all();
/** Open the input file for reading. */
if (open_input_file(argv[1], &input_format_context,
&input_codec_context))
goto cleanup;
/** Open the output file for writing. */
if (open_output_file(argv[2], input_codec_context,
&output_format_context, &output_codec_context))
goto cleanup;
/** Initialize the resampler to be able to convert audio sample formats. */
if (init_resampler(input_codec_context, output_codec_context,
&resample_context))
goto cleanup;
/** Initialize the FIFO buffer to store audio samples to be encoded. */
if (init_fifo(&fifo, output_codec_context))
goto cleanup;
/** Write the header of the output file container. */
if (write_output_file_header(output_format_context))
goto cleanup;
/**
* Loop as long as we have input samples to read or output samples
* to write; abort as soon as we have neither.
*/
while (1) {
/** Use the encoder's desired frame size for processing. */
const int output_frame_size = output_codec_context->frame_size;
int finished = 0;
/**
* Make sure that there is one frame worth of samples in the FIFO
* buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, we
         * need the FIFO buffer to store as many frames worth of input
         * samples as it takes to make up at least one frame worth of
         * output samples.
*/
while (av_audio_fifo_size(fifo) < output_frame_size) {
/**
* Decode one frame worth of audio samples, convert it to the
* output sample format and put it into the FIFO buffer.
*/
if (read_decode_convert_and_store(fifo, input_format_context,
input_codec_context,
output_codec_context,
resample_context, &finished))
goto cleanup;
/**
* If we are at the end of the input file, we continue
* encoding the remaining audio samples to the output file.
*/
if (finished)
break;
}
/**
* If we have enough samples for the encoder, we encode them.
* At the end of the file, we pass the remaining samples to
* the encoder.
*/
while (av_audio_fifo_size(fifo) >= output_frame_size ||
(finished && av_audio_fifo_size(fifo) > 0))
/**
* Take one frame worth of audio samples from the FIFO buffer,
* encode it and write it to the output file.
*/
if (load_encode_and_write(fifo, output_format_context,
output_codec_context))
goto cleanup;
/**
* If we are at the end of the input file and have encoded
* all remaining samples, we can exit this loop and finish.
*/
if (finished) {
int data_written;
/** Flush the encoder as it may have delayed frames. */
do {
if (encode_audio_frame(NULL, output_format_context,
output_codec_context, &data_written))
goto cleanup;
} while (data_written);
break;
}
}
/** Write the trailer of the output file container. */
if (write_output_file_trailer(output_format_context))
goto cleanup;
ret = 0;
cleanup:
if (fifo)
av_audio_fifo_free(fifo);
swr_free(&resample_context);
if (output_codec_context)
avcodec_close(output_codec_context);
if (output_format_context) {
avio_closep(&output_format_context->pb);
avformat_free_context(output_format_context);
}
if (input_codec_context)
avcodec_close(input_codec_context);
if (input_format_context)
avformat_close_input(&input_format_context);
return ret;
}

View File

@@ -0,0 +1,583 @@
/*
* Copyright (c) 2010 Nicolas George
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2014 Andrey Utkin
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
/**
* @file
* API example for demuxing, decoding, filtering, encoding and muxing
* @example transcoding.c
*/
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>
static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
AVFilterContext *buffersink_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;
static int open_input_file(const char *filename)
{
int ret;
unsigned int i;
ifmt_ctx = NULL;
if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
return ret;
}
if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
return ret;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
AVStream *stream;
AVCodecContext *codec_ctx;
stream = ifmt_ctx->streams[i];
codec_ctx = stream->codec;
/* Reencode video & audio and remux subtitles etc. */
if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* Open decoder */
ret = avcodec_open2(codec_ctx,
avcodec_find_decoder(codec_ctx->codec_id), NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
return ret;
}
}
}
av_dump_format(ifmt_ctx, 0, filename, 0);
return 0;
}
static int open_output_file(const char *filename)
{
AVStream *out_stream;
AVStream *in_stream;
AVCodecContext *dec_ctx, *enc_ctx;
AVCodec *encoder;
int ret;
unsigned int i;
ofmt_ctx = NULL;
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
if (!ofmt_ctx) {
av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
return AVERROR_UNKNOWN;
}
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
out_stream = avformat_new_stream(ofmt_ctx, NULL);
if (!out_stream) {
av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
return AVERROR_UNKNOWN;
}
in_stream = ifmt_ctx->streams[i];
dec_ctx = in_stream->codec;
enc_ctx = out_stream->codec;
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
|| dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
/* in this example, we choose transcoding to same codec */
encoder = avcodec_find_encoder(dec_ctx->codec_id);
if (!encoder) {
av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
return AVERROR_INVALIDDATA;
}
/* In this example, we transcode to same properties (picture size,
* sample rate etc.). These properties can be changed for output
* streams easily using filters */
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
enc_ctx->height = dec_ctx->height;
enc_ctx->width = dec_ctx->width;
enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
/* take first format from list of supported formats */
enc_ctx->pix_fmt = encoder->pix_fmts[0];
/* video time_base can be set to whatever is handy and supported by encoder */
enc_ctx->time_base = dec_ctx->time_base;
} else {
enc_ctx->sample_rate = dec_ctx->sample_rate;
enc_ctx->channel_layout = dec_ctx->channel_layout;
enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
/* take first format from list of supported formats */
enc_ctx->sample_fmt = encoder->sample_fmts[0];
enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
}
/* Third parameter can be used to pass settings to encoder */
ret = avcodec_open2(enc_ctx, encoder, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream #%u\n", i);
return ret;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
av_log(NULL, AV_LOG_FATAL, "Elementary stream #%d is of unknown type, cannot proceed\n", i);
return AVERROR_INVALIDDATA;
} else {
/* if this stream must be remuxed */
ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
ifmt_ctx->streams[i]->codec);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
return ret;
}
}
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
av_dump_format(ofmt_ctx, 0, filename, 1);
if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
return ret;
}
}
/* init muxer, write output file header */
ret = avformat_write_header(ofmt_ctx, NULL);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
return ret;
}
return 0;
}
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
AVCodecContext *enc_ctx, const char *filter_spec)
{
char args[512];
int ret = 0;
AVFilter *buffersrc = NULL;
AVFilter *buffersink = NULL;
AVFilterContext *buffersrc_ctx = NULL;
AVFilterContext *buffersink_ctx = NULL;
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();
AVFilterGraph *filter_graph = avfilter_graph_alloc();
if (!outputs || !inputs || !filter_graph) {
ret = AVERROR(ENOMEM);
goto end;
}
if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
buffersrc = avfilter_get_by_name("buffer");
buffersink = avfilter_get_by_name("buffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
snprintf(args, sizeof(args),
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
dec_ctx->time_base.num, dec_ctx->time_base.den,
dec_ctx->sample_aspect_ratio.num,
dec_ctx->sample_aspect_ratio.den);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
(uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
goto end;
}
} else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
buffersrc = avfilter_get_by_name("abuffer");
buffersink = avfilter_get_by_name("abuffersink");
if (!buffersrc || !buffersink) {
av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
ret = AVERROR_UNKNOWN;
goto end;
}
if (!dec_ctx->channel_layout)
dec_ctx->channel_layout =
av_get_default_channel_layout(dec_ctx->channels);
snprintf(args, sizeof(args),
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
av_get_sample_fmt_name(dec_ctx->sample_fmt),
dec_ctx->channel_layout);
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
args, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
goto end;
}
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
NULL, NULL, filter_graph);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
(uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
(uint8_t*)&enc_ctx->channel_layout,
sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
goto end;
}
ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
(uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
AV_OPT_SEARCH_CHILDREN);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
goto end;
}
} else {
ret = AVERROR_UNKNOWN;
goto end;
}
/* Endpoints for the filter graph. */
outputs->name = av_strdup("in");
outputs->filter_ctx = buffersrc_ctx;
outputs->pad_idx = 0;
outputs->next = NULL;
inputs->name = av_strdup("out");
inputs->filter_ctx = buffersink_ctx;
inputs->pad_idx = 0;
inputs->next = NULL;
if (!outputs->name || !inputs->name) {
ret = AVERROR(ENOMEM);
goto end;
}
if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
&inputs, &outputs, NULL)) < 0)
goto end;
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
goto end;
/* Fill FilteringContext */
fctx->buffersrc_ctx = buffersrc_ctx;
fctx->buffersink_ctx = buffersink_ctx;
fctx->filter_graph = filter_graph;
end:
avfilter_inout_free(&inputs);
avfilter_inout_free(&outputs);
return ret;
}
static int init_filters(void)
{
const char *filter_spec;
unsigned int i;
int ret;
filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
if (!filter_ctx)
return AVERROR(ENOMEM);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
filter_ctx[i].buffersrc_ctx = NULL;
filter_ctx[i].buffersink_ctx = NULL;
filter_ctx[i].filter_graph = NULL;
if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
|| ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
continue;
if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
filter_spec = "null"; /* passthrough (dummy) filter for video */
else
filter_spec = "anull"; /* passthrough (dummy) filter for audio */
ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
ofmt_ctx->streams[i]->codec, filter_spec);
if (ret)
return ret;
}
return 0;
}
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
int ret;
int got_frame_local;
AVPacket enc_pkt;
int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
(ifmt_ctx->streams[stream_index]->codec->codec_type ==
AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;
if (!got_frame)
got_frame = &got_frame_local;
av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
/* encode filtered frame */
enc_pkt.data = NULL;
enc_pkt.size = 0;
av_init_packet(&enc_pkt);
ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
filt_frame, got_frame);
av_frame_free(&filt_frame);
if (ret < 0)
return ret;
if (!(*got_frame))
return 0;
/* prepare packet for muxing */
enc_pkt.stream_index = stream_index;
av_packet_rescale_ts(&enc_pkt,
ofmt_ctx->streams[stream_index]->codec->time_base,
ofmt_ctx->streams[stream_index]->time_base);
av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
/* mux encoded frame */
ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
return ret;
}
static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
int ret;
AVFrame *filt_frame;
av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
/* push the decoded frame into the filtergraph */
ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
frame, 0);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
return ret;
}
/* pull filtered frames from the filtergraph */
while (1) {
filt_frame = av_frame_alloc();
if (!filt_frame) {
ret = AVERROR(ENOMEM);
break;
}
av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
filt_frame);
if (ret < 0) {
            /* If no more frames are available for output, av_buffersink_get_frame()
             * returns AVERROR(EAGAIN); if the graph was flushed and no more
             * frames remain, it returns AVERROR_EOF. Rewrite the return code
             * to 0 to report normal procedure completion.
             */
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
ret = 0;
av_frame_free(&filt_frame);
break;
}
filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
ret = encode_write_frame(filt_frame, stream_index, NULL);
if (ret < 0)
break;
}
return ret;
}
static int flush_encoder(unsigned int stream_index)
{
int ret;
int got_frame;
if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
AV_CODEC_CAP_DELAY))
return 0;
while (1) {
av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
ret = encode_write_frame(NULL, stream_index, &got_frame);
if (ret < 0)
break;
if (!got_frame)
return 0;
}
return ret;
}
int main(int argc, char **argv)
{
int ret;
AVPacket packet = { .data = NULL, .size = 0 };
AVFrame *frame = NULL;
enum AVMediaType type;
unsigned int stream_index;
unsigned int i;
int got_frame;
int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
if (argc != 3) {
av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
return 1;
}
av_register_all();
avfilter_register_all();
if ((ret = open_input_file(argv[1])) < 0)
goto end;
if ((ret = open_output_file(argv[2])) < 0)
goto end;
if ((ret = init_filters()) < 0)
goto end;
/* read all packets */
while (1) {
if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
break;
stream_index = packet.stream_index;
type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
stream_index);
if (filter_ctx[stream_index].filter_graph) {
av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
frame = av_frame_alloc();
if (!frame) {
ret = AVERROR(ENOMEM);
break;
}
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ifmt_ctx->streams[stream_index]->codec->time_base);
dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
avcodec_decode_audio4;
ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
&got_frame, &packet);
if (ret < 0) {
av_frame_free(&frame);
av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
break;
}
if (got_frame) {
frame->pts = av_frame_get_best_effort_timestamp(frame);
ret = filter_encode_write_frame(frame, stream_index);
av_frame_free(&frame);
if (ret < 0)
goto end;
} else {
av_frame_free(&frame);
}
} else {
/* remux this frame without reencoding */
av_packet_rescale_ts(&packet,
ifmt_ctx->streams[stream_index]->time_base,
ofmt_ctx->streams[stream_index]->time_base);
ret = av_interleaved_write_frame(ofmt_ctx, &packet);
if (ret < 0)
goto end;
}
av_free_packet(&packet);
}
/* flush filters and encoders */
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
/* flush filter */
if (!filter_ctx[i].filter_graph)
continue;
ret = filter_encode_write_frame(NULL, i);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
goto end;
}
/* flush encoder */
ret = flush_encoder(i);
if (ret < 0) {
av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
goto end;
}
}
av_write_trailer(ofmt_ctx);
end:
av_free_packet(&packet);
av_frame_free(&frame);
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
avcodec_close(ifmt_ctx->streams[i]->codec);
if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
avcodec_close(ofmt_ctx->streams[i]->codec);
if (filter_ctx && filter_ctx[i].filter_graph)
avfilter_graph_free(&filter_ctx[i].filter_graph);
}
av_free(filter_ctx);
avformat_close_input(&ifmt_ctx);
if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
avio_closep(&ofmt_ctx->pb);
avformat_free_context(ofmt_ctx);
if (ret < 0)
av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));
return ret ? 1 : 0;
}

View File

@@ -0,0 +1,621 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg FAQ
@titlepage
@center @titlefont{FFmpeg FAQ}
@end titlepage
@top
@contents
@chapter General Questions
@section Why doesn't FFmpeg support feature [xyz]?
Because no one has taken on that task yet. FFmpeg development is
driven by the tasks that are important to the individual developers.
If there is a feature that is important to you, the best way to get
it implemented is to undertake the task yourself or sponsor a developer.
@section FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?
No. Windows DLLs are not portable; they are bloated and often slow.
Moreover, FFmpeg strives to support all codecs natively.
A DLL loader is not conducive to that goal.
@section I cannot read this file although this format seems to be supported by ffmpeg.
Even if ffmpeg can read the container format, it may not support all its
codecs. Please consult the supported codec list in the ffmpeg
documentation.
@section Which codecs are supported by Windows?
Windows does not support standard formats like MPEG very well, unless you
install some additional codecs.
The following list of video codecs should work on most Windows systems:
@table @option
@item msmpeg4v2
.avi/.asf
@item msmpeg4
.asf only
@item wmv1
.asf only
@item wmv2
.asf only
@item mpeg4
Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
@item mpeg1video
.mpg only
@end table
Note, ASF files often have .wmv or .wma extensions in Windows. It should also
be mentioned that Microsoft claims a patent on the ASF format, and may sue
or threaten users who create ASF files with non-Microsoft software. It is
strongly advised to avoid ASF where possible.
The following list of audio codecs should work on most Windows systems:
@table @option
@item adpcm_ima_wav
@item adpcm_ms
@item pcm_s16le
always
@item libmp3lame
If some MP3 codec like LAME is installed.
@end table
@chapter Compilation
@section @code{error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'}
This is a bug in gcc. Do not report it to us. Instead, please report it to
the gcc developers. Note that we will not add workarounds for gcc bugs.
Also note that (some of) the gcc developers believe this is not a bug or
not a bug they should fix:
@url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
Then again, some of them do not know the difference between an undecidable
problem and an NP-hard problem...
@section I have installed this library with my distro's package manager. Why does @command{configure} not see it?
Distributions usually split libraries in several packages. The main package
contains the files necessary to run programs using the library. The
development package contains the files necessary to build programs using the
library. Sometimes, docs and/or data are in a separate package too.
To build FFmpeg, you need to install the development package. It is usually
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
build is finished, but be sure to keep the main package.
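For example, on a Debian-style system (the package name is only
illustrative):
@example
apt-get install libopus-dev
@end example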
@section How do I make @command{pkg-config} find my libraries?
Somewhere along with your libraries, there is a @file{.pc} file (or several)
in a @file{pkgconfig} directory. You need to set environment variables to
point @command{pkg-config} to these files.
If you need to @emph{add} directories to @command{pkg-config}'s search list
(typical use case: library installed separately), add it to
@code{$PKG_CONFIG_PATH}:
@example
export PKG_CONFIG_PATH=/opt/x264/lib/pkgconfig:/opt/opus/lib/pkgconfig
@end example
If you need to @emph{replace} @command{pkg-config}'s search list
(typical use case: cross-compiling), set it in
@code{$PKG_CONFIG_LIBDIR}:
@example
export PKG_CONFIG_LIBDIR=/home/me/cross/usr/lib/pkgconfig:/home/me/cross/usr/local/lib/pkgconfig
@end example
If you need to know the library's internal dependencies (typical use: static
linking), add the @code{--static} option to @command{pkg-config}:
@example
./configure --pkg-config-flags=--static
@end example
@section How do I use @command{pkg-config} when cross-compiling?
The best way is to install @command{pkg-config} in your cross-compilation
environment. It will automatically use the cross-compilation libraries.
You can also use @command{pkg-config} from the host environment by
specifying explicitly @code{--pkg-config=pkg-config} to @command{configure}.
In that case, you must point @command{pkg-config} to the correct directories
using the @code{PKG_CONFIG_LIBDIR}, as explained in the previous entry.
As an intermediate solution, you can place in your cross-compilation
environment a script that calls the host @command{pkg-config} with
@code{PKG_CONFIG_LIBDIR} set. That script can look like that:
@example
#!/bin/sh
PKG_CONFIG_LIBDIR=/path/to/cross/lib/pkgconfig
export PKG_CONFIG_LIBDIR
exec /usr/bin/pkg-config "$@@"
@end example
@chapter Usage
@section ffmpeg does not work; what is wrong?
Try a @code{make distclean} in the ffmpeg source directory before the build.
If this does not help, see
@url{http://ffmpeg.org/bugreports.html}.
@section How do I encode single pictures into movies?
First, rename your pictures to follow a numerical sequence.
For example, img1.jpg, img2.jpg, img3.jpg,...
Then you may run:
@example
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
@end example
Notice that @samp{%d} is replaced by the image number.
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc.
Use the @option{-start_number} option to declare a starting number for
the sequence. This is useful if your sequence does not start with
@file{img001.jpg} but is still in a numerical order. The following
example will start with @file{img100.jpg}:
@example
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
@end example
If you have a large number of pictures to rename, you can use the
following command to ease the burden. The command, using Bourne
shell syntax, symbolically links all files in the current directory
that match @code{*jpg} to the @file{/tmp} directory in the sequence of
@file{img001.jpg}, @file{img002.jpg} and so on.
@example
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
@end example
If you want to sequence them by oldest modified first, substitute
@code{$(ls -r -t *jpg)} in place of @code{*jpg}.
Then run:
@example
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
@end example
The same logic is used for any image format that ffmpeg reads.
You can also use @command{cat} to pipe images to ffmpeg:
@example
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
@end example
@section How do I encode movie to single pictures?
Use:
@example
ffmpeg -i movie.mpg movie%d.jpg
@end example
The @file{movie.mpg} used as input will be converted to
@file{movie1.jpg}, @file{movie2.jpg}, etc...
Instead of relying on file format self-recognition, you may also use
@table @option
@item -c:v ppm
@item -c:v png
@item -c:v mjpeg
@end table
to force the encoding.
Applying that to the previous example:
@example
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
@end example
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
@section Why do I see a slight quality degradation with multithreaded MPEG* encoding?
For multithreaded MPEG* encoding, the encoded slices must be independent,
otherwise thread n would practically have to wait for n-1 to finish, so it's
quite logical that there is a small reduction of quality. This is not a bug.
@section How can I read from the standard input or write to the standard output?
Use @file{-} as file name.
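For example (file names are illustrative); note that when writing to
standard output the format must be given explicitly, since @file{-} has
no file extension:
@example
cat input.wav | ffmpeg -i - output.mp3
ffmpeg -i input.wav -f mp3 - > output.mp3
@end example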
@section -f jpeg doesn't work.
Try '-f image2 test%d.jpg'.
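Spelled out with an illustrative input file:
@example
ffmpeg -i input.mpg -f image2 test%d.jpg
@end example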
@section Why can I not change the frame rate?
Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
Choose a different codec with the -c:v command line option.
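For example (file names are illustrative), switching to the native
MPEG-4 encoder, which accepts arbitrary frame rates:
@example
ffmpeg -i input.mpg -r 24 -c:v mpeg4 output.avi
@end example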
@section How do I encode Xvid or DivX video with ffmpeg?
Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
standard (note that there are many other coding formats that use this
same standard). Thus, use '-c:v mpeg4' to encode in these formats. The
default fourcc stored in an MPEG-4-coded file will be 'FMP4'. If you want
a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will
force the fourcc 'xvid' to be stored as the video fourcc rather than the
default.
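For example (file names are illustrative):
@example
ffmpeg -i input.avi -c:v mpeg4 -vtag xvid output.avi
@end example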
@section Which are good parameters for encoding high quality MPEG-4?
'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
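Assembled into a two-pass command line (file names are illustrative;
treat this as a starting point, not a definitive recipe):
@example
ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 \
       -cmp 2 -subcmp 2 -g 300 -an -pass 1 -f avi /dev/null
ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 \
       -cmp 2 -subcmp 2 -g 300 -pass 2 output.avi
@end example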
@section Which are good parameters for encoding high quality MPEG-1/MPEG-2?
'-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2'
but beware the '-g 100' might cause problems with some decoders.
Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
@section Interlaced video looks very bad when encoded with ffmpeg, what is wrong?
You should use '-flags +ilme+ildct' and maybe '-flags +alt' for interlaced
material, and try '-top 0/1' if the result looks really messed-up.
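For example (file names are illustrative):
@example
ffmpeg -i interlaced.vob -c:v mpeg2video -flags +ilme+ildct -top 1 output.mpg
@end example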
@section How can I read DirectShow files?
If you have built FFmpeg with @code{./configure --enable-avisynth}
(only possible on MinGW/Cygwin platforms),
then you may use any file that DirectShow can read as input.
Just create an "input.avs" text file with this single line ...
@example
DirectShowSource("C:\path to your file\yourfile.asf")
@end example
... and then feed that text file to ffmpeg:
@example
ffmpeg -i input.avs
@end example
For ANY other help on AviSynth, please visit the
@uref{http://www.avisynth.org/, AviSynth homepage}.
@section How can I join video files?
To "join" video files is quite ambiguous. The following list explains the
different kinds of "joining" and points out how those are addressed in
FFmpeg. To join video files may mean:
@itemize
@item
To put them one after the other: this is called to @emph{concatenate} them
(in short: concat) and is addressed
@ref{How can I concatenate video files, in this very faq}.
@item
To put them together in the same file, to let the user choose between the
different versions (example: different audio languages): this is called to
@emph{multiplex} them together (in short: mux), and is done by simply
invoking ffmpeg with several @option{-i} options (see the sketch after
this list).
@item
For audio, to put all channels together in a single stream (example: two
mono streams into one stereo stream): this is sometimes called to
@emph{merge} them, and can be done using the
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
@item
For audio, to play one on top of the other: this is called to @emph{mix}
them, and can be done by first merging them into a single stream and then
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
the channels at will.
@item
For video, to display both together, side by side or one on top of a part of
the other; it can be done using the
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
@end itemize
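As a sketch of the multiplexing case above, the following hypothetical
command (file names and stream layout are assumptions) muxes one video
stream and two audio languages into a single file:
@example
ffmpeg -i video.mkv -i audio_en.mp3 -i audio_fr.mp3 \
       -map 0:v -map 1:a -map 2:a -c copy output.mkv
@end example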
@anchor{How can I concatenate video files}
@section How can I concatenate video files?
There are several solutions, depending on the exact circumstances.
@subsection Concatenating using the concat @emph{filter}
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
@code{concat}} filter designed specifically for that, with examples in the
documentation. This operation is recommended if you need to re-encode.
@subsection Concatenating using the concat @emph{demuxer}
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
your format doesn't support file level concatenation.
@subsection Concatenating using the concat @emph{protocol} (file level)
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
@code{concat}} protocol designed specifically for that, with examples in the
documentation.
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow one to concatenate
video by merely concatenating the files containing them.
Hence you may concatenate your multimedia files by first transcoding them to
these privileged formats, then using the humble @code{cat} command (or the
equally humble @code{copy} under Windows), and finally transcoding back to your
format of choice.
@example
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
@end example
Additionally, you can use the @code{concat} protocol instead of @code{cat} or
@code{copy} which will avoid creation of a potentially huge intermediate file.
@example
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
@end example
Note that you may need to escape the character "|" which is special for many
shells.
Another option is to use named pipes, should your platform support them:
@example
mkfifo intermediate1.mpg
mkfifo intermediate2.mpg
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
cat intermediate1.mpg intermediate2.mpg |\
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
@end example
@subsection Concatenating using raw audio and video
Similarly, the yuv4mpegpipe format and the raw video and raw audio codecs
also allow concatenation, and the transcoding step is almost lossless.
When using multiple yuv4mpegpipe(s), the first line needs to be discarded
from all but the first stream. This can be accomplished by piping through
@code{tail} as seen below. Note that when piping through @code{tail} you
must use command grouping, @code{@{ ;@}}, to background properly.
For example, let's say we want to concatenate two FLV files into an
output.flv file:
@example
mkfifo temp1.a
mkfifo temp1.v
mkfifo temp2.a
mkfifo temp2.v
mkfifo all.a
mkfifo all.v
ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
@{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; @} &
cat temp1.a temp2.a > all.a &
cat temp1.v temp2.v > all.v &
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
-f yuv4mpegpipe -i all.v \
-y output.flv
rm temp[12].[av] all.[av]
@end example
@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.
Use @option{-dumpgraph -} to find out exactly where the channel layout is
lost.
Most likely, it is through @code{auto-inserted aresample}. Try to understand
why the converting filter was needed at that place.
Just before the output is a likely place, as @option{-f lavfi} currently
only supports packed S16.
Then insert the correct @code{aformat} explicitly in the filtergraph,
specifying the exact format.
@example
aformat=sample_fmts=s16:channel_layouts=stereo
@end example
@section Why does FFmpeg not see the subtitles in my VOB file?
VOB and a few other formats do not have a global header that describes
everything present in the file. Instead, applications are supposed to scan
the file to see what it contains. Since VOB files are frequently large, only
the beginning is scanned. If the subtitles happen only later in the file,
they will not be initially detected.
Some applications, including the @code{ffmpeg} command-line tool, can only
work with streams that were detected during the initial scan; streams that
are detected later are ignored.
The size of the initial scan is controlled by two options: @code{probesize}
(default ~5 MB) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
the subtitle stream to be detected, both values must be large enough.
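For example (the values are illustrative), to scan a much larger initial
portion of the file:
@example
ffmpeg -probesize 50M -analyzeduration 100M -i input.vob output.mkv
@end example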
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
The @option{-sameq} option meant "same quantizer", and made sense only in a
very limited set of cases. Unfortunately, a lot of people mistook it for
"same quality" and used it in places where it did not make sense: it had
roughly the expected visible effect, but achieved it in a very inefficient
way.
Each encoder has its own set of options to set the quality-vs-size balance,
use the options for the encoder you are using to set the quality level to a
point acceptable for your tastes. The most common options to do that are
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
of the encoder you chose.
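For example (the value is illustrative), with the native MPEG-4 encoder:
@example
ffmpeg -i input.avi -c:v mpeg4 -qscale:v 3 output.avi
@end example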
@section I have a stretched video, why does scaling not fix it?
A lot of video codecs and formats can store the @emph{aspect ratio} of the
video: this is the ratio between the width and the height of either the full
image (DAR, display aspect ratio) or individual pixels (SAR, sample aspect
ratio). For example, EGA screens at resolution 640×350 had 4:3 DAR and 35:48
SAR.
Most still image processing works with square pixels, i.e. 1:1 SAR, but a lot
of video standards, especially from the analog-to-digital transition era, use
non-square pixels.
Most processing filters in FFmpeg handle the aspect ratio to avoid
stretching the image: cropping adjusts the DAR to keep the SAR constant,
scaling adjusts the SAR to keep the DAR constant.
If you want to stretch, or “unstretch”, the image, you need to override the
information with the
@url{http://ffmpeg.org/ffmpeg-filters.html#setdar_002c-setsar, @code{setdar or setsar filters}}.
Do not forget to examine carefully the original video to check whether the
stretching comes from the image or from the aspect ratio information.
For example, to fix a badly encoded EGA capture, use the following commands,
either the first one to upscale to square pixels or the second one to set
the correct aspect ratio or the third one to avoid transcoding (may not work
depending on the format / codec / player / phase of the moon):
@example
ffmpeg -i ega_screen.nut -vf scale=640:480,setsar=1 ega_screen_scaled.nut
ffmpeg -i ega_screen.nut -vf setdar=4/3 ega_screen_anamorphic.nut
ffmpeg -i ega_screen.nut -aspect 4/3 -c copy ega_screen_overridden.nut
@end example
@chapter Development
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
Yes. Check the @file{doc/examples} directory in the source
repository, also available online at:
@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}.
Examples are also installed by default, usually in
@code{$PREFIX/share/ffmpeg/examples}.
You may also read the Developers Guide of the FFmpeg documentation. Alternatively,
examine the source code of one of the many open source projects that
already incorporate FFmpeg (see @url{projects.html}).
@section Can you support my C compiler XXX?
It depends. If your compiler is C99-compliant, then patches to support
it are likely to be welcome if they do not pollute the source code
with @code{#ifdef}s related to the compiler.
@section Is Microsoft Visual C++ supported?
Yes. Please see the @uref{platform.html, Microsoft Visual C++}
section in the FFmpeg documentation.
@section Can you add automake, libtool or autoconf support?
No. These tools are too bloated and they complicate the build.
@section Why not rewrite FFmpeg in object-oriented C++?
FFmpeg is already organized in a highly modular manner and does not need to
be rewritten in a formal object language. Further, many of the developers
favor straight C; it works for them. For more arguments on this matter,
read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
@section Why are the ffmpeg programs devoid of debugging symbols?
The build process creates @command{ffmpeg_g}, @command{ffplay_g}, etc. which
contain full debug information. Those binaries are stripped to create
@command{ffmpeg}, @command{ffplay}, etc. If you need the debug information, use
the *_g versions.
@section I do not like the LGPL, can I contribute code under the GPL instead?
Yes, as long as the code is optional and can easily and cleanly be placed
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
or filter would be OK under GPL while a bug fix to LGPL code would not.
@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.
FFmpeg builds static libraries by default. In static libraries, dependencies
are not handled. That has two consequences. First, you must specify the
libraries in dependency order: @code{-lavdevice} must come before
@code{-lavformat}, @code{-lavutil} must come after everything else, etc.
Second, external libraries that are used in FFmpeg have to be specified too.
An easy way to get the full list of required libraries in dependency order
is to use @code{pkg-config}.
@example
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
@end example
See @file{doc/examples/Makefile} and @file{doc/examples/pc-uninstalled} for
more details.
@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.
FFmpeg is a pure C project, so to use the libraries within your C++ application
you need to explicitly state that you are using a C library. You can do this by
encompassing your FFmpeg includes using @code{extern "C"}.
See @url{http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3}
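For example, wrap the FFmpeg includes like this:
@example
extern "C" @{
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
@}
@end example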
@section I'm using libavutil from within my C++ application but the compiler complains about 'UINT64_C' was not declared in this scope
FFmpeg is a pure C project using C99 math features; in order to enable C++
to use them you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS.
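For example (the compiler invocation is illustrative):
@example
g++ -D__STDC_CONSTANT_MACROS -c myapp.cpp $(pkg-config --cflags libavutil)
@end example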
@section I have a file in memory / an API different from the *open/*read libc functions; how do I use it with libavformat?
You have to create a custom AVIOContext using @code{avio_alloc_context};
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in the MPlayer or MPlayer2 sources.
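A minimal sketch, assuming the data already sits in a memory buffer (the
@code{buffer_data} struct and @code{read_packet} callback are illustrative
names, not part of the API); the @file{doc/examples/avio_reading.c} example
covers the same ground in full:
@example
struct buffer_data @{
    const uint8_t *ptr; /* current read position in the buffer */
    size_t size;        /* bytes left in the buffer */
@};

static int read_packet(void *opaque, uint8_t *buf, int buf_size)
@{
    struct buffer_data *bd = opaque;
    buf_size = FFMIN(buf_size, bd->size);
    if (!buf_size)
        return AVERROR_EOF;
    memcpy(buf, bd->ptr, buf_size); /* hand libavformat the next chunk */
    bd->ptr  += buf_size;
    bd->size -= buf_size;
    return buf_size;
@}

/* ... then, instead of passing a file name to avformat_open_input(): */
AVIOContext *avio_ctx = avio_alloc_context(av_malloc(4096), 4096,
                                           0 /* not writable */, &bd,
                                           read_packet, NULL, NULL);
AVFormatContext *fmt_ctx = avformat_alloc_context();
fmt_ctx->pb = avio_ctx;
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
@end example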
@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm?
See @url{http://www.ffmpeg.org/~michael/}.
@section How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?
Even if peculiar since it is network oriented, RTP is a container like any
other. You have to @emph{demux} RTP before feeding the payload to libavcodec.
In this specific case please look at RFC 4629 to see how it should be done.
@section AVStream.r_frame_rate is wrong, it is much larger than the frame rate.
@code{r_frame_rate} is NOT the average frame rate, it is the smallest frame rate
that can accurately represent all timestamps. So no, it is not
wrong if it is larger than the average!
For example, if you have mixed 25 and 30 fps content, then @code{r_frame_rate}
will be 150 (it is the least common multiple).
If you are looking for the average frame rate, see @code{AVStream.avg_frame_rate}.
@section Why is @code{make fate} not running all tests?
Make sure you have the fate-suite samples and that the @code{SAMPLES} Make
variable, the @code{FATE_SAMPLES} environment variable, or the @code{--samples}
@command{configure} option is set to the right path.
@section Why is @code{make fate} not finding the samples?
Do you happen to have a @code{~} character in the samples path to indicate a
home directory? The value is used in ways where the shell cannot expand it,
causing FATE to not find files. Just replace @code{~} by the full path.
@bye

View File

@@ -0,0 +1,206 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Automated Testing Environment
@titlepage
@center @titlefont{FFmpeg Automated Testing Environment}
@end titlepage
@node Top
@top
@contents
@chapter Introduction
FATE is an extended regression suite on the client-side and a means
for results aggregation and presentation on the server-side.
The first part of this document explains how you can use FATE from
your FFmpeg source directory to test your ffmpeg binary. The second
part describes how you can run FATE to submit the results to FFmpeg's
FATE server.
In any case, you can have a look at the publicly viewable FATE results
by visiting this website:
@url{http://fate.ffmpeg.org/}
This is especially recommended for all people contributing source
code to FFmpeg, as it makes it easy to see whether some test broke on
some platform with their recent contribution. This usually happens on
platforms the developers could not test on.
If you want to submit your results, be sure to check that your
combination of CPU, OS and compiler is not already listed on the
above-mentioned website.
In the third part you can find a comprehensive listing of FATE makefile
targets and variables.
@chapter Using FATE from your FFmpeg source directory
If you want to run FATE on your machine you need to have the samples
in place. You can get the samples via the build target fate-rsync.
Use this command from the top-level source directory:
@example
make fate-rsync SAMPLES=fate-suite/
make fate SAMPLES=fate-suite/
@end example
The above commands set the samples location by passing a makefile
variable via command line. It is also possible to set the samples
location at source configuration time by invoking configure with
@option{--samples=<path to the samples directory>}. Afterwards you can
invoke the makefile targets without setting the @var{SAMPLES} makefile
variable. This is illustrated by the following commands:
@example
./configure --samples=fate-suite/
make fate-rsync
make fate
@end example
Yet another way to tell FATE about the location of the sample
directory is by making sure the environment variable FATE_SAMPLES
contains the path to your samples directory. This can be achieved
by e.g. putting that variable in your shell profile or by setting
it in your interactive session.
@example
FATE_SAMPLES=fate-suite/ make fate
@end example
@float NOTE
Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
@end float
To use a custom wrapper to run the test, pass @option{--target-exec} to
@command{configure} or set the @var{TARGET_EXEC} Make variable.
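For example (paths and target are illustrative), to run the suite through
@command{qemu-arm}:
@example
./configure --samples=fate-suite/ --target-exec='qemu-arm -L /usr/arm-linux-gnueabi'
make fate
@end example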
@chapter Submitting the results to the FFmpeg result aggregation server
To submit your results to the server you should run fate through the
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.
@example
tests/fate.sh /path/to/fate_config
@end example
A configuration file template with comments describing the individual
configuration variables can be found at @file{doc/fate_config.sh.template}.
@ifhtml
The mentioned configuration template is also available here:
@verbatiminclude fate_config.sh.template
@end ifhtml
Create a configuration that suits your needs, based on the configuration
template. The @env{slot} configuration variable can be any string that is not
yet used, but it is suggested that you name it adhering to the following
pattern @samp{@var{arch}-@var{os}-@var{compiler}-@var{compiler version}}. The
configuration file itself will be sourced in a shell script, therefore all
shell features may be used. This enables you to setup the environment as you
need it for your build.
For your first test runs the @env{fate_recv} variable should be empty or
commented out. This will run everything as normal except that it will omit
the submission of the results to the server. The following files should be
present in $workdir as specified in the configuration file:
@itemize
@item configure.log
@item compile.log
@item test.log
@item report
@item version
@end itemize
When you have everything working properly you can create an SSH key pair
and send the public key to the FATE server administrator who can be contacted
at the email address @email{fate-admin@@ffmpeg.org}.
Configure your SSH client to use public key authentication with that key
when connecting to the FATE server. Also do not forget to check the identity
of the server and to accept its host key. This can usually be achieved by
running your SSH client manually and killing it after you accepted the key.
The FATE server's fingerprint is:
@table @samp
@item RSA
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
@item ECDSA
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
@end table
If you have problems connecting to the FATE server, it may help to try out
the @command{ssh} command with one or more @option{-v} options. You should
get detailed output concerning your SSH configuration and the authentication
process.
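For example, using the same account and host as the report submission
command from @file{doc/fate_config.sh.template}:
@example
ssh -v -T fate@@fate.ffmpeg.org
@end example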
The only thing left is to automate the execution of the fate.sh script and
the synchronisation of the samples directory.
@chapter FATE makefile targets and variables
@section Makefile targets
@table @option
@item fate-rsync
Download/synchronize sample files to the configured samples directory.
@item fate-list
Will list all fate/regression test targets.
@item fate
Run the FATE test suite (requires the fate-suite dataset).
@end table
@section Makefile variables
@table @env
@item V
Verbosity level, can be set to 0, 1 or 2.
@itemize
@item 0: show just the test arguments
@item 1: show just the command used in the test
@item 2: show everything
@end itemize
@item SAMPLES
Specify or override the path to the FATE samples at make time; it has
meaning only while running the regression tests.
@item THREADS
Specify how many threads to use while running regression tests; it is
quite useful for detecting thread-related regressions.
@item THREAD_TYPE
Specify which threading strategy to test, either @samp{slice} or @samp{frame};
by default @samp{slice+frame} is used.
@item CPUFLAGS
Specify CPU flags.
@item TARGET_EXEC
Specify or override the wrapper used to run the tests.
The @env{TARGET_EXEC} option provides a way to run FATE wrapped in
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
through @command{ssh}.
@item GEN
Set to @samp{1} to generate the missing or mismatched references.
@end table
@section Examples
@example
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
@end example

View File

@@ -0,0 +1,202 @@
FFmpeg Automated Testing Environment
************************************
Table of Contents
*****************
FFmpeg Automated Testing Environment
1 Introduction
2 Using FATE from your FFmpeg source directory
3 Submitting the results to the FFmpeg result aggregation server
4 FATE makefile targets and variables
4.1 Makefile targets
4.2 Makefile variables
4.3 Examples
1 Introduction
**************
FATE is an extended regression suite on the client-side and a means for
results aggregation and presentation on the server-side.
The first part of this document explains how you can use FATE from
your FFmpeg source directory to test your ffmpeg binary. The second
part describes how you can run FATE to submit the results to FFmpeg's
FATE server.
In any case, you can have a look at the publicly viewable FATE results
by visiting this website:
`http://fate.ffmpeg.org/'
This is especially recommended for all people contributing source
code to FFmpeg, as it makes it easy to see whether some test broke on
some platform with their recent contribution. This usually happens on
platforms the developers could not test on.
If you want to submit your results, be sure to check that your
combination of CPU, OS and compiler is not already listed on the
above-mentioned website.
In the third part you can find a comprehensive listing of FATE
makefile targets and variables.
2 Using FATE from your FFmpeg source directory
**********************************************
If you want to run FATE on your machine you need to have the samples in
place. You can get the samples via the build target fate-rsync. Use
this command from the top-level source directory:
make fate-rsync SAMPLES=fate-suite/
make fate SAMPLES=fate-suite/
The above commands set the samples location by passing a makefile
variable via command line. It is also possible to set the samples
location at source configuration time by invoking configure with
`--samples=<path to the samples directory>'. Afterwards you can invoke
the makefile targets without setting the SAMPLES makefile variable.
This is illustrated by the following commands:
./configure --samples=fate-suite/
make fate-rsync
make fate
Yet another way to tell FATE about the location of the sample
directory is by making sure the environment variable FATE_SAMPLES
contains the path to your samples directory. This can be achieved by
e.g. putting that variable in your shell profile or by setting it in
your interactive session.
FATE_SAMPLES=fate-suite/ make fate
Do not put a '~' character in the samples path to indicate a home
directory. Because of shell nuances, this will cause FATE to fail.
NOTE
To use a custom wrapper to run the test, pass `--target-exec' to
`configure' or set the TARGET_EXEC Make variable.
3 Submitting the results to the FFmpeg result aggregation server
****************************************************************
To submit your results to the server you should run fate through the
shell script `tests/fate.sh' from the FFmpeg sources. This script needs
to be invoked with a configuration file as its first argument.
tests/fate.sh /path/to/fate_config
A configuration file template with comments describing the individual
configuration variables can be found at `doc/fate_config.sh.template'.
Create a configuration that suits your needs, based on the
configuration template. The `slot' configuration variable can be any
string that is not yet used, but it is suggested that you name it
adhering to the following pattern `ARCH-OS-COMPILER-COMPILER VERSION'.
The configuration file itself will be sourced in a shell script,
therefore all shell features may be used. This enables you to setup the
environment as you need it for your build.
For your first test runs the `fate_recv' variable should be empty or
commented out. This will run everything as normal except that it will
omit the submission of the results to the server. The following files
should be present in $workdir as specified in the configuration file:
* configure.log
* compile.log
* test.log
* report
* version
When you have everything working properly you can create an SSH key
pair and send the public key to the FATE server administrator who can
be contacted at the email address <fate-admin@ffmpeg.org>.
Configure your SSH client to use public key authentication with that
key when connecting to the FATE server. Also do not forget to check the
identity of the server and to accept its host key. This can usually be
achieved by running your SSH client manually and killing it after you
accepted the key. The FATE server's fingerprint is:
`RSA'
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
`ECDSA'
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
If you have problems connecting to the FATE server, it may help to
try out the `ssh' command with one or more `-v' options. You should get
detailed output concerning your SSH configuration and the authentication
process.
The only thing left is to automate the execution of the fate.sh
script and the synchronisation of the samples directory.
4 FATE makefile targets and variables
*************************************
4.1 Makefile targets
====================
`fate-rsync'
Download/synchronize sample files to the configured samples
directory.
`fate-list'
Will list all fate/regression test targets.
`fate'
Run the FATE test suite (requires the fate-suite dataset).
4.2 Makefile variables
======================
`V'
Verbosity level, can be set to 0, 1 or 2.
* 0: show just the test arguments
* 1: show just the command used in the test
* 2: show everything
`SAMPLES'
Specify or override the path to the FATE samples at make time; it
is only meaningful while running the regression tests.
`THREADS'
Specify how many threads to use while running regression tests; it
is quite useful for detecting thread-related regressions.
`THREAD_TYPE'
Specify which threading strategy to test, either `slice' or
`frame'; by default both are tested (`slice+frame').
`CPUFLAGS'
Specify CPU flags.
`TARGET_EXEC'
Specify or override the wrapper used to run the tests. The
`TARGET_EXEC' option provides a way to run FATE wrapped in
`valgrind', `qemu-user' or `wine' or on remote targets through
`ssh'.
`GEN'
Set to `1' to generate the missing or mismatched references.
4.3 Examples
============
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
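As another sketch, the suite can be run through a wrapper on an
emulated target (the qemu-arm invocation and sysroot path are
assumptions about your cross setup):
make TARGET_EXEC="qemu-arm -L /usr/arm-linux-gnueabi" fate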

Local Variables:
coding: utf-8
End:

View File

@@ -0,0 +1,30 @@
slot= # some unique identifier
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
#branch=release/2.6 # the branch to test
samples= # path to samples directory
workdir= # directory in which to do all the work
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
comment= # optional description
build_only= # set to "yes" for a compile-only instance that skips tests
# the following are optional and map to configure options
arch=
cpu=
cross_prefix=
as=
cc=
ld=
target_os=
sysroot=
target_exec=
target_path=
target_samples=
extra_cflags=
extra_ldflags=
extra_libs=
extra_conf= # extra configure options not covered above
#make= # name of GNU make if not 'make'
makeopts= # extra options passed to 'make'
#tar= # command to create a tar archive from its arguments on stdout,
# defaults to 'tar c'
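# A hypothetical minimal configuration, shown only to illustrate the
# format (slot name, paths and compiler are invented):
#
# slot=x86_64-linux-gcc-4.9
# repo=git://source.ffmpeg.org/ffmpeg.git
# samples=$HOME/fate/samples
# workdir=$HOME/fate/work
# comment="nightly x86_64 Linux box"
# cc=gcc-4.9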

View File

@@ -0,0 +1,325 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "FFMPEG-BITSTREAM-FILTERS 1"
.TH FFMPEG-BITSTREAM-FILTERS 1 " " " " " "
.SH "NAME"
ffmpeg\-bitstream\-filters \- FFmpeg bitstream filters
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
This document describes the bitstream filters provided by the
libavcodec library.
.PP
A bitstream filter operates on the encoded stream data, and performs
bitstream level modifications without performing decoding.
.SH "BITSTREAM FILTERS"
.IX Header "BITSTREAM FILTERS"
When you configure your FFmpeg build, all the supported bitstream
filters are enabled by default. You can list all available ones using
the configure option \f(CW\*(C`\-\-list\-bsfs\*(C'\fR.
.PP
You can disable all the bitstream filters using the configure option
\&\f(CW\*(C`\-\-disable\-bsfs\*(C'\fR, and selectively enable any bitstream filter using
the option \f(CW\*(C`\-\-enable\-bsf=BSF\*(C'\fR, or you can disable a particular
bitstream filter using the option \f(CW\*(C`\-\-disable\-bsf=BSF\*(C'\fR.
.PP
The option \f(CW\*(C`\-bsfs\*(C'\fR of the ff* tools will display the list of
all the supported bitstream filters included in your build.
.PP
The ff* tools have a \-bsf option applied per stream, taking a
comma-separated list of filters, whose parameters follow the filter
name after a '='.
.PP
.Vb 1
\& ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
.Ve
.PP
Below is a description of the currently available bitstream filters,
with their parameters, if any.
.Sh "aac_adtstoasc"
.IX Subsection "aac_adtstoasc"
Convert \s-1MPEG\-2/4\s0 \s-1AAC\s0 \s-1ADTS\s0 to \s-1MPEG\-4\s0 Audio Specific Configuration
bitstream filter.
.PP
This filter creates an \s-1MPEG\-4\s0 AudioSpecificConfig from an \s-1MPEG\-2/4\s0
\&\s-1ADTS\s0 header and removes the \s-1ADTS\s0 header.
.PP
This is required for example when copying an \s-1AAC\s0 stream from a raw
\&\s-1ADTS\s0 \s-1AAC\s0 container to a \s-1FLV\s0 or a \s-1MOV/MP4\s0 file.
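.PP
For example, a hedged sketch of copying an \s-1AAC\s0 audio stream out
of an \s-1MPEG\-2\s0 transport stream into \s-1FLV\s0 (file names are
illustrative):
.PP
.Vb 1
\& ffmpeg -i input.ts -vn -c:a copy -bsf:a aac_adtstoasc output.flv
.Ve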
.Sh "chomp"
.IX Subsection "chomp"
Remove zero padding at the end of a packet.
.Sh "dump_extra"
.IX Subsection "dump_extra"
Add extradata to the beginning of the filtered packets.
.PP
The additional argument specifies which packets should be filtered.
It accepts the values:
.IP "\fBa\fR" 4
.IX Item "a"
add extradata to all key packets, but only if \fIlocal_header\fR is
set in the \fBflags2\fR codec context field
.IP "\fBk\fR" 4
.IX Item "k"
add extradata to all key packets
.IP "\fBe\fR" 4
.IX Item "e"
add extradata to all packets
.PP
If not specified, \fBk\fR is assumed.
.PP
For example the following \fBffmpeg\fR command forces a global
header (thus disabling individual packet headers) in the H.264 packets
generated by the \f(CW\*(C`libx264\*(C'\fR encoder, but corrects them by adding
the header stored in extradata to the key packets:
.PP
.Vb 1
\& ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
.Ve
.Sh "h264_mp4toannexb"
.IX Subsection "h264_mp4toannexb"
Convert an H.264 bitstream from length prefixed mode to start code
prefixed mode (as defined in the Annex B of the ITU-T H.264
specification).
.PP
This is required by some streaming formats, typically the \s-1MPEG\-2\s0
transport stream format (\*(L"mpegts\*(R").
.PP
For example to remux an \s-1MP4\s0 file containing an H.264 stream to mpegts
format with \fBffmpeg\fR, you can use the command:
.PP
.Vb 1
\& ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
.Ve
.Sh "imxdump"
.IX Subsection "imxdump"
Modifies the bitstream to fit in \s-1MOV\s0 and to be usable by the Final Cut
Pro decoder. This filter only applies to the mpeg2video codec, and is
likely not needed for Final Cut Pro 7 and newer with the appropriate
\&\fB\-tag:v\fR.
.PP
For example, to remux 30 MB/sec \s-1NTSC\s0 \s-1IMX\s0 to \s-1MOV:\s0
.PP
.Vb 1
\& ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
.Ve
.Sh "mjpeg2jpeg"
.IX Subsection "mjpeg2jpeg"
Convert \s-1MJPEG/AVI1\s0 packets to full \s-1JPEG/JFIF\s0 packets.
.PP
\&\s-1MJPEG\s0 is a video codec wherein each video frame is essentially a
\&\s-1JPEG\s0 image. The individual frames can be extracted without loss,
e.g. by
.PP
.Vb 1
\& ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
.Ve
.PP
Unfortunately, these chunks are incomplete \s-1JPEG\s0 images, because
they lack the \s-1DHT\s0 segment required for decoding. Quoting from
<\fBhttp://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml\fR>:
.PP
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
commented that \*(L"\s-1MJPEG\s0, or at least the \s-1MJPEG\s0 in AVIs having the
\&\s-1MJPG\s0 fourcc, is restricted \s-1JPEG\s0 with a fixed \*(-- and *omitted* \*(--
Huffman table. The \s-1JPEG\s0 must be YCbCr colorspace, it must be 4:2:2,
and it must use basic Huffman encoding, not arithmetic or
progressive. . . . You can indeed extract the \s-1MJPEG\s0 frames and
decode them with a regular \s-1JPEG\s0 decoder, but you have to prepend
the \s-1DHT\s0 segment to them, or else the decoder won't have any idea
how to decompress the data. The exact table necessary is given in
the OpenDML spec.\*(R"
.PP
This bitstream filter patches the header of frames extracted from an \s-1MJPEG\s0
stream (carrying the \s-1AVI1\s0 header \s-1ID\s0 and lacking a \s-1DHT\s0 segment) to
produce fully qualified \s-1JPEG\s0 images.
.PP
.Vb 3
\& ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
\& exiftran -i -9 frame*.jpg
\& ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
.Ve
.Sh "mjpega_dump_header"
.IX Subsection "mjpega_dump_header"
.Sh "movsub"
.IX Subsection "movsub"
.Sh "mp3_header_decompress"
.IX Subsection "mp3_header_decompress"
.Sh "mpeg4_unpack_bframes"
.IX Subsection "mpeg4_unpack_bframes"
Unpack DivX-style packed B\-frames.
.PP
DivX-style packed B\-frames are not valid \s-1MPEG\-4\s0 and were only a
workaround for the broken Video for Windows subsystem.
They use more space, can cause minor \s-1AV\s0 sync issues, require more
\&\s-1CPU\s0 power to decode (unless the player has some decoded picture queue
to compensate the 2,0,2,0 frame per packet style) and cause
trouble if copied into a standard container like mp4 or mpeg\-ps/ts,
because \s-1MPEG\-4\s0 decoders may not be able to decode them, since they are
not valid \s-1MPEG\-4\s0.
.PP
For example to fix an \s-1AVI\s0 file containing an \s-1MPEG\-4\s0 stream with
DivX-style packed B\-frames using \fBffmpeg\fR, you can use the command:
.PP
.Vb 1
\& ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi
.Ve
.Sh "noise"
.IX Subsection "noise"
Damages the contents of packets without damaging the container. Can be
used for fuzzing or testing error resilience/concealment.
.PP
Parameters:
A numeric string whose value is related to how often output bytes
will be modified. Values less than or equal to 0 are forbidden; the
lower the value, the more frequently bytes are modified, with 1
meaning every byte is modified.
.PP
.Vb 1
\& ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
.Ve
.PP
applies the modification to every byte.
.Sh "remove_extra"
.IX Subsection "remove_extra"
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1), \fIlibavcodec\fR\|(3)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.

View File

@@ -0,0 +1,254 @@
=encoding utf8
=head1 NAME
ffmpeg-bitstream-filters - FFmpeg bitstream filters
=head1 DESCRIPTION
This document describes the bitstream filters provided by the
libavcodec library.
A bitstream filter operates on the encoded stream data, and performs
bitstream level modifications without performing decoding.
=head1 BITSTREAM FILTERS
When you configure your FFmpeg build, all the supported bitstream
filters are enabled by default. You can list all available ones using
the configure option C<--list-bsfs>.
You can disable all the bitstream filters using the configure option
C<--disable-bsfs>, and selectively enable any bitstream filter using
the option C<--enable-bsf=BSF>, or you can disable a particular
bitstream filter using the option C<--disable-bsf=BSF>.
The option C<-bsfs> of the ff* tools will display the list of
all the supported bitstream filters included in your build.
The ff* tools have a -bsf option applied per stream, taking a
comma-separated list of filters, whose parameters follow the filter
name after a '='.
ffmpeg -i INPUT -c:v copy -bsf:v filter1[=opt1=str1/opt2=str2][,filter2] OUTPUT
Below is a description of the currently available bitstream filters,
with their parameters, if any.
=head2 aac_adtstoasc
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
bitstream filter.
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
ADTS header and removes the ADTS header.
This is required for example when copying an AAC stream from a raw
ADTS AAC container to a FLV or a MOV/MP4 file.
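For example, a hedged sketch of copying an AAC audio stream out of an
MPEG-2 transport stream into FLV (file names are illustrative):
ffmpeg -i input.ts -vn -c:a copy -bsf:a aac_adtstoasc output.flv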
=head2 chomp
Remove zero padding at the end of a packet.
=head2 dump_extra
Add extradata to the beginning of the filtered packets.
The additional argument specifies which packets should be filtered.
It accepts the values:
=over 4
=item B<a>
add extradata to all key packets, but only if I<local_header> is
set in the B<flags2> codec context field
=item B<k>
add extradata to all key packets
=item B<e>
add extradata to all packets
=back
If not specified, B<k> is assumed.
For example the following B<ffmpeg> command forces a global
header (thus disabling individual packet headers) in the H.264 packets
generated by the C<libx264> encoder, but corrects them by adding
the header stored in extradata to the key packets:
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
=head2 h264_mp4toannexb
Convert an H.264 bitstream from length prefixed mode to start code
prefixed mode (as defined in the Annex B of the ITU-T H.264
specification).
This is required by some streaming formats, typically the MPEG-2
transport stream format ("mpegts").
For example to remux an MP4 file containing an H.264 stream to mpegts
format with B<ffmpeg>, you can use the command:
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
=head2 imxdump
Modifies the bitstream to fit in MOV and to be usable by the Final Cut
Pro decoder. This filter only applies to the mpeg2video codec, and is
likely not needed for Final Cut Pro 7 and newer with the appropriate
B<-tag:v>.
For example, to remux 30 MB/sec NTSC IMX to MOV:
ffmpeg -i input.mxf -c copy -bsf:v imxdump -tag:v mx3n output.mov
=head2 mjpeg2jpeg
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
MJPEG is a video codec wherein each video frame is essentially a
JPEG image. The individual frames can be extracted without loss,
e.g. by
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
Unfortunately, these chunks are incomplete JPEG images, because
they lack the DHT segment required for decoding. Quoting from
E<lt>B<http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml>E<gt>:
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
commented that "MJPEG, or at least the MJPEG in AVIs having the
MJPG fourcc, is restricted JPEG with a fixed -- and *omitted* --
Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
and it must use basic Huffman encoding, not arithmetic or
progressive. . . . You can indeed extract the MJPEG frames and
decode them with a regular JPEG decoder, but you have to prepend
the DHT segment to them, or else the decoder won't have any idea
how to decompress the data. The exact table necessary is given in
the OpenDML spec."
This bitstream filter patches the header of frames extracted from an MJPEG
stream (carrying the AVI1 header ID and lacking a DHT segment) to
produce fully qualified JPEG images.
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
exiftran -i -9 frame*.jpg
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
=head2 mjpega_dump_header
=head2 movsub
=head2 mp3_header_decompress
=head2 mpeg4_unpack_bframes
Unpack DivX-style packed B-frames.
DivX-style packed B-frames are not valid MPEG-4 and were only a
workaround for the broken Video for Windows subsystem.
They use more space, can cause minor AV sync issues, require more
CPU power to decode (unless the player has some decoded picture queue
to compensate the 2,0,2,0 frame per packet style) and cause
trouble if copied into a standard container like mp4 or mpeg-ps/ts,
because MPEG-4 decoders may not be able to decode them, since they are
not valid MPEG-4.
For example to fix an AVI file containing an MPEG-4 stream with
DivX-style packed B-frames using B<ffmpeg>, you can use the command:
ffmpeg -i INPUT.avi -codec copy -bsf:v mpeg4_unpack_bframes OUTPUT.avi
=head2 noise
Damages the contents of packets without damaging the container. Can be
used for fuzzing or testing error resilience/concealment.
Parameters:
A numeric string whose value is related to how often output bytes
will be modified. Values less than or equal to 0 are forbidden; the
lower the value, the more frequently bytes are modified, with 1
meaning every byte is modified.
ffmpeg -i INPUT -c copy -bsf noise[=1] output.mkv
applies the modification to every byte.
=head2 remove_extra
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.

View File

@@ -0,0 +1,46 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Bitstream Filters Documentation
@titlepage
@center @titlefont{FFmpeg Bitstream Filters Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the bitstream filters provided by the
libavcodec library.
A bitstream filter operates on the encoded stream data, and performs
bitstream level modifications without performing decoding.
@c man end DESCRIPTION
@include bitstream_filters.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavcodec.html,libavcodec}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-bitstream-filters
@settitle FFmpeg bitstream filters
@end ignore
@bye

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,43 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Codecs Documentation
@titlepage
@center @titlefont{FFmpeg Codecs Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the codecs (decoders and encoders) provided by
the libavcodec library.
@c man end DESCRIPTION
@include codecs.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavcodec.html,libavcodec}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-codecs
@settitle FFmpeg codecs
@end ignore
@bye

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,43 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Devices Documentation
@titlepage
@center @titlefont{FFmpeg Devices Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the input and output devices provided by the
libavdevice library.
@c man end DESCRIPTION
@include devices.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavdevice.html,libavdevice}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-devices
@settitle FFmpeg devices
@end ignore
@bye

View File

@@ -0,0 +1,43 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Filters Documentation
@titlepage
@center @titlefont{FFmpeg Filters Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes filters, sources, and sinks provided by the
libavfilter library.
@c man end DESCRIPTION
@include filters.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavfilter.html,libavfilter}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-filters
@settitle FFmpeg filters
@end ignore
@bye

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,43 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Formats Documentation
@titlepage
@center @titlefont{FFmpeg Formats Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the supported formats (muxers and demuxers)
provided by the libavformat library.
@c man end DESCRIPTION
@include formats.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavformat.html,libavformat}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-formats
@settitle FFmpeg formats
@end ignore
@bye

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,43 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Protocols Documentation
@titlepage
@center @titlefont{FFmpeg Protocols Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes the input and output protocols provided by the
libavformat library.
@c man end DESCRIPTION
@include protocols.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavformat.html,libavformat}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-protocols
@settitle FFmpeg protocols
@end ignore
@bye

View File

@@ -0,0 +1,410 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "FFMPEG-RESAMPLER 1"
.TH FFMPEG-RESAMPLER 1 " " " " " "
.SH "NAME"
ffmpeg\-resampler \- FFmpeg Resampler
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
The FFmpeg resampler provides a high-level interface to the
libswresample library audio resampling utilities. In particular it
allows one to perform audio resampling, audio channel layout rematrixing,
and audio format and packing layout conversion.
.SH "RESAMPLER OPTIONS"
.IX Header "RESAMPLER OPTIONS"
The audio resampler supports the following named options.
.PP
Options may be set by specifying \-\fIoption\fR \fIvalue\fR in the
FFmpeg tools, \fIoption\fR=\fIvalue\fR for the aresample filter,
by setting the value explicitly in the
\&\f(CW\*(C`SwrContext\*(C'\fR options or using the \fIlibavutil/opt.h\fR \s-1API\s0 for
programmatic use.
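.PP
For example, a hedged sketch of setting a few of these options through
the aresample filter (the values are illustrative only):
.PP
.Vb 1
\& ffmpeg -i input.wav -af aresample=osr=48000:dither_method=triangular_hp output.wav
.Ve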
.IP "\fBich, in_channel_count\fR" 4
.IX Item "ich, in_channel_count"
Set the number of input channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
\&\fBin_channel_layout\fR is set.
.IP "\fBoch, out_channel_count\fR" 4
.IX Item "och, out_channel_count"
Set the number of output channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
\&\fBout_channel_layout\fR is set.
.IP "\fBuch, used_channel_count\fR" 4
.IX Item "uch, used_channel_count"
Set the number of used input channels. Default value is 0. This option is
only used for special remapping.
.IP "\fBisr, in_sample_rate\fR" 4
.IX Item "isr, in_sample_rate"
Set the input sample rate. Default value is 0.
.IP "\fBosr, out_sample_rate\fR" 4
.IX Item "osr, out_sample_rate"
Set the output sample rate. Default value is 0.
.IP "\fBisf, in_sample_fmt\fR" 4
.IX Item "isf, in_sample_fmt"
Specify the input sample format. It is set by default to \f(CW\*(C`none\*(C'\fR.
.IP "\fBosf, out_sample_fmt\fR" 4
.IX Item "osf, out_sample_fmt"
Specify the output sample format. It is set by default to \f(CW\*(C`none\*(C'\fR.
.IP "\fBtsf, internal_sample_fmt\fR" 4
.IX Item "tsf, internal_sample_fmt"
Set the internal sample format. Default value is \f(CW\*(C`none\*(C'\fR.
This will automatically be chosen when it is not explicitly set.
.IP "\fBicl, in_channel_layout\fR" 4
.IX Item "icl, in_channel_layout"
.PD 0
.IP "\fBocl, out_channel_layout\fR" 4
.IX Item "ocl, out_channel_layout"
.PD
Set the input/output channel layout.
.Sp
See \fBthe Channel Layout section in the \f(BIffmpeg\-utils\fB\|(1) manual\fR
for the required syntax.
.IP "\fBclev, center_mix_level\fR" 4
.IX Item "clev, center_mix_level"
Set the center mix level. It is a value expressed in deciBel, and must be
in the interval [\-32,32].
.IP "\fBslev, surround_mix_level\fR" 4
.IX Item "slev, surround_mix_level"
Set the surround mix level. It is a value expressed in deciBel, and must
be in the interval [\-32,32].
.IP "\fBlfe_mix_level\fR" 4
.IX Item "lfe_mix_level"
Set \s-1LFE\s0 mix into non \s-1LFE\s0 level. It is used when there is a \s-1LFE\s0 input but no
\&\s-1LFE\s0 output. It is a value expressed in deciBel, and must
be in the interval [\-32,32].
.IP "\fBrmvol, rematrix_volume\fR" 4
.IX Item "rmvol, rematrix_volume"
Set rematrix volume. Default value is 1.0.
.IP "\fBrematrix_maxval\fR" 4
.IX Item "rematrix_maxval"
Set maximum output value for rematrixing.
This can be used to choose between preventing clipping and preventing
volume reduction. A value of 1.0 prevents clipping.
.IP "\fBflags, swr_flags\fR" 4
.IX Item "flags, swr_flags"
Set flags used by the converter. Default value is 0.
.Sp
It supports the following individual flags:
.RS 4
.IP "\fBres\fR" 4
.IX Item "res"
force resampling, this flag forces resampling to be used even when the
input and output sample rates match.
.RE
.RS 4
.RE
.IP "\fBdither_scale\fR" 4
.IX Item "dither_scale"
Set the dither scale. Default value is 1.
.IP "\fBdither_method\fR" 4
.IX Item "dither_method"
Set dither method. Default value is 0.
.Sp
Supported values:
.RS 4
.IP "\fBrectangular\fR" 4
.IX Item "rectangular"
select rectangular dither
.IP "\fBtriangular\fR" 4
.IX Item "triangular"
select triangular dither
.IP "\fBtriangular_hp\fR" 4
.IX Item "triangular_hp"
select triangular dither with high pass
.IP "\fBlipshitz\fR" 4
.IX Item "lipshitz"
select lipshitz noise shaping dither
.IP "\fBshibata\fR" 4
.IX Item "shibata"
select shibata noise shaping dither
.IP "\fBlow_shibata\fR" 4
.IX Item "low_shibata"
select low shibata noise shaping dither
.IP "\fBhigh_shibata\fR" 4
.IX Item "high_shibata"
select high shibata noise shaping dither
.IP "\fBf_weighted\fR" 4
.IX Item "f_weighted"
select f\-weighted noise shaping dither
.IP "\fBmodified_e_weighted\fR" 4
.IX Item "modified_e_weighted"
select modified-e-weighted noise shaping dither
.IP "\fBimproved_e_weighted\fR" 4
.IX Item "improved_e_weighted"
select improved-e-weighted noise shaping dither
.RE
.RS 4
.RE
.IP "\fBresampler\fR" 4
.IX Item "resampler"
Set resampling engine. Default value is swr.
.Sp
Supported values:
.RS 4
.IP "\fBswr\fR" 4
.IX Item "swr"
select the native \s-1SW\s0 Resampler; filter options precision and cheby are not
applicable in this case.
.IP "\fBsoxr\fR" 4
.IX Item "soxr"
select the SoX Resampler (where available); compensation, and filter options
filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
case.
.RE
.RS 4
.RE
.IP "\fBfilter_size\fR" 4
.IX Item "filter_size"
For swr only, set resampling filter size, default value is 32.
.IP "\fBphase_shift\fR" 4
.IX Item "phase_shift"
For swr only, set resampling phase shift, default value is 10, and must be in
the interval [0,30].
.IP "\fBlinear_interp\fR" 4
.IX Item "linear_interp"
Use Linear Interpolation if set to 1, default value is 0.
.IP "\fBcutoff\fR" 4
.IX Item "cutoff"
Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
.IP "\fBprecision\fR" 4
.IX Item "precision"
For soxr only, the precision in bits to which the resampled signal will be
calculated. The default value of 20 (which, with suitable dithering, is
appropriate for a destination bit-depth of 16) gives SoX's 'High Quality'; a
value of 28 gives SoX's 'Very High Quality'.
.IP "\fBcheby\fR" 4
.IX Item "cheby"
For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
approximation for 'irrational' ratios. Default value is 0.
.IP "\fBasync\fR" 4
.IX Item "async"
For swr only, simple 1 parameter audio sync to timestamps using stretching,
squeezing, filling and trimming. Setting this to 1 will enable filling and
trimming, larger values represent the maximum amount in samples that the data
may be stretched or squeezed for each second.
Default value is 0, thus no compensation is applied to make the samples match
the audio timestamps.
.IP "\fBfirst_pts\fR" 4
.IX Item "first_pts"
For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
This allows for padding/trimming at the start of stream. By default, no
assumption is made about the first frame's expected pts, so no padding or
trimming is done. For example, this could be set to 0 to pad the beginning with
silence if an audio stream starts after the video stream or to trim any samples
with a negative pts due to encoder delay.
.IP "\fBmin_comp\fR" 4
.IX Item "min_comp"
For swr only, set the minimum difference between timestamps and audio data (in
seconds) to trigger stretching/squeezing/filling or trimming of the
data to make it match the timestamps. The default is that
stretching/squeezing/filling and trimming is disabled
(\fBmin_comp\fR = \f(CW\*(C`FLT_MAX\*(C'\fR).
.IP "\fBmin_hard_comp\fR" 4
.IX Item "min_hard_comp"
For swr only, set the minimum difference between timestamps and audio data (in
seconds) to trigger adding/dropping samples to make it match the
timestamps. This option effectively is a threshold to select between
hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
all compensation is by default disabled through \fBmin_comp\fR.
The default is 0.1.
.IP "\fBcomp_duration\fR" 4
.IX Item "comp_duration"
For swr only, set duration (in seconds) over which data is stretched/squeezed
to make it match the timestamps. Must be a non-negative double float value,
default value is 1.0.
.IP "\fBmax_soft_comp\fR" 4
.IX Item "max_soft_comp"
For swr only, set maximum factor by which data is stretched/squeezed to make it
match the timestamps. Must be a non-negative double float value, default value
is 0.
.IP "\fBmatrix_encoding\fR" 4
.IX Item "matrix_encoding"
Select matrixed stereo encoding.
.Sp
It accepts the following values:
.RS 4
.IP "\fBnone\fR" 4
.IX Item "none"
select none
.IP "\fBdolby\fR" 4
.IX Item "dolby"
select Dolby
.IP "\fBdplii\fR" 4
.IX Item "dplii"
select Dolby Pro Logic \s-1II\s0
.RE
.RS 4
.Sp
Default value is \f(CW\*(C`none\*(C'\fR.
.RE
.IP "\fBfilter_type\fR" 4
.IX Item "filter_type"
For swr only, select resampling filter type. This only affects resampling
operations.
.Sp
It accepts the following values:
.RS 4
.IP "\fBcubic\fR" 4
.IX Item "cubic"
select cubic
.IP "\fBblackman_nuttall\fR" 4
.IX Item "blackman_nuttall"
select Blackman Nuttall Windowed Sinc
.IP "\fBkaiser\fR" 4
.IX Item "kaiser"
select Kaiser Windowed Sinc
.RE
.RS 4
.RE
.IP "\fBkaiser_beta\fR" 4
.IX Item "kaiser_beta"
For swr only, set Kaiser Window Beta value. Must be an integer in the
interval [2,16], default value is 9.
.IP "\fBoutput_sample_bits\fR" 4
.IX Item "output_sample_bits"
For swr only, set number of used output sample bits for dithering. Must be an integer in the
interval [0,64], default value is 0, which means it's not used.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1), \fIlibswresample\fR\|(3)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.

View File

@@ -0,0 +1,403 @@
=encoding utf8
=head1 NAME
ffmpeg-resampler - FFmpeg Resampler
=head1 DESCRIPTION
The FFmpeg resampler provides a high-level interface to the
libswresample library audio resampling utilities. In particular it
allows one to perform audio resampling, audio channel layout rematrixing,
and audio format and packing layout conversion.
=head1 RESAMPLER OPTIONS
The audio resampler supports the following named options.
Options may be set by specifying -I<option> I<value> in the
FFmpeg tools, I<option>=I<value> for the aresample filter,
by setting the value explicitly in the
C<SwrContext> options or using the F<libavutil/opt.h> API for
programmatic use.
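For example, a hedged sketch of setting a few of these options through
the aresample filter (the values are illustrative only):
ffmpeg -i input.wav -af aresample=osr=48000:dither_method=triangular_hp output.wav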
=over 4
=item B<ich, in_channel_count>
Set the number of input channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
B<in_channel_layout> is set.
=item B<och, out_channel_count>
Set the number of output channels. Default value is 0. Setting this
value is not mandatory if the corresponding channel layout
B<out_channel_layout> is set.
=item B<uch, used_channel_count>
Set the number of used input channels. Default value is 0. This option is
only used for special remapping.
=item B<isr, in_sample_rate>
Set the input sample rate. Default value is 0.
=item B<osr, out_sample_rate>
Set the output sample rate. Default value is 0.
=item B<isf, in_sample_fmt>
Specify the input sample format. It is set by default to C<none>.
=item B<osf, out_sample_fmt>
Specify the output sample format. It is set by default to C<none>.
=item B<tsf, internal_sample_fmt>
Set the internal sample format. Default value is C<none>.
This will automatically be chosen when it is not explicitly set.
=item B<icl, in_channel_layout>
=item B<ocl, out_channel_layout>
Set the input/output channel layout.
See B<the Channel Layout section in the ffmpeg-utils(1) manual>
for the required syntax.
=item B<clev, center_mix_level>
Set the center mix level. It is a value expressed in deciBel, and must be
in the interval [-32,32].
=item B<slev, surround_mix_level>
Set the surround mix level. It is a value expressed in deciBel, and must
be in the interval [-32,32].
=item B<lfe_mix_level>
Set LFE mix into non LFE level. It is used when there is a LFE input but no
LFE output. It is a value expressed in deciBel, and must
be in the interval [-32,32].
=item B<rmvol, rematrix_volume>
Set rematrix volume. Default value is 1.0.
=item B<rematrix_maxval>
Set maximum output value for rematrixing.
This can be used to choose between preventing clipping and preventing
volume reduction. A value of 1.0 prevents clipping.
=item B<flags, swr_flags>
Set flags used by the converter. Default value is 0.
It supports the following individual flags:
=over 4
=item B<res>
force resampling, this flag forces resampling to be used even when the
input and output sample rates match.
=back
=item B<dither_scale>
Set the dither scale. Default value is 1.
=item B<dither_method>
Set dither method. Default value is 0.
Supported values:
=over 4
=item B<rectangular>
select rectangular dither
=item B<triangular>
select triangular dither
=item B<triangular_hp>
select triangular dither with high pass
=item B<lipshitz>
select lipshitz noise shaping dither
=item B<shibata>
select shibata noise shaping dither
=item B<low_shibata>
select low shibata noise shaping dither
=item B<high_shibata>
select high shibata noise shaping dither
=item B<f_weighted>
select f-weighted noise shaping dither
=item B<modified_e_weighted>
select modified-e-weighted noise shaping dither
=item B<improved_e_weighted>
select improved-e-weighted noise shaping dither
=back
=item B<resampler>
Set resampling engine. Default value is swr.
Supported values:
=over 4
=item B<swr>
select the native SW Resampler; filter options precision and cheby are not
applicable in this case.
=item B<soxr>
select the SoX Resampler (where available); compensation, and filter options
filter_size, phase_shift, filter_type & kaiser_beta, are not applicable in this
case.
=back
=item B<filter_size>
For swr only, set resampling filter size, default value is 32.
=item B<phase_shift>
For swr only, set resampling phase shift, default value is 10, and must be in
the interval [0,30].
=item B<linear_interp>
Use Linear Interpolation if set to 1, default value is 0.
=item B<cutoff>
Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
=item B<precision>
For soxr only, the precision in bits to which the resampled signal will be
calculated. The default value of 20 (which, with suitable dithering, is
appropriate for a destination bit-depth of 16) gives SoX's 'High Quality'; a
value of 28 gives SoX's 'Very High Quality'.
=item B<cheby>
For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
approximation for 'irrational' ratios. Default value is 0.
=item B<async>
For swr only, simple 1 parameter audio sync to timestamps using stretching,
squeezing, filling and trimming. Setting this to 1 will enable filling and
trimming, larger values represent the maximum amount in samples that the data
may be stretched or squeezed for each second.
Default value is 0, thus no compensation is applied to make the samples match
the audio timestamps.
=item B<first_pts>
For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
This allows for padding/trimming at the start of stream. By default, no
assumption is made about the first frame's expected pts, so no padding or
trimming is done. For example, this could be set to 0 to pad the beginning with
silence if an audio stream starts after the video stream or to trim any samples
with a negative pts due to encoder delay.
=item B<min_comp>
For swr only, set the minimum difference between timestamps and audio data (in
seconds) to trigger stretching/squeezing/filling or trimming of the
data to make it match the timestamps. The default is that
stretching/squeezing/filling and trimming is disabled
(B<min_comp> = C<FLT_MAX>).
=item B<min_hard_comp>
For swr only, set the minimum difference between timestamps and audio data (in
seconds) to trigger adding/dropping samples to make it match the
timestamps. This option effectively is a threshold to select between
hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
all compensation is by default disabled through B<min_comp>.
The default is 0.1.
=item B<comp_duration>
For swr only, set duration (in seconds) over which data is stretched/squeezed
to make it match the timestamps. Must be a non-negative double float value,
default value is 1.0.
=item B<max_soft_comp>
For swr only, set maximum factor by which data is stretched/squeezed to make it
match the timestamps. Must be a non-negative double float value, default value
is 0.
=item B<matrix_encoding>
Select matrixed stereo encoding.
It accepts the following values:
=over 4
=item B<none>
select none
=item B<dolby>
select Dolby
=item B<dplii>
select Dolby Pro Logic II
=back
Default value is C<none>.
=item B<filter_type>
For swr only, select resampling filter type. This only affects resampling
operations.
It accepts the following values:
=over 4
=item B<cubic>
select cubic
=item B<blackman_nuttall>
select Blackman Nuttall Windowed Sinc
=item B<kaiser>
select Kaiser Windowed Sinc
=back
=item B<kaiser_beta>
For swr only, set Kaiser Window Beta value. Must be an integer in the
interval [2,16], default value is 9.
=item B<output_sample_bits>
For swr only, set number of used output sample bits for dithering. Must be an integer in the
interval [0,64], default value is 0, which means it's not used.
=back
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.

View File

@@ -0,0 +1,45 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Resampler Documentation
@titlepage
@center @titlefont{FFmpeg Resampler Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The FFmpeg resampler provides a high-level interface to the
libswresample library audio resampling utilities. In particular it
allows one to perform audio resampling, audio channel layout rematrixing,
and audio format and packing layout conversion.
@c man end DESCRIPTION
@include resampler.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libswresample.html,libswresample}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-resampler
@settitle FFmpeg Resampler
@end ignore
@bye

View File

@@ -0,0 +1,292 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "FFMPEG-SCALER 1"
.TH FFMPEG-SCALER 1 " " " " " "
.SH "NAME"
ffmpeg\-scaler \- FFmpeg video scaling and pixel format converter
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
The FFmpeg rescaler provides a high-level interface to the libswscale
library image conversion utilities. In particular it allows one to perform
image rescaling and pixel format conversion.
.SH "SCALER OPTIONS"
.IX Header "SCALER OPTIONS"
The video scaler supports the following named options.
.PP
Options may be set by specifying \-\fIoption\fR \fIvalue\fR in the
FFmpeg tools. For programmatic use, they can be set explicitly in the
\&\f(CW\*(C`SwsContext\*(C'\fR options or through the \fIlibavutil/opt.h\fR \s-1API\s0.
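.PP
For example, a hedged sketch of picking a scaling algorithm from the
command line (file names and sizes are illustrative only):
.PP
.Vb 1
\& ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd output.mp4
.Ve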
.IP "\fBsws_flags\fR" 4
.IX Item "sws_flags"
Set the scaler flags. This is also used to set the scaling
algorithm. Only a single algorithm should be selected.
.Sp
It accepts the following values:
.RS 4
.IP "\fBfast_bilinear\fR" 4
.IX Item "fast_bilinear"
Select fast bilinear scaling algorithm.
.IP "\fBbilinear\fR" 4
.IX Item "bilinear"
Select bilinear scaling algorithm.
.IP "\fBbicubic\fR" 4
.IX Item "bicubic"
Select bicubic scaling algorithm.
.IP "\fBexperimental\fR" 4
.IX Item "experimental"
Select experimental scaling algorithm.
.IP "\fBneighbor\fR" 4
.IX Item "neighbor"
Select nearest neighbor rescaling algorithm.
.IP "\fBarea\fR" 4
.IX Item "area"
Select averaging area rescaling algorithm.
.IP "\fBbicublin\fR" 4
.IX Item "bicublin"
Select bicubic scaling algorithm for the luma component, bilinear for
chroma components.
.IP "\fBgauss\fR" 4
.IX Item "gauss"
Select Gaussian rescaling algorithm.
.IP "\fBsinc\fR" 4
.IX Item "sinc"
Select sinc rescaling algorithm.
.IP "\fBlanczos\fR" 4
.IX Item "lanczos"
Select lanczos rescaling algorithm.
.IP "\fBspline\fR" 4
.IX Item "spline"
Select natural bicubic spline rescaling algorithm.
.IP "\fBprint_info\fR" 4
.IX Item "print_info"
Enable printing/debug logging.
.IP "\fBaccurate_rnd\fR" 4
.IX Item "accurate_rnd"
Enable accurate rounding.
.IP "\fBfull_chroma_int\fR" 4
.IX Item "full_chroma_int"
Enable full chroma interpolation.
.IP "\fBfull_chroma_inp\fR" 4
.IX Item "full_chroma_inp"
Select full chroma input.
.IP "\fBbitexact\fR" 4
.IX Item "bitexact"
Enable bitexact output.
.RE
.RS 4
.RE
.IP "\fBsrcw\fR" 4
.IX Item "srcw"
Set source width.
.IP "\fBsrch\fR" 4
.IX Item "srch"
Set source height.
.IP "\fBdstw\fR" 4
.IX Item "dstw"
Set destination width.
.IP "\fBdsth\fR" 4
.IX Item "dsth"
Set destination height.
.IP "\fBsrc_format\fR" 4
.IX Item "src_format"
Set source pixel format (must be expressed as an integer).
.IP "\fBdst_format\fR" 4
.IX Item "dst_format"
Set destination pixel format (must be expressed as an integer).
.IP "\fBsrc_range\fR" 4
.IX Item "src_range"
Select source range.
.IP "\fBdst_range\fR" 4
.IX Item "dst_range"
Select destination range.
.IP "\fBparam0, param1\fR" 4
.IX Item "param0, param1"
Set scaling algorithm parameters. The specified values are specific
to some scaling algorithms and ignored by others. They are floating
point values.
.IP "\fBsws_dither\fR" 4
.IX Item "sws_dither"
Set the dithering algorithm. Accepts one of the following
values. Default value is \fBauto\fR.
.RS 4
.IP "\fBauto\fR" 4
.IX Item "auto"
automatic choice
.IP "\fBnone\fR" 4
.IX Item "none"
no dithering
.IP "\fBbayer\fR" 4
.IX Item "bayer"
bayer dither
.IP "\fBed\fR" 4
.IX Item "ed"
error diffusion dither
.IP "\fBa_dither\fR" 4
.IX Item "a_dither"
arithmetic dither based on addition
.IP "\fBx_dither\fR" 4
.IX Item "x_dither"
arithmetic dither based on xor (more random/less apparent patterning than
a_dither).
.RE
.RS 4
.RE
.IP "\fBalphablend\fR" 4
.IX Item "alphablend"
Set the alpha blending to use when the input has alpha but the output does not.
Default value is \fBnone\fR.
.RS 4
.IP "\fBuniform_color\fR" 4
.IX Item "uniform_color"
Blend onto a uniform background color
.IP "\fBcheckerboard\fR" 4
.IX Item "checkerboard"
Blend onto a checkerboard
.IP "\fBnone\fR" 4
.IX Item "none"
No blending
.RE
.RS 4
.RE
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1), \fIlibswscale\fR\|(3)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.

View File

@@ -0,0 +1,271 @@
=encoding utf8
=head1 NAME
ffmpeg-scaler - FFmpeg video scaling and pixel format converter
=head1 DESCRIPTION
The FFmpeg rescaler provides a high-level interface to the libswscale
library image conversion utilities. In particular it allows one to perform
image rescaling and pixel format conversion.
=head1 SCALER OPTIONS
The video scaler supports the following named options.
Options may be set by specifying -I<option> I<value> in the
FFmpeg tools. For programmatic use, they can be set explicitly in the
C<SwsContext> options or through the F<libavutil/opt.h> API.
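For example, a hedged sketch of picking a scaling algorithm from the
command line (file names and sizes are illustrative only):
ffmpeg -i input.mp4 -vf scale=1280:720 -sws_flags lanczos+accurate_rnd output.mp4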
=over 4
=item B<sws_flags>
Set the scaler flags. This is also used to set the scaling
algorithm. Only a single algorithm should be selected.
It accepts the following values:
=over 4
=item B<fast_bilinear>
Select fast bilinear scaling algorithm.
=item B<bilinear>
Select bilinear scaling algorithm.
=item B<bicubic>
Select bicubic scaling algorithm.
=item B<experimental>
Select experimental scaling algorithm.
=item B<neighbor>
Select nearest neighbor rescaling algorithm.
=item B<area>
Select averaging area rescaling algorithm.
=item B<bicublin>
Select bicubic scaling algorithm for the luma component, bilinear for
chroma components.
=item B<gauss>
Select Gaussian rescaling algorithm.
=item B<sinc>
Select sinc rescaling algorithm.
=item B<lanczos>
Select lanczos rescaling algorithm.
=item B<spline>
Select natural bicubic spline rescaling algorithm.
=item B<print_info>
Enable printing/debug logging.
=item B<accurate_rnd>
Enable accurate rounding.
=item B<full_chroma_int>
Enable full chroma interpolation.
=item B<full_chroma_inp>
Select full chroma input.
=item B<bitexact>
Enable bitexact output.
=back
=item B<srcw>
Set source width.
=item B<srch>
Set source height.
=item B<dstw>
Set destination width.
=item B<dsth>
Set destination height.
=item B<src_format>
Set source pixel format (must be expressed as an integer).
=item B<dst_format>
Set destination pixel format (must be expressed as an integer).
=item B<src_range>
Select source range.
=item B<dst_range>
Select destination range.
=item B<param0, param1>
Set scaling algorithm parameters. The specified values are specific to
some scaling algorithms and ignored by others. They are interpreted as
floating point values.
=item B<sws_dither>
Set the dithering algorithm. Accepts one of the following
values. Default value is B<auto>.
=over 4
=item B<auto>
automatic choice
=item B<none>
no dithering
=item B<bayer>
bayer dither
=item B<ed>
error diffusion dither
=item B<a_dither>
arithmetic dither, based on addition
=item B<x_dither>
arithmetic dither, based on xor (more random/less apparent patterning than
a_dither).
=back
=item B<alphablend>
Set the alpha blending to use when the input has alpha but the output does not.
Default value is B<none>.
=over 4
=item B<uniform_color>
Blend onto a uniform background color
=item B<checkerboard>
Blend onto a checkerboard
=item B<none>
No blending
=back
=back
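As an illustrative sketch (not part of the original manual; the file names
are hypothetical), these options are typically passed to the FFmpeg tools on
the command line, e.g. to select the lanczos algorithm with accurate rounding
while scaling:
    ffmpeg -i in.avi -sws_flags lanczos+accurate_rnd -s 1280x720 out.avi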
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.

View File

@@ -0,0 +1,44 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Scaler Documentation
@titlepage
@center @titlefont{FFmpeg Scaler Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The FFmpeg rescaler provides a high-level interface to the libswscale
library image conversion utilities. In particular it allows one to perform
image rescaling and pixel format conversion.
@c man end DESCRIPTION
@include scaler.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libswscale.html,libswscale}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-scaler
@settitle FFmpeg video scaling and pixel format converter
@end ignore
@bye

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,43 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle FFmpeg Utilities Documentation
@titlepage
@center @titlefont{FFmpeg Utilities Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
This document describes some generic features and utilities provided
by the libavutil library.
@c man end DESCRIPTION
@include utils.texi
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{libavutil.html,libavutil}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffmpeg-utils
@settitle FFmpeg utilities
@end ignore
@bye

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,47 @@
:
ffmpeg.c : libav*
======== : ======
:
:
--------------------------------:---> AVStream...
InputStream input_streams[] / :
/ :
InputFile input_files[] +==========================+ / ^ :
------> 0 | : st ---:-----------:--/ : :
^ +------+-----------+-----+ / +--------------------------+ : :
: | :ist_index--:-----:---------/ 1 | : st : | : :
: +------+-----------+-----+ +==========================+ : :
nb_input_files : | :ist_index--:-----:------------------> 2 | : st : | : :
: +------+-----------+-----+ +--------------------------+ : nb_input_streams :
: | :ist_index : | 3 | ... | : :
v +------+-----------+-----+ +--------------------------+ : :
--> 4 | | : :
| +--------------------------+ : :
| 5 | | : :
| +==========================+ v :
| :
| :
| :
| :
--------- --------------------------------:---> AVStream...
\ / :
OutputStream output_streams[] / :
\ / :
+======\======================/======+ ^ :
------> 0 | : source_index : st-:--- | : :
OutputFile output_files[] / +------------------------------------+ : :
/ 1 | : : : | : :
^ +------+------------+-----+ / +------------------------------------+ : :
: | : ost_index -:-----:------/ 2 | : : : | : :
nb_output_files : +------+------------+-----+ +====================================+ : :
: | : ost_index -:-----|-----------------> 3 | : : : | : :
: +------+------------+-----+ +------------------------------------+ : nb_output_streams :
: | : : | 4 | | : :
: +------+------------+-----+ +------------------------------------+ : :
: | : : | 5 | | : :
v +------+------------+-----+ +------------------------------------+ : :
6 | | : :
+------------------------------------+ : :
7 | | : :
+====================================+ v :
:

View File

@@ -0,0 +1,310 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle ffplay Documentation
@titlepage
@center @titlefont{ffplay Documentation}
@end titlepage
@top
@contents
@chapter Synopsis
ffplay [@var{options}] [@file{input_file}]
@chapter Description
@c man begin DESCRIPTION
FFplay is a very simple and portable media player using the FFmpeg
libraries and the SDL library. It is mostly used as a testbed for the
various FFmpeg APIs.
@c man end
@chapter Options
@c man begin OPTIONS
@include fftools-common-opts.texi
@section Main options
@table @option
@item -x @var{width}
Force displayed width.
@item -y @var{height}
Force displayed height.
@item -s @var{size}
Set frame size (WxH or abbreviation), needed for videos which do
not contain a header with the frame size, like raw YUV. This option
has been deprecated in favor of private options; try -video_size.
@item -fs
Start in fullscreen mode.
@item -an
Disable audio.
@item -vn
Disable video.
@item -sn
Disable subtitles.
@item -ss @var{pos}
Seek to @var{pos}. Note that in most formats it is not possible to seek
exactly, so @command{ffplay} will seek to the nearest seek point to
@var{pos}.
@var{pos} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
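For example (an illustrative command; the file name is hypothetical), to
start playback ninety seconds into the file:
@example
ffplay -ss 00:01:30 input.mp4
@end example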
@item -t @var{duration}
Play @var{duration} seconds of audio/video.
@var{duration} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
@item -bytes
Seek by bytes.
@item -nodisp
Disable graphical display.
@item -f @var{fmt}
Force format.
@item -window_title @var{title}
Set window title (default is the input filename).
@item -loop @var{number}
Loop movie playback @var{number} times. 0 means forever.
@item -showmode @var{mode}
Set the show mode to use.
Available values for @var{mode} are:
@table @samp
@item 0, video
show video
@item 1, waves
show audio waves
@item 2, rdft
show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
@end table
Default value is "video", if video is not present or cannot be played
"rdft" is automatically selected.
You can interactively cycle through the available show modes by
pressing the key @key{w}.
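For example (an illustrative command), to display audio waves for a
hypothetical audio file:
@example
ffplay -showmode waves song.mp3
@end example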
@item -vf @var{filtergraph}
Create the filtergraph specified by @var{filtergraph} and use it to
filter the video stream.
@var{filtergraph} is a description of the filtergraph to apply to
the stream, and must have a single video input and a single video
output. In the filtergraph, the input is associated to the label
@code{in}, and the output to the label @code{out}. See the
ffmpeg-filters manual for more information about the filtergraph
syntax.
You can specify this parameter multiple times and cycle through the specified
filtergraphs along with the show modes by pressing the key @key{w}.
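For example (an illustrative filtergraph using the standard scale and hflip
filters):
@example
ffplay -vf "scale=640:360,hflip" input.mp4
@end example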
@item -af @var{filtergraph}
@var{filtergraph} is a description of the filtergraph to apply to
the input audio.
Use the option "-filters" to show all the available filters (including
sources and sinks).
@item -i @var{input_file}
Read @var{input_file}.
@end table
@section Advanced options
@table @option
@item -pix_fmt @var{format}
Set pixel format.
This option has been deprecated in favor of private options; try -pixel_format.
@item -stats
Print several playback statistics; in particular, show the stream
duration, the codec parameters, the current position in the stream and
the audio/video synchronisation drift. It is on by default; to
explicitly disable it you need to specify @code{-nostats}.
@item -fast
Non-spec-compliant optimizations.
@item -genpts
Generate pts.
@item -sync @var{type}
Set the master clock to audio (@code{type=audio}), video
(@code{type=video}) or external (@code{type=ext}). Default is audio. The
master clock is used to control audio-video synchronization. Most media
players use audio as master clock, but in some cases (streaming or high
quality broadcast) it is necessary to change that. This option is mainly
used for debugging purposes.
@item -ast @var{audio_stream_specifier}
Select the desired audio stream using the given stream specifier. The stream
specifiers are described in the @ref{Stream specifiers} chapter. If this option
is not specified, the "best" audio stream is selected in the program of the
already selected video stream.
@item -vst @var{video_stream_specifier}
Select the desired video stream using the given stream specifier. The stream
specifiers are described in the @ref{Stream specifiers} chapter. If this option
is not specified, the "best" video stream is selected.
@item -sst @var{subtitle_stream_specifier}
Select the desired subtitle stream using the given stream specifier. The stream
specifiers are described in the @ref{Stream specifiers} chapter. If this option
is not specified, the "best" subtitle stream is selected in the program of the
already selected video or audio stream.
@item -autoexit
Exit when video is done playing.
@item -exitonkeydown
Exit if any key is pressed.
@item -exitonmousedown
Exit if any mouse button is pressed.
@item -codec:@var{media_specifier} @var{codec_name}
Force a specific decoder implementation for the stream identified by
@var{media_specifier}, which can assume the values @code{a} (audio),
@code{v} (video), and @code{s} (subtitle).
@item -acodec @var{codec_name}
Force a specific audio decoder.
@item -vcodec @var{codec_name}
Force a specific video decoder.
@item -scodec @var{codec_name}
Force a specific subtitle decoder.
@item -autorotate
Automatically rotate the video according to file metadata. Enabled by
default, use @option{-noautorotate} to disable it.
@item -framedrop
Drop video frames if video is out of sync. Enabled by default if the master
clock is not set to video. Use this option to enable frame dropping for all
master clock sources; use @option{-noframedrop} to disable it.
@item -infbuf
Do not limit the input buffer size; read as much data as possible from the
input as soon as possible. Enabled by default for realtime streams, where data
may be dropped if not read in time. Use this option to enable infinite buffers
for all inputs; use @option{-noinfbuf} to disable it.
@end table
@section While playing
@table @key
@item q, ESC
Quit.
@item f
Toggle full screen.
@item p, SPC
Pause.
@item a
Cycle audio channel in the current program.
@item v
Cycle video channel.
@item t
Cycle subtitle channel in the current program.
@item c
Cycle program.
@item w
Cycle video filters or show modes.
@item s
Step to the next frame.
Pause if the stream is not already paused, step to the next video
frame, and pause.
@item left/right
Seek backward/forward 10 seconds.
@item down/up
Seek backward/forward 1 minute.
@item page down/page up
Seek to the previous/next chapter, or, if there are no chapters, seek
backward/forward 10 minutes.
@item mouse click
Seek to percentage in file corresponding to fraction of width.
@end table
@c man end
@include config.texi
@ifset config-all
@set config-readonly
@ifset config-avutil
@include utils.texi
@end ifset
@ifset config-avcodec
@include codecs.texi
@include bitstream_filters.texi
@end ifset
@ifset config-avformat
@include formats.texi
@include protocols.texi
@end ifset
@ifset config-avdevice
@include devices.texi
@end ifset
@ifset config-swresample
@include resampler.texi
@end ifset
@ifset config-swscale
@include scaler.texi
@end ifset
@ifset config-avfilter
@include filters.texi
@end ifset
@end ifset
@chapter See Also
@ifhtml
@ifset config-all
@url{ffplay.html,ffplay},
@end ifset
@ifset config-not-all
@url{ffplay-all.html,ffmpeg-all},
@end ifset
@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
@ifset config-all
ffplay(1),
@end ifset
@ifset config-not-all
ffplay-all(1),
@end ifset
ffmpeg(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffplay
@settitle FFplay media player
@end ignore
@bye

View File

@@ -0,0 +1,683 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle ffprobe Documentation
@titlepage
@center @titlefont{ffprobe Documentation}
@end titlepage
@top
@contents
@chapter Synopsis
ffprobe [@var{options}] [@file{input_file}]
@chapter Description
@c man begin DESCRIPTION
ffprobe gathers information from multimedia streams and prints it in
human- and machine-readable fashion.
For example it can be used to check the format of the container used
by a multimedia stream and the format and type of each media stream
contained in it.
If a filename is specified in input, ffprobe will try to open and
probe the file content. If the file cannot be opened or recognized as
a multimedia file, a positive exit code is returned.
ffprobe may be employed either as a standalone application or in
combination with a textual filter, which may perform more
sophisticated processing, e.g. statistical processing or plotting.
Options are used to list some of the formats supported by ffprobe, to
specify which information to display, and to set how ffprobe
will show it.
ffprobe output is designed to be easily parsable by a textual filter,
and consists of one or more sections of a form defined by the selected
writer, which is specified by the @option{print_format} option.
Sections may contain other nested sections, and are identified by a
name (which may be shared by other sections), and a unique
name. See the output of @option{sections}.
Metadata tags stored in the container or in the streams are recognized
and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
section.
@c man end
@chapter Options
@c man begin OPTIONS
@include fftools-common-opts.texi
@section Main options
@table @option
@item -f @var{format}
Force format to use.
@item -unit
Show the unit of the displayed values.
@item -prefix
Use SI prefixes for the displayed values.
Unless the "-byte_binary_prefix" option is used all the prefixes
are decimal.
@item -byte_binary_prefix
Force the use of binary prefixes for byte values.
@item -sexagesimal
Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
@item -pretty
Prettify the format of the displayed values; it corresponds to the
options "-unit -prefix -byte_binary_prefix -sexagesimal".
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
Set the output printing format.
@var{writer_name} specifies the name of the writer, and
@var{writer_options} specifies the options to be passed to the writer.
For example, to print the output in JSON format, specify:
@example
-print_format json
@end example
For more details on the available output printing formats, see the
Writers section below.
@item -sections
Print sections structure and section information, and exit. The output
is not meant to be parsed by a machine.
@item -select_streams @var{stream_specifier}
Select only the streams specified by @var{stream_specifier}. This
option affects only the options related to streams
(e.g. @code{show_streams}, @code{show_packets}, etc.).
For example, to show only audio streams, you can use the command:
@example
ffprobe -show_streams -select_streams a INPUT
@end example
To show only video packets belonging to the video stream with index 1:
@example
ffprobe -show_packets -select_streams v:1 INPUT
@end example
@item -show_data
Show payload data, as a hexadecimal and ASCII dump. Coupled with
@option{-show_packets}, it will dump the packets' data. Coupled with
@option{-show_streams}, it will dump the codec extradata.
The dump is printed as the "data" field. It may contain newlines.
@item -show_data_hash @var{algorithm}
Show a hash of payload data, for packets with @option{-show_packets} and for
codec extradata with @option{-show_streams}.
@item -show_error
Show information about the error found when trying to probe the input.
The error information is printed within a section with name "ERROR".
@item -show_format
Show information about the container format of the input multimedia
stream.
All the container format information is printed within a section with
name "FORMAT".
@item -show_format_entry @var{name}
Like @option{-show_format}, but only prints the specified entry of the
container format information, rather than all. This option may be given more
than once, in which case all specified entries will be shown.
This option is deprecated, use @code{show_entries} instead.
@item -show_entries @var{section_entries}
Set list of entries to show.
Entries are specified according to the following
syntax. @var{section_entries} contains a list of section entries
separated by @code{:}. Each section entry is composed of a section
name (or unique name), optionally followed by a list of entries local
to that section, separated by @code{,}.
If a section name is specified but not followed by @code{=}, all
entries of that section are printed to output, together with all the
contained sections. Otherwise, only the entries specified in the local
section entries list are printed. In particular, if @code{=} is
specified but the list of local entries is empty, no entries will be
shown for that section.
Note that the order of specification of the local section entries is
not honored in the output, and the usual display order will be
retained.
The formal syntax is given by:
@example
@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
@end example
For example, to show only the index and type of each stream, and the PTS
time, duration time, and stream index of the packets, you can specify
the argument:
@example
packet=pts_time,duration_time,stream_index : stream=index,codec_type
@end example
To show all the entries in the section "format", but only the codec
type in the section "stream", specify the argument:
@example
format : stream=codec_type
@end example
To show all the tags in the stream and format sections:
@example
stream_tags : format_tags
@end example
To show only the @code{title} tag (if available) in the stream
sections:
@example
stream_tags=title
@end example
@item -show_packets
Show information about each packet contained in the input multimedia
stream.
The information for each single packet is printed within a dedicated
section with name "PACKET".
@item -show_frames
Show information about each frame and subtitle contained in the input
multimedia stream.
The information for each single frame is printed within a dedicated
section with name "FRAME" or "SUBTITLE".
@item -show_streams
Show information about each media stream contained in the input
multimedia stream.
Each media stream information is printed within a dedicated section
with name "STREAM".
@item -show_programs
Show information about programs and their streams contained in the input
multimedia stream.
Each media stream information is printed within a dedicated section
with name "PROGRAM_STREAM".
@item -show_chapters
Show information about chapters stored in the format.
Each chapter is printed within a dedicated section with name "CHAPTER".
@item -count_frames
Count the number of frames per stream and report it in the
corresponding stream section.
@item -count_packets
Count the number of packets per stream and report it in the
corresponding stream section.
@item -read_intervals @var{read_intervals}
Read only the specified intervals. @var{read_intervals} must be a
sequence of interval specifications separated by ",".
@command{ffprobe} will seek to the interval starting point, and will
continue reading from that point.
Each interval is specified by two optional parts, separated by "%".
The first part specifies the interval start position. It is
interpreted as an absolute position, or as a relative offset from the
current position if it is preceded by the "+" character. If this first
part is not specified, no seeking will be performed when reading this
interval.
The second part specifies the interval end position. It is interpreted
as an absolute position, or as a relative offset from the current
position if it is preceded by the "+" character. If the offset
specification starts with "#", it is interpreted as the number of
packets to read (not including the flushing packets) from the interval
start. If no second part is specified, the program will read until the
end of the input.
Note that seeking is not accurate, thus the actual interval start
point may be different from the specified position. Also, when an
interval duration is specified, the absolute end time will be computed
by adding the duration to the interval start point found by seeking
the file, rather than to the specified start value.
The formal syntax is given by:
@example
@var{INTERVAL} ::= [@var{START}|+@var{START_OFFSET}][%[@var{END}|+@var{END_OFFSET}]]
@var{INTERVALS} ::= @var{INTERVAL}[,@var{INTERVALS}]
@end example
A few examples follow.
@itemize
@item
Seek to time 10, read packets until 20 seconds after the found seek
point, then seek to position @code{01:30} (1 minute and thirty
seconds) and read packets until position @code{01:45}.
@example
10%+20,01:30%01:45
@end example
@item
Read only 42 packets after seeking to position @code{01:23}:
@example
01:23%+#42
@end example
@item
Read only the first 20 seconds from the start:
@example
%+20
@end example
@item
Read from the start until position @code{02:30}:
@example
%02:30
@end example
@end itemize
@item -show_private_data, -private
Show private data, that is data depending on the format of the
particular shown element.
This option is enabled by default, but you may need to disable it
for specific uses, for example when creating XSD-compliant XML output.
@item -show_program_version
Show information related to program version.
Version information is printed within a section with name
"PROGRAM_VERSION".
@item -show_library_versions
Show information related to library versions.
Version information for each library is printed within a section with
name "LIBRARY_VERSION".
@item -show_versions
Show information related to program and library versions. This is the
equivalent of setting both @option{-show_program_version} and
@option{-show_library_versions} options.
@item -show_pixel_formats
Show information about all pixel formats supported by FFmpeg.
Pixel format information for each format is printed within a section
with name "PIXEL_FORMAT".
@item -bitexact
Force bitexact output, useful to produce output which is not dependent
on the specific build.
@item -i @var{input_file}
Read @var{input_file}.
@end table
@c man end
@chapter Writers
@c man begin WRITERS
A writer defines the output format adopted by @command{ffprobe}, and will be
used for printing all the parts of the output.
A writer may accept one or more arguments, which specify the options
to adopt. The options are specified as a list of @var{key}=@var{value}
pairs, separated by ":".
All writers support the following options:
@table @option
@item string_validation, sv
Set string validation mode.
The following values are accepted.
@table @samp
@item fail
The writer will fail immediately in case an invalid string (UTF-8)
sequence or code point is found in the input. This is especially
useful to validate input metadata.
@item ignore
Any validation error will be ignored. This will result in possibly
broken output, especially with the json or xml writer.
@item replace
The writer will substitute invalid UTF-8 sequences or code points with
the string specified with the @option{string_validation_replacement}.
@end table
Default value is @samp{replace}.
@item string_validation_replacement, svr
Set replacement string to use in case @option{string_validation} is
set to @samp{replace}.
If the option is not specified, the writer will assume the empty
string; that is, it will remove the invalid sequences from the input
strings.
@end table
A description of the currently available writers follows.
@section default
Default format.
Print each section in the form:
@example
[SECTION]
key1=val1
...
keyN=valN
[/SECTION]
@end example
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
PROGRAM_STREAM section, and are prefixed by the string "TAG:".
A description of the accepted options follows.
@table @option
@item nokey, nk
If set to 1, do not print the key of each field. Default value
is 0.
@item noprint_wrappers, nw
If set to 1, do not print the section header and footer.
Default value is 0.
@end table
@section compact, csv
Compact and CSV format.
The @code{csv} writer is equivalent to @code{compact}, but supports
different defaults.
Each section is printed on a single line.
If no option is specified, the output has the form:
@example
section|key1=val1| ... |keyN=valN
@end example
Metadata tags are printed in the corresponding "format" or "stream"
section. A metadata tag key, if printed, is prefixed by the string
"tag:".
The description of the accepted options follows.
@table @option
@item item_sep, s
Specify the character to use for separating fields in the output line.
It must be a single printable character; it is "|" by default ("," for
the @code{csv} writer).
@item nokey, nk
If set to 1, do not print the key of each field. Its default
value is 0 (1 for the @code{csv} writer).
@item escape, e
Set the escape mode to use; the default is "c" ("csv" for the @code{csv}
writer).
It can assume one of the following values:
@table @option
@item c
Perform C-like escaping. Strings containing a newline (@samp{\n}), carriage
return (@samp{\r}), a tab (@samp{\t}), a form feed (@samp{\f}), the escaping
character (@samp{\}) or the item separator character @var{SEP} are escaped
using C-like fashioned escaping, so that a newline is converted to the
sequence @samp{\n}, a carriage return to @samp{\r}, @samp{\} to @samp{\\} and
the separator @var{SEP} is converted to @samp{\@var{SEP}}.
@item csv
Perform CSV-like escaping, as described in RFC4180. Strings
containing a newline (@samp{\n}), a carriage return (@samp{\r}), a double quote
(@samp{"}), or @var{SEP} are enclosed in double-quotes.
@item none
Perform no escaping.
@end table
@item print_section, p
Print the section name at the beginning of each line if the value is
@code{1}; disable it by setting the value to @code{0}. Default value is
@code{1}.
@end table
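As an illustrative sketch (the input name is hypothetical), the following
selects the compact writer, suppresses keys and section names, and shows
only the codec type and name of each stream:
@example
ffprobe -of compact=nk=1:p=0 -show_entries stream=codec_type,codec_name INPUT
@end example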
@section flat
Flat format.
A free-form output where each line contains an explicit key=value, such as
"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
directly embedded in sh scripts as long as the separator character is an
alphanumeric character or an underscore (see @var{sep_char} option).
The description of the accepted options follows.
@table @option
@item sep_char, s
Separator character used to separate the chapter, the section name, IDs and
potential tags in the printed field key.
Default value is @samp{.}.
@item hierarchical, h
Specify if the section name specification should be hierarchical. If
set to 1, and if there is more than one section in the current
chapter, the section name will be prefixed by the name of the
chapter. A value of 0 will disable this behavior.
Default value is 1.
@end table
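As an illustrative sketch (the input name is hypothetical), the following
emits shell-safe key=value lines using @samp{_} as the separator character:
@example
ffprobe -of flat=s=_ -show_format INPUT
@end example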
@section ini
INI format output.
Print output in an INI based format.
The following conventions are adopted:
@itemize
@item
all keys and values are UTF-8
@item
@samp{.} is the subgroup separator
@item
newline, @samp{\t}, @samp{\f}, @samp{\b} and the following characters are
escaped
@item
@samp{\} is the escape character
@item
@samp{#} is the comment indicator
@item
@samp{=} is the key/value separator
@item
@samp{:} is not used but usually parsed as key/value separator
@end itemize
This writer accepts options as a list of @var{key}=@var{value} pairs,
separated by @samp{:}.
The description of the accepted options follows.
@table @option
@item hierarchical, h
Specify if the section name specification should be hierarchical. If
set to 1, and if there is more than one section in the current
chapter, the section name will be prefixed by the name of the
chapter. A value of 0 will disable this behavior.
Default value is 1.
@end table
@section json
JSON based format.
Each section is printed using JSON notation.
The description of the accepted options follows.
@table @option
@item compact, c
If set to 1, enable compact output, that is, each section will be
printed on a single line. Default value is 0.
@end table
For more information about JSON, see @url{http://www.json.org/}.
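As an illustrative sketch (the input name is hypothetical), the following
prints the stream information as compact JSON:
@example
ffprobe -of json=c=1 -show_streams INPUT
@end example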
@section xml
XML based format.
The XML output is described in the XML schema description file
@file{ffprobe.xsd} installed in the FFmpeg datadir.
An updated version of the schema can be retrieved at the url
@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the
latest schema committed into the FFmpeg development source code tree.
Note that the output issued will be compliant to the
@file{ffprobe.xsd} schema only when no special global output options
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
@option{sexagesimal} etc.) are specified.
The description of the accepted options follows.
@table @option
@item fully_qualified, q
If set to 1, the output will be fully qualified. Default
value is 0.
This is required for generating an XML file which can be validated
through an XSD file.
@item xsd_compliant, x
If set to 1, perform more checks to ensure that the output is XSD
compliant. Default value is 0.
This option automatically sets @option{fully_qualified} to 1.
@end table
For more information about the XML format, see
@url{http://www.w3.org/XML/}.
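As an illustrative sketch (the input name is hypothetical), the following
produces fully qualified, XSD-compliant XML output suitable for validation:
@example
ffprobe -of xml=q=1:x=1 -show_format -show_streams INPUT
@end example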
@c man end WRITERS
@chapter Timecode
@c man begin TIMECODE
@command{ffprobe} supports Timecode extraction:
@itemize
@item
MPEG1/2 timecode is extracted from the GOP, and is available in the video
stream details (@option{-show_streams}, see @var{timecode}).
@item
MOV timecode is extracted from tmcd track, so is available in the tmcd
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
@item
DV, GXF and AVI timecodes are available in format metadata
(@option{-show_format}, see @var{TAG:timecode}).
@end itemize
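As an illustrative sketch (the input name is hypothetical), the stream
timecode, when present, can be queried with:
@example
ffprobe -show_entries stream=timecode INPUT
@end example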
@c man end TIMECODE
@include config.texi
@ifset config-all
@set config-readonly
@ifset config-avutil
@include utils.texi
@end ifset
@ifset config-avcodec
@include codecs.texi
@include bitstream_filters.texi
@end ifset
@ifset config-avformat
@include formats.texi
@include protocols.texi
@end ifset
@ifset config-avdevice
@include devices.texi
@end ifset
@ifset config-swresample
@include resampler.texi
@end ifset
@ifset config-swscale
@include scaler.texi
@end ifset
@ifset config-avfilter
@include filters.texi
@end ifset
@end ifset
@chapter See Also
@ifhtml
@ifset config-all
@url{ffprobe.html,ffprobe},
@end ifset
@ifset config-not-all
@url{ffprobe-all.html,ffprobe-all},
@end ifset
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
@ifset config-all
ffprobe(1),
@end ifset
@ifset config-not-all
ffprobe-all(1),
@end ifset
ffmpeg(1), ffplay(1), ffserver(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffprobe
@settitle ffprobe media prober
@end ignore
@bye

View File

@@ -0,0 +1,356 @@
<?xml version="1.0" encoding="UTF-8"?>
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
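    <!-- Illustrative usage note (not part of the original schema): XML
         emitted by ffprobe with the xml writer in fully qualified,
         XSD-compliant mode can be validated against this schema, e.g. with
         the xmllint tool (assumed to be available):

           ffprobe -of xml=q=1:x=1 -show_format -show_streams INPUT > probe.xml
           xmllint --noout --schema ffprobe.xsd probe.xml
    -->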
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
<xsd:complexType name="ffprobeType">
<xsd:sequence>
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
<xsd:element name="pixel_formats" type="ffprobe:pixelFormatsType" minOccurs="0" maxOccurs="1" />
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
<xsd:element name="packets_and_frames" type="ffprobe:packetsAndFramesType" minOccurs="0" maxOccurs="1" />
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="packetsType">
<xsd:sequence>
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="framesType">
<xsd:sequence>
<xsd:choice minOccurs="0" maxOccurs="unbounded">
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:choice>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="packetsAndFramesType">
<xsd:sequence>
<xsd:choice minOccurs="0" maxOccurs="unbounded">
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="subtitle" type="ffprobe:subtitleType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:choice>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="packetType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
</xsd:sequence>
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
<xsd:attribute name="pts" type="xsd:long" />
<xsd:attribute name="pts_time" type="xsd:float" />
<xsd:attribute name="dts" type="xsd:long" />
<xsd:attribute name="dts_time" type="xsd:float" />
<xsd:attribute name="duration" type="xsd:long" />
<xsd:attribute name="duration_time" type="xsd:float" />
<xsd:attribute name="convergence_duration" type="xsd:long" />
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
<xsd:attribute name="size" type="xsd:long" use="required" />
<xsd:attribute name="pos" type="xsd:long" />
<xsd:attribute name="flags" type="xsd:string" use="required" />
<xsd:attribute name="data" type="xsd:string" />
<xsd:attribute name="data_hash" type="xsd:string" />
</xsd:complexType>
<xsd:complexType name="packetSideDataListType">
<xsd:sequence>
<xsd:element name="side_data" type="ffprobe:packetSideDataType" minOccurs="1" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="packetSideDataType">
<xsd:attribute name="side_data_type" type="xsd:string"/>
<xsd:attribute name="side_data_size" type="xsd:int" />
</xsd:complexType>
<xsd:complexType name="frameType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="side_data_list" type="ffprobe:frameSideDataListType" minOccurs="0" maxOccurs="1" />
</xsd:sequence>
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
<xsd:attribute name="stream_index" type="xsd:int" />
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
<xsd:attribute name="pts" type="xsd:long" />
<xsd:attribute name="pts_time" type="xsd:float"/>
<xsd:attribute name="pkt_pts" type="xsd:long" />
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
<xsd:attribute name="pkt_dts" type="xsd:long" />
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
<xsd:attribute name="best_effort_timestamp" type="xsd:long" />
<xsd:attribute name="best_effort_timestamp_time" type="xsd:float" />
<xsd:attribute name="pkt_duration" type="xsd:long" />
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
<xsd:attribute name="pkt_pos" type="xsd:long" />
<xsd:attribute name="pkt_size" type="xsd:int" />
<!-- audio attributes -->
<xsd:attribute name="sample_fmt" type="xsd:string"/>
<xsd:attribute name="nb_samples" type="xsd:long" />
<xsd:attribute name="channels" type="xsd:int" />
<xsd:attribute name="channel_layout" type="xsd:string"/>
<!-- video attributes -->
<xsd:attribute name="width" type="xsd:long" />
<xsd:attribute name="height" type="xsd:long" />
<xsd:attribute name="pix_fmt" type="xsd:string"/>
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
<xsd:attribute name="pict_type" type="xsd:string"/>
<xsd:attribute name="coded_picture_number" type="xsd:long" />
<xsd:attribute name="display_picture_number" type="xsd:long" />
<xsd:attribute name="interlaced_frame" type="xsd:int" />
<xsd:attribute name="top_field_first" type="xsd:int" />
<xsd:attribute name="repeat_pict" type="xsd:int" />
</xsd:complexType>
<xsd:complexType name="frameSideDataListType">
<xsd:sequence>
<xsd:element name="side_data" type="ffprobe:frameSideDataType" minOccurs="1" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="frameSideDataType">
<xsd:attribute name="side_data_type" type="xsd:string"/>
<xsd:attribute name="side_data_size" type="xsd:int" />
</xsd:complexType>
<xsd:complexType name="subtitleType">
<xsd:attribute name="media_type" type="xsd:string" fixed="subtitle" use="required"/>
<xsd:attribute name="pts" type="xsd:long" />
<xsd:attribute name="pts_time" type="xsd:float"/>
<xsd:attribute name="format" type="xsd:int" />
<xsd:attribute name="start_display_time" type="xsd:int" />
<xsd:attribute name="end_display_time" type="xsd:int" />
<xsd:attribute name="num_rects" type="xsd:int" />
</xsd:complexType>
<xsd:complexType name="streamsType">
<xsd:sequence>
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="programsType">
<xsd:sequence>
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="streamDispositionType">
<xsd:attribute name="default" type="xsd:int" use="required" />
<xsd:attribute name="dub" type="xsd:int" use="required" />
<xsd:attribute name="original" type="xsd:int" use="required" />
<xsd:attribute name="comment" type="xsd:int" use="required" />
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
<xsd:attribute name="forced" type="xsd:int" use="required" />
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
</xsd:complexType>
<xsd:complexType name="streamType">
<xsd:sequence>
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="side_data_list" type="ffprobe:packetSideDataListType" minOccurs="0" maxOccurs="1" />
</xsd:sequence>
<xsd:attribute name="index" type="xsd:int" use="required"/>
<xsd:attribute name="codec_name" type="xsd:string" />
<xsd:attribute name="codec_long_name" type="xsd:string" />
<xsd:attribute name="profile" type="xsd:string" />
<xsd:attribute name="codec_type" type="xsd:string" />
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
<xsd:attribute name="extradata" type="xsd:string" />
<xsd:attribute name="extradata_hash" type="xsd:string" />
<!-- video attributes -->
<xsd:attribute name="width" type="xsd:int"/>
<xsd:attribute name="height" type="xsd:int"/>
<xsd:attribute name="coded_width" type="xsd:int"/>
<xsd:attribute name="coded_height" type="xsd:int"/>
<xsd:attribute name="has_b_frames" type="xsd:int"/>
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
<xsd:attribute name="pix_fmt" type="xsd:string"/>
<xsd:attribute name="level" type="xsd:int"/>
<xsd:attribute name="color_range" type="xsd:string"/>
<xsd:attribute name="color_space" type="xsd:string"/>
<xsd:attribute name="color_transfer" type="xsd:string"/>
<xsd:attribute name="color_primaries" type="xsd:string"/>
<xsd:attribute name="chroma_location" type="xsd:string"/>
<xsd:attribute name="timecode" type="xsd:string"/>
<xsd:attribute name="refs" type="xsd:int"/>
<!-- audio attributes -->
<xsd:attribute name="sample_fmt" type="xsd:string"/>
<xsd:attribute name="sample_rate" type="xsd:int"/>
<xsd:attribute name="channels" type="xsd:int"/>
<xsd:attribute name="channel_layout" type="xsd:string"/>
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
<xsd:attribute name="id" type="xsd:string"/>
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
<xsd:attribute name="start_pts" type="xsd:long"/>
<xsd:attribute name="start_time" type="xsd:float"/>
<xsd:attribute name="duration_ts" type="xsd:long"/>
<xsd:attribute name="duration" type="xsd:float"/>
<xsd:attribute name="bit_rate" type="xsd:int"/>
<xsd:attribute name="max_bit_rate" type="xsd:int"/>
<xsd:attribute name="bits_per_raw_sample" type="xsd:int"/>
<xsd:attribute name="nb_frames" type="xsd:int"/>
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
</xsd:complexType>
<xsd:complexType name="programType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
</xsd:sequence>
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
<xsd:attribute name="start_time" type="xsd:float"/>
<xsd:attribute name="start_pts" type="xsd:long"/>
<xsd:attribute name="end_time" type="xsd:float"/>
<xsd:attribute name="end_pts" type="xsd:long"/>
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
</xsd:complexType>
<xsd:complexType name="formatType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
<xsd:attribute name="filename" type="xsd:string" use="required"/>
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
<xsd:attribute name="format_long_name" type="xsd:string"/>
<xsd:attribute name="start_time" type="xsd:float"/>
<xsd:attribute name="duration" type="xsd:float"/>
<xsd:attribute name="size" type="xsd:long"/>
<xsd:attribute name="bit_rate" type="xsd:long"/>
<xsd:attribute name="probe_score" type="xsd:int"/>
</xsd:complexType>
<xsd:complexType name="tagType">
<xsd:attribute name="key" type="xsd:string" use="required"/>
<xsd:attribute name="value" type="xsd:string" use="required"/>
</xsd:complexType>
<xsd:complexType name="errorType">
<xsd:attribute name="code" type="xsd:int" use="required"/>
<xsd:attribute name="string" type="xsd:string" use="required"/>
</xsd:complexType>
<xsd:complexType name="programVersionType">
<xsd:attribute name="version" type="xsd:string" use="required"/>
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
<xsd:attribute name="build_date" type="xsd:string"/>
<xsd:attribute name="build_time" type="xsd:string"/>
<xsd:attribute name="compiler_ident" type="xsd:string" use="required"/>
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
</xsd:complexType>
<xsd:complexType name="chaptersType">
<xsd:sequence>
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="chapterType">
<xsd:sequence>
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
<xsd:attribute name="id" type="xsd:int" use="required"/>
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
<xsd:attribute name="start" type="xsd:int" use="required"/>
<xsd:attribute name="start_time" type="xsd:float"/>
<xsd:attribute name="end" type="xsd:int" use="required"/>
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
</xsd:complexType>
<xsd:complexType name="libraryVersionType">
<xsd:attribute name="name" type="xsd:string" use="required"/>
<xsd:attribute name="major" type="xsd:int" use="required"/>
<xsd:attribute name="minor" type="xsd:int" use="required"/>
<xsd:attribute name="micro" type="xsd:int" use="required"/>
<xsd:attribute name="version" type="xsd:int" use="required"/>
<xsd:attribute name="ident" type="xsd:string" use="required"/>
</xsd:complexType>
<xsd:complexType name="libraryVersionsType">
<xsd:sequence>
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="pixelFormatFlagsType">
<xsd:attribute name="big_endian" type="xsd:int" use="required"/>
<xsd:attribute name="palette" type="xsd:int" use="required"/>
<xsd:attribute name="bitstream" type="xsd:int" use="required"/>
<xsd:attribute name="hwaccel" type="xsd:int" use="required"/>
<xsd:attribute name="planar" type="xsd:int" use="required"/>
<xsd:attribute name="rgb" type="xsd:int" use="required"/>
<xsd:attribute name="pseudopal" type="xsd:int" use="required"/>
<xsd:attribute name="alpha" type="xsd:int" use="required"/>
</xsd:complexType>
<xsd:complexType name="pixelFormatComponentType">
<xsd:attribute name="index" type="xsd:int" use="required"/>
<xsd:attribute name="bit_depth" type="xsd:int" use="required"/>
</xsd:complexType>
<xsd:complexType name="pixelFormatComponentsType">
<xsd:sequence>
<xsd:element name="component" type="ffprobe:pixelFormatComponentType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
<xsd:complexType name="pixelFormatType">
<xsd:sequence>
<xsd:element name="flags" type="ffprobe:pixelFormatFlagsType" minOccurs="0" maxOccurs="1"/>
<xsd:element name="components" type="ffprobe:pixelFormatComponentsType" minOccurs="0" maxOccurs="1"/>
</xsd:sequence>
<xsd:attribute name="name" type="xsd:string" use="required"/>
<xsd:attribute name="nb_components" type="xsd:int" use="required"/>
<xsd:attribute name="log2_chroma_w" type="xsd:int"/>
<xsd:attribute name="log2_chroma_h" type="xsd:int"/>
<xsd:attribute name="bits_per_pixel" type="xsd:int"/>
</xsd:complexType>
<xsd:complexType name="pixelFormatsType">
<xsd:sequence>
<xsd:element name="pixel_format" type="ffprobe:pixelFormatType" minOccurs="0" maxOccurs="unbounded"/>
</xsd:sequence>
</xsd:complexType>
</xsd:schema>

View File

@@ -0,0 +1,372 @@
# Port on which the server is listening. You must select a different
# port from your standard HTTP web server if it is running on the same
# computer.
HTTPPort 8090
# Address on which the server is bound. Only useful if you have
# several network interfaces.
HTTPBindAddress 0.0.0.0
# Number of simultaneous HTTP connections that can be handled. It has
# to be defined *before* the MaxClients parameter, since it defines the
# MaxClients maximum limit.
MaxHTTPConnections 2000
# Number of simultaneous requests that can be handled. Since FFServer
# is very fast, it is more likely that you will want to leave this high
# and use MaxBandwidth, below.
MaxClients 1000
# This is the maximum amount of kbit/sec that you are prepared to
# consume when streaming to clients.
MaxBandwidth 1000
# Access log file (uses standard Apache log file format)
# '-' is the standard output.
CustomLog -
##################################################################
# Definition of the live feeds. Each live feed contains one video
# and/or audio sequence coming from an ffmpeg encoder or another
# ffserver. This sequence may be encoded simultaneously with several
# codecs at several resolutions.
<Feed feed1.ffm>
# You must use 'ffmpeg' to send a live feed to ffserver. In this
# example, you can type:
#
# ffmpeg -i <input> http://localhost:8090/feed1.ffm
# ffserver can also do time shifting. It means that it can stream any
# previously recorded live stream. The request should contain:
# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]".You must specify
# a path where the feed is stored on disk. You also specify the
# maximum size of the feed, where zero means unlimited. Default:
# File=/tmp/feed_name.ffm FileMaxSize=5M
File /tmp/feed1.ffm
FileMaxSize 200K
# You could specify
# ReadOnlyFile /saved/specialvideo.ffm
# This marks the file as readonly and it will not be deleted or updated.
# Specify launch in order to start ffmpeg automatically.
# First ffmpeg must be defined with an appropriate path if needed,
# after that options can follow, but avoid adding the http:// field
#Launch ffmpeg
# Only allow connections from localhost to the feed.
ACL allow 127.0.0.1
</Feed>
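# As an illustrative sketch (the capture device below is hypothetical and
# system dependent), a live sender for the feed above could be started with:
#
#   ffmpeg -f v4l2 -i /dev/video0 http://localhost:8090/feed1.ffm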
##################################################################
# Now you can define each stream which will be generated from the
# original audio and video stream. Each format has a filename (here
# 'test1.mpg'). FFServer will send this stream when answering a
# request containing this filename.
<Stream test1.mpg>
# coming from live feed 'feed1'
Feed feed1.ffm
# Format of the stream: you can choose among:
# mpeg : MPEG-1 multiplexed video and audio
# mpegvideo : only MPEG-1 video
# mp2 : MPEG-2 audio (use AudioCodec to select layer 2 and 3 codec)
# ogg : Ogg format (Vorbis audio codec)
# rm : RealNetworks-compatible stream. Multiplexed audio and video.
# ra : RealNetworks-compatible stream. Audio only.
# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
# jpeg : Generate a single JPEG image.
# mjpeg : Generate a M-JPEG stream.
# asf : ASF compatible streaming (Windows Media Player format).
# swf : Macromedia Flash compatible stream
# avi : AVI format (MPEG-4 video, MPEG audio sound)
Format mpeg
# Bitrate for the audio stream. Codecs usually support only a few
# different bitrates.
AudioBitRate 32
# Number of audio channels: 1 = mono, 2 = stereo
AudioChannels 1
# Sampling frequency for audio. When using low bitrates, you should
# lower this frequency to 22050 or 11025. The supported frequencies
# depend on the selected audio codec.
AudioSampleRate 44100
# Bitrate for the video stream
VideoBitRate 64
# Ratecontrol buffer size
VideoBufferSize 40
# Number of frames per second
VideoFrameRate 3
# Size of the video frame: WxH (default: 160x128)
# The following abbreviations are defined: sqcif, qcif, cif, 4cif, qqvga,
# qvga, vga, svga, xga, uxga, qxga, sxga, qsxga, hsxga, wvga, wxga, wsxga,
# wuxga, woxga, wqsxga, wquxga, whsxga, whuxga, cga, ega, hd480, hd720,
# hd1080
VideoSize 160x128
# Transmit only intra frames (useful for low bitrates, but kills frame rate).
#VideoIntraOnly
# If non-intra only, an intra frame is transmitted every VideoGopSize
# frames. Video synchronization can only begin at an intra frame.
VideoGopSize 12
# More MPEG-4 parameters
# VideoHighQuality
# Video4MotionVector
# Choose your codecs:
#AudioCodec mp2
#VideoCodec mpeg1video
# Suppress audio
#NoAudio
# Suppress video
#NoVideo
#VideoQMin 3
#VideoQMax 31
# Set this to the number of seconds backwards in time to start. Note that
# most players will buffer 5-10 seconds of video, and also you need to allow
# for a keyframe to appear in the data stream.
#Preroll 15
# ACL:
# You can allow ranges of addresses (or single addresses)
#ACL ALLOW <first address> <last address>
# You can deny ranges of addresses (or single addresses)
#ACL DENY <first address> <last address>
# You can repeat the ACL allow/deny as often as you like. It is on a per
# stream basis. The first match defines the action. If there are no matches,
# then the default is the inverse of the last ACL statement.
#
# Thus 'ACL allow localhost' only allows access from localhost.
# 'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
# allow everybody else.
</Stream>
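# Illustrative note (not part of the stock configuration): with the
# defaults above, players would request this stream at
# http://localhost:8090/test1.mpg, e.g. "ffplay http://localhost:8090/test1.mpg".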
##################################################################
# Example streams
# Multipart JPEG
#<Stream test.mjpg>
#Feed feed1.ffm
#Format mpjpeg
#VideoFrameRate 2
#VideoIntraOnly
#NoAudio
#Strict -1
#</Stream>
# Single JPEG
#<Stream test.jpg>
#Feed feed1.ffm
#Format jpeg
#VideoFrameRate 2
#VideoIntraOnly
##VideoSize 352x240
#NoAudio
#Strict -1
#</Stream>
# Flash
#<Stream test.swf>
#Feed feed1.ffm
#Format swf
#VideoFrameRate 2
#VideoIntraOnly
#NoAudio
#</Stream>
# ASF compatible
<Stream test.asf>
Feed feed1.ffm
Format asf
VideoFrameRate 15
VideoSize 352x240
VideoBitRate 256
VideoBufferSize 40
VideoGopSize 30
AudioBitRate 64
StartSendOnKey
</Stream>
# MP3 audio
#<Stream test.mp3>
#Feed feed1.ffm
#Format mp2
#AudioCodec mp3
#AudioBitRate 64
#AudioChannels 1
#AudioSampleRate 44100
#NoVideo
#</Stream>
# Ogg Vorbis audio
#<Stream test.ogg>
#Feed feed1.ffm
#Metadata title "Stream title"
#AudioBitRate 64
#AudioChannels 2
#AudioSampleRate 44100
#NoVideo
#</Stream>
# Real with audio only at 32 kbits
#<Stream test.ra>
#Feed feed1.ffm
#Format rm
#AudioBitRate 32
#NoVideo
#NoAudio
#</Stream>
# Real with audio and video at 64 kbits
#<Stream test.rm>
#Feed feed1.ffm
#Format rm
#AudioBitRate 32
#VideoBitRate 128
#VideoFrameRate 25
#VideoGopSize 25
#NoAudio
#</Stream>
##################################################################
# A stream coming from a file: you only need to set the input
# filename and optionally a new format. Supported conversions:
# AVI -> ASF
#<Stream file.rm>
#File "/usr/local/httpd/htdocs/tlive.rm"
#NoAudio
#</Stream>
#<Stream file.asf>
#File "/usr/local/httpd/htdocs/test.asf"
#NoAudio
#Metadata author "Me"
#Metadata copyright "Super MegaCorp"
#Metadata title "Test stream from disk"
#Metadata comment "Test comment"
#</Stream>
##################################################################
# RTSP examples
#
# You can access this stream with the RTSP URL:
# rtsp://localhost:5454/test1-rtsp.mpg
#
# A non-standard RTSP redirector is also created. Its URL is:
# http://localhost:8090/test1-rtsp.rtsp
#<Stream test1-rtsp.mpg>
#Format rtp
#File "/usr/local/httpd/htdocs/test1.mpg"
#</Stream>
# Transcode an incoming live feed to another live feed,
# using libx264 and video presets
#<Stream live.h264>
#Format rtp
#Feed feed1.ffm
#VideoCodec libx264
#VideoFrameRate 24
#VideoBitRate 100
#VideoSize 480x272
#AVPresetVideo default
#AVPresetVideo baseline
#AVOptionVideo flags +global_header
#
#AudioCodec libfaac
#AudioBitRate 32
#AudioChannels 2
#AudioSampleRate 22050
#AVOptionAudio flags +global_header
#</Stream>
##################################################################
# SDP/multicast examples
#
# If you want to send your stream in multicast, you must set the
# multicast address with MulticastAddress. The port and the TTL can
# also be set.
#
# An SDP file is automatically generated by ffserver by adding the
# 'sdp' extension to the stream name (here
# http://localhost:8090/test1-sdp.sdp). You should usually give this
# file to your player to play the stream.
#
# The 'NoLoop' option can be used to avoid looping when the stream is
# terminated.
#<Stream test1-sdp.mpg>
#Format rtp
#File "/usr/local/httpd/htdocs/test1.mpg"
#MulticastAddress 224.124.0.1
#MulticastPort 5000
#MulticastTTL 16
#NoLoop
#</Stream>
##################################################################
# Special streams
# Server status
<Stream stat.html>
Format status
# Only allow local people to get the status
ACL allow localhost
ACL allow 192.168.0.0 192.168.255.255
#FaviconURL http://pond1.gladstonefamily.net:8080/favicon.ico
</Stream>
# Redirect index.html to the appropriate site
<Redirect index.html>
URL http://www.ffmpeg.org/
</Redirect>

View File

@@ -0,0 +1,923 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle ffserver Documentation
@titlepage
@center @titlefont{ffserver Documentation}
@end titlepage
@top
@contents
@chapter Synopsis
ffserver [@var{options}]
@chapter Description
@c man begin DESCRIPTION
@command{ffserver} is a streaming server for both audio and video.
It supports several live feeds, streaming from files and time shifting
on live feeds. You can seek to positions in the past on each live
feed, provided you specify a big enough feed storage.
@command{ffserver} is configured through a configuration file, which
is read at startup. If not explicitly specified, it will read from
@file{/etc/ffserver.conf}.
@command{ffserver} receives prerecorded files or FFM streams from some
@command{ffmpeg} instance as input, then streams them over
RTP/RTSP/HTTP.
An @command{ffserver} instance will listen on some port as specified
in the configuration file. You can launch one or more instances of
@command{ffmpeg} and send one or more FFM streams to the port where
ffserver is expecting to receive them. Alternatively, you can make
@command{ffserver} launch such @command{ffmpeg} instances at startup.
Input streams are called feeds, and each one is specified by a
@code{<Feed>} section in the configuration file.
For each feed you can have different output streams in various
formats, each one specified by a @code{<Stream>} section in the
configuration file.
@chapter Detailed description
@command{ffserver} works by forwarding streams encoded by
@command{ffmpeg}, or pre-recorded streams which are read from disk.
Precisely, @command{ffserver} acts as an HTTP server, accepting POST
requests from @command{ffmpeg} to acquire the stream to publish, and
serving RTSP clients or HTTP clients GET requests with the stream
media content.
A feed is an @ref{FFM} stream created by @command{ffmpeg}, and sent to
a port where @command{ffserver} is listening.
Each feed is identified by a unique name, corresponding to the name
of the resource published on @command{ffserver}, and is configured by
a dedicated @code{Feed} section in the configuration file.
The feed publish URL is given by:
@example
http://@var{ffserver_ip_address}:@var{http_port}/@var{feed_name}
@end example
where @var{ffserver_ip_address} is the IP address of the machine where
@command{ffserver} is installed, @var{http_port} is the port number of
the HTTP server (configured through the @option{HTTPPort} option), and
@var{feed_name} is the name of the corresponding feed defined in the
configuration file.
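For example, with @command{ffserver} listening on port 8090 of the local
host and a feed named @file{feed1.ffm} (the values used in the examples
throughout this document), an @command{ffmpeg} instance would publish to:
@example
ffmpeg -i INPUTFILE http://localhost:8090/feed1.ffm
@end example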
Each feed is associated with a file which is stored on disk. This stored
file is used to send pre-recorded data to a player as fast as
possible when new content is added in real-time to the stream.
A "live-stream" or "stream" is a resource published by
@command{ffserver}, and made accessible through the HTTP protocol to
clients.
A stream can be connected to a feed, or to a file. In the first case,
the published stream is forwarded from the corresponding feed
generated by a running instance of @command{ffmpeg}; in the second
case the stream is read from a pre-recorded file.
Each stream is identified by a unique name, corresponding to the name
of the resource served by @command{ffserver}, and is configured by
a dedicated @code{Stream} section in the configuration file.
The stream access HTTP URL is given by:
@example
http://@var{ffserver_ip_address}:@var{http_port}/@var{stream_name}[@var{options}]
@end example
The stream access RTSP URL is given by:
@example
rtsp://@var{ffserver_ip_address}:@var{rtsp_port}/@var{stream_name}[@var{options}]
@end example
@var{stream_name} is the name of the corresponding stream defined in
the configuration file. @var{options} is a list of options specified
after the URL which affect how the stream is served by
@command{ffserver}. @var{http_port} and @var{rtsp_port} are the HTTP
and RTSP ports configured with the options @var{HTTPPort} and
@var{RTSPPort} respectively.
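For example, with the HTTP and RTSP ports used in the sample
configuration file (8090 and 5454), streams could be accessed as:
@example
http://localhost:8090/test.asf?buffer=5
rtsp://localhost:5454/test1-rtsp.mpg
@end example
The @code{buffer} option used here is described in the Tips section
below.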
If the stream is associated with a feed, the encoding parameters
must be configured in the stream configuration. They are sent to
@command{ffmpeg} when setting up the encoding. This allows
@command{ffserver} to define the encoding parameters used by
the @command{ffmpeg} encoders.
The @command{ffmpeg} @option{override_ffserver} commandline option
allows one to override the encoding parameters set by the server.
Multiple streams can be connected to the same feed.
For example, you can have a situation described by the following
graph:
@verbatim
               _________       __________
              |         |     |          |
ffmpeg 1 -----| feed 1  |-----| stream 1 |
            \ |_________|\    |__________|
             \            \
              \            \  __________
               \            \|          |
                \            | stream 2 |
                 \           |__________|
                  \
                   \  _________       __________
                    \|         |     |          |
                     | feed 2  |-----| stream 3 |
                     |_________|     |__________|

               _________       __________
              |         |     |          |
ffmpeg 2 -----| feed 3  |-----| stream 4 |
              |_________|     |__________|

               _________       __________
              |         |     |          |
              | file 1  |-----| stream 5 |
              |_________|     |__________|
@end verbatim
@anchor{FFM}
@section FFM, FFM2 formats
FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
video and audio streams and encoding options, and can store a moving time segment
of an infinite movie or a whole movie.
FFM is version-specific, and there is only limited compatibility
between FFM files generated by one version of ffmpeg/ffserver and
another. It may work, but it is not guaranteed to work.
FFM2 is extensible while maintaining compatibility and should work between
differing versions of tools. FFM2 is the default.
@section Status stream
@command{ffserver} supports an HTTP interface which exposes the
current status of the server.
Simply point your browser to the address of the special status stream
specified in the configuration file.
For example if you have:
@example
<Stream status.html>
Format status
# Only allow local people to get the status
ACL allow localhost
ACL allow 192.168.0.0 192.168.255.255
</Stream>
@end example
then the server will post a page with the status information when
the special stream @file{status.html} is requested.
@section How do I make it work?
As a simple test, just run the following two command lines where INPUTFILE
is some file which you can decode with ffmpeg:
@example
ffserver -f doc/ffserver.conf &
ffmpeg -i INPUTFILE http://localhost:8090/feed1.ffm
@end example
At this point you should be able to go to your Windows machine and fire up
Windows Media Player (WMP). Go to Open URL and enter
@example
http://<linuxbox>:8090/test.asf
@end example
You should (after a short delay) see video and hear audio.
WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
transfer the entire file before starting to play.
The same is true of AVI files.
You should edit the @file{ffserver.conf} file to suit your needs (in
terms of frame rates etc). Then install @command{ffserver} and
@command{ffmpeg}, write a script to start them up, and off you go.
@section What else can it do?
You can replay video from .ffm files that was recorded earlier.
However, there are a number of caveats, including the fact that the
ffserver parameters must match the original parameters used to record the
file. If they do not, then ffserver deletes the file before recording into it.
(Now that I write this, it seems broken).
You can fiddle with many of the codec choices and encoding parameters, and
there are a bunch more parameters that you cannot control. Post a message
to the mailing list if there are some 'must have' parameters. Look in
ffserver.conf for a list of the currently available controls.
It will automatically generate the ASX or RAM files that are often used
in browsers. These files are actually redirections to the underlying ASF
or RM file. The reason for this is that the browser often fetches the
entire file before starting up the external viewer. The redirection files
are very small and can be transferred quickly. [The stream itself is
often 'infinite' and thus the browser tries to download it and never
finishes.]
@section Tips
* When you connect to a live stream, most players (WMP, RA, etc) want to
buffer a certain number of seconds of material so that they can display the
signal continuously. However, ffserver (by default) starts sending data
in realtime. This means that there is a pause of a few seconds while the
buffering is being done by the player. The good news is that this can be
cured by adding a '?buffer=5' to the end of the URL. This means that the
stream should start 5 seconds in the past -- and so the first 5 seconds
of the stream are sent as fast as the network will allow. It will then
slow down to real time. This noticeably improves the startup experience.
You can also add a 'Preroll 15' statement into the ffserver.conf that will
add the 15 second prebuffering on all requests that do not otherwise
specify a time. In addition, ffserver will skip frames until a key_frame
is found. This further reduces the startup delay by not transferring data
that will be discarded.
@section Why does the ?buffer / Preroll stop working after a time?
It turns out that (on my machine at least) the number of frames successfully
grabbed is marginally less than the number that ought to be grabbed. This
means that the timestamp in the encoded data stream gets behind realtime.
This means that if you say 'Preroll 10', then when the stream gets 10
or more seconds behind, there is no Preroll left.
Fixing this requires a change in the internals of how timestamps are
handled.
@section Does the @code{?date=} stuff work?
Yes (subject to the limitation outlined above). Also note that whenever you
start ffserver, it deletes the ffm file (if any parameters have changed),
thus wiping out what you had recorded before.
The format of the @code{?date=xxxxxx} is fairly flexible. You should use one
of the following formats (the 'T' is literal):
@example
* YYYY-MM-DDTHH:MM:SS (localtime)
* YYYY-MM-DDTHH:MM:SSZ (UTC)
@end example
You can omit the YYYY-MM-DD, and then it refers to the current day. However
note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this
may be in the future and so is unlikely to be useful.
You use this by adding the ?date= to the end of the URL for the stream.
For example: @samp{http://localhost:8090/test.asf?date=2002-07-26T23:05:00}.
@c man end
@chapter Options
@c man begin OPTIONS
@include fftools-common-opts.texi
@section Main options
@table @option
@item -f @var{configfile}
Read configuration file @file{configfile}. If not specified, it will
read from @file{/etc/ffserver.conf} by default.
@item -n
Enable no-launch mode. This option disables all the @code{Launch}
directives within the various @code{<Feed>} sections. Since
@command{ffserver} will not launch any @command{ffmpeg} instances, you
will have to launch them manually.
@item -d
Enable debug mode. This option increases log verbosity, and directs
log messages to stdout. When specified, the @option{CustomLog} option
is ignored.
@end table
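For example, to start the server in debug mode with an explicit
configuration file:
@example
ffserver -d -f doc/ffserver.conf
@end example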
@chapter Configuration file syntax
@command{ffserver} reads a configuration file containing global
options and settings for each stream and feed.
The configuration file consists of global options and dedicated
sections, which must be introduced by "<@var{SECTION_NAME}
@var{ARGS}>" on a separate line and must be terminated by a line in
the form "</@var{SECTION_NAME}>". @var{ARGS} is optional.
Currently the following sections are recognized: @samp{Feed},
@samp{Stream}, @samp{Redirect}.
A line starting with @code{#} is ignored and treated as a comment.
Names of options and sections are case-insensitive.
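A minimal configuration file illustrating this syntax might look as
follows (names and values are only illustrative):
@example
# Global options
HTTPPort 8090

<Feed feed1.ffm>
File /tmp/feed1.ffm
</Feed>

<Stream test.asf>
Feed feed1.ffm
Format asf
</Stream>
@end example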
@section ACL syntax
An ACL (Access Control List) specifies the addresses which are allowed
to access a given stream, or to write to a given feed.
It accepts the following forms:
@itemize
@item
Allow/deny access to @var{address}.
@example
ACL ALLOW <address>
ACL DENY <address>
@end example
@item
Allow/deny access to ranges of addresses from @var{first_address} to
@var{last_address}.
@example
ACL ALLOW <first_address> <last_address>
ACL DENY <first_address> <last_address>
@end example
@end itemize
You can repeat the ACL allow/deny as often as you like. It is on a per
stream basis. The first match defines the action. If there are no matches,
then the default is the inverse of the last ACL statement.
Thus 'ACL allow localhost' only allows access from localhost.
'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
allow everybody else.
@section Global options
@table @option
@item HTTPPort @var{port_number}
@item Port @var{port_number}
@item RTSPPort @var{port_number}
@var{HTTPPort} sets the HTTP server listening TCP port number, while
@var{RTSPPort} sets the RTSP server listening TCP port number.
@var{Port} is the equivalent of @var{HTTPPort} and is deprecated.
You must select a different port from your standard HTTP web server if
it is running on the same computer.
If not specified, no corresponding server will be created.
@item HTTPBindAddress @var{ip_address}
@item BindAddress @var{ip_address}
@item RTSPBindAddress @var{ip_address}
Set address on which the HTTP/RTSP server is bound. Only useful if you
have several network interfaces.
@var{BindAddress} is the equivalent of @var{HTTPBindAddress} and is
deprecated.
@item MaxHTTPConnections @var{n}
Set number of simultaneous HTTP connections that can be handled. It
has to be defined @emph{before} the @option{MaxClients} parameter,
since it defines the @option{MaxClients} maximum limit.
Default value is 2000.
@item MaxClients @var{n}
Set number of simultaneous requests that can be handled. Since
@command{ffserver} is very fast, it is more likely that you will want
to leave this high and use @option{MaxBandwidth}.
Default value is 5.
@item MaxBandwidth @var{kbps}
Set the maximum amount of kbit/sec that you are prepared to consume
when streaming to clients.
Default value is 1000.
@item CustomLog @var{filename}
Set access log file (uses standard Apache log file format). '-' is the
standard output.
If not specified @command{ffserver} will produce no log.
In case the commandline option @option{-d} is specified this option is
ignored, and the log is written to standard output.
@item NoDaemon
Set no-daemon mode. This option is currently ignored since now
@command{ffserver} will always work in no-daemon mode, and is
deprecated.
@item UseDefaults
@item NoDefaults
Control whether default codec options are used for all streams or not.
Each stream may override this setting for itself. Default is @var{UseDefaults}.
If multiple definitions are given, the latest occurrence overrides the previous ones.
@end table
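For example, a global section using the default limits documented above
might look like this (the bind address is illustrative):
@example
HTTPPort 8090
HTTPBindAddress 0.0.0.0
RTSPPort 5454
MaxHTTPConnections 2000
MaxClients 5
MaxBandwidth 1000
CustomLog -
@end example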
@section Feed section
A Feed section defines a feed provided to @command{ffserver}.
Each live feed contains one video and/or audio sequence coming from an
@command{ffmpeg} encoder or another @command{ffserver}. This sequence
may be encoded simultaneously with several codecs at several
resolutions.
A feed instance specification is introduced by a line in the form:
@example
<Feed FEED_FILENAME>
@end example
where @var{FEED_FILENAME} specifies the unique name of the FFM stream.
The following options are recognized within a Feed section.
@table @option
@item File @var{filename}
@item ReadOnlyFile @var{filename}
Set the path where the feed file is stored on disk.
If not specified, @file{/tmp/FEED.ffm} is assumed, where
@var{FEED} is the feed name.
If @option{ReadOnlyFile} is used the file is marked as read-only and
it will not be deleted or updated.
@item Truncate
Truncate the feed file, rather than appending to it. By default
@command{ffserver} will append data to the file, until the maximum
file size value is reached (see @option{FileMaxSize} option).
@item FileMaxSize @var{size}
Set maximum size of the feed file in bytes. 0 means unlimited. The
postfixes @code{K} (2^10), @code{M} (2^20), and @code{G} (2^30) are
recognized.
Default value is 5M.
@item Launch @var{args}
Launch an @command{ffmpeg} command when @command{ffserver} starts.
@var{args} must be a sequence of arguments to be provided to an
@command{ffmpeg} instance. The first provided argument is ignored, and
it is replaced by a path with the same dirname as the @command{ffserver}
instance, followed by the remaining arguments and terminated with a
path corresponding to the feed.
When the launched process exits, @command{ffserver} will launch
another program instance.
In case you need a more complex @command{ffmpeg} configuration,
e.g. if you need to generate multiple FFM feeds with a single
@command{ffmpeg} instance, you should launch @command{ffmpeg} by hand.
This option is ignored in case the commandline option @option{-n} is
specified.
@item ACL @var{spec}
Specify the list of IP addresses which are allowed or denied to write
to the feed. Multiple ACL options can be specified.
@end table
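For example, the following feed stores at most 200K of data in
@file{/tmp/feed1.ffm} and only accepts input from the local host
(illustrative values):
@example
<Feed feed1.ffm>
File /tmp/feed1.ffm
FileMaxSize 200K
ACL allow 127.0.0.1
</Feed>
@end example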
@section Stream section
A Stream section defines a stream provided by @command{ffserver}, and
identified by a single name.
The stream is sent when answering a request containing the stream
name.
A stream section must be introduced by the line:
@example
<Stream STREAM_NAME>
@end example
where @var{STREAM_NAME} specifies the unique name of the stream.
The following options are recognized within a Stream section.
Encoding options are marked with the @emph{encoding} tag, and they are
used to set the encoding parameters, and are mapped to libavcodec
encoding options. Not all encoding options are supported; in
particular, it is not possible to set encoder private options. In order
to override the encoding options specified by @command{ffserver}, you
can use the @command{ffmpeg} @option{override_ffserver} commandline
option.
Only one of the @option{Feed} and @option{File} options should be set.
@table @option
@item Feed @var{feed_name}
Set the input feed. @var{feed_name} must correspond to an existing
feed defined in a @code{Feed} section.
When this option is set, encoding options are used to setup the
encoding operated by the remote @command{ffmpeg} process.
@item File @var{filename}
Set the filename of the pre-recorded input file to stream.
When this option is set, encoding options are ignored and the input
file content is re-streamed as is.
@item Format @var{format_name}
Set the format of the output stream.
Must be the name of a format recognized by FFmpeg. If set to
@samp{status}, it is treated as a status stream.
@item InputFormat @var{format_name}
Set input format. If not specified, it is automatically guessed.
@item Preroll @var{n}
Set this to the number of seconds backwards in time to start. Note that
most players will buffer 5-10 seconds of video, and also you need to allow
for a keyframe to appear in the data stream.
Default value is 0.
@item StartSendOnKey
Do not send stream until it gets the first key frame. By default
@command{ffserver} will send data immediately.
@item MaxTime @var{n}
Set the number of seconds to run. This value sets the maximum duration
of the stream a client will be able to receive.
A value of 0 means that no limit is set on the stream duration.
@item ACL @var{spec}
Set ACL for the stream.
@item DynamicACL @var{spec}
@item RTSPOption @var{option}
@item MulticastAddress @var{address}
@item MulticastPort @var{port}
@item MulticastTTL @var{integer}
@item NoLoop
@item FaviconURL @var{url}
Set favicon (favourite icon) for the server status page. It is ignored
for regular streams.
@item Author @var{value}
@item Comment @var{value}
@item Copyright @var{value}
@item Title @var{value}
Set metadata corresponding to the option. All these options are
deprecated in favor of @option{Metadata}.
@item Metadata @var{key} @var{value}
Set metadata value on the output stream.
@item UseDefaults
@item NoDefaults
Control whether default codec options are used for the stream or not.
Default is @var{UseDefaults} unless disabled globally.
@item NoAudio
@item NoVideo
Suppress audio/video.
@item AudioCodec @var{codec_name} (@emph{encoding,audio})
Set audio codec.
@item AudioBitRate @var{rate} (@emph{encoding,audio})
Set bitrate for the audio stream in kbits per second.
@item AudioChannels @var{n} (@emph{encoding,audio})
Set number of audio channels.
@item AudioSampleRate @var{n} (@emph{encoding,audio})
Set sampling frequency for audio. When using low bitrates, you should
lower this frequency to 22050 or 11025. The supported frequencies
depend on the selected audio codec.
@item AVOptionAudio [@var{codec}:]@var{option} @var{value} (@emph{encoding,audio})
Set generic or private option for audio stream.
Private option must be prefixed with codec name or codec must be defined before.
@item AVPresetAudio @var{preset} (@emph{encoding,audio})
Set preset for audio stream.
@item VideoCodec @var{codec_name} (@emph{encoding,video})
Set video codec.
@item VideoBitRate @var{n} (@emph{encoding,video})
Set bitrate for the video stream in kbits per second.
@item VideoBitRateRange @var{range} (@emph{encoding,video})
Set video bitrate range.
A range must be specified in the form @var{minrate}-@var{maxrate}, and
specifies the @option{minrate} and @option{maxrate} encoding options
expressed in kbits per second.
@item VideoBitRateRangeTolerance @var{n} (@emph{encoding,video})
Set video bitrate tolerance in kbits per second.
@item PixelFormat @var{pixel_format} (@emph{encoding,video})
Set video pixel format.
@item Debug @var{integer} (@emph{encoding,video})
Set video @option{debug} encoding option.
@item Strict @var{integer} (@emph{encoding,video})
Set video @option{strict} encoding option.
@item VideoBufferSize @var{n} (@emph{encoding,video})
Set ratecontrol buffer size, expressed in KB.
@item VideoFrameRate @var{n} (@emph{encoding,video})
Set number of video frames per second.
@item VideoSize (@emph{encoding,video})
Set size of the video frame, must be an abbreviation or in the form
@var{W}x@var{H}. See @ref{video size syntax,,the Video size section
in the ffmpeg-utils(1) manual,ffmpeg-utils}.
Default value is @code{160x128}.
@item VideoIntraOnly (@emph{encoding,video})
Transmit only intra frames (useful for low bitrates, but kills frame rate).
@item VideoGopSize @var{n} (@emph{encoding,video})
If non-intra only, an intra frame is transmitted every VideoGopSize
frames. Video synchronization can only begin at an intra frame.
@item VideoTag @var{tag} (@emph{encoding,video})
Set video tag.
@item VideoHighQuality (@emph{encoding,video})
@item Video4MotionVector (@emph{encoding,video})
@item BitExact (@emph{encoding,video})
Set bitexact encoding flag.
@item IdctSimple (@emph{encoding,video})
Set simple IDCT algorithm.
@item Qscale @var{n} (@emph{encoding,video})
Enable constant quality encoding, and set video qscale (quantization
scale) value, expressed in @var{n} QP units.
@item VideoQMin @var{n} (@emph{encoding,video})
@item VideoQMax @var{n} (@emph{encoding,video})
Set video qmin/qmax.
@item VideoQDiff @var{integer} (@emph{encoding,video})
Set video @option{qdiff} encoding option.
@item LumiMask @var{float} (@emph{encoding,video})
@item DarkMask @var{float} (@emph{encoding,video})
Set @option{lumi_mask}/@option{dark_mask} encoding options.
@item AVOptionVideo [@var{codec}:]@var{option} @var{value} (@emph{encoding,video})
Set generic or private option for video stream.
Private option must be prefixed with codec name or codec must be defined before.
@item AVPresetVideo @var{preset} (@emph{encoding,video})
Set preset for video stream.
@var{preset} must be the path of a preset file.
@end table
@subsection Server status stream
A server status stream is a special stream which is used to show
statistics about the @command{ffserver} operations.
It must be specified setting the option @option{Format} to
@samp{status}.
@section Redirect section
A redirect section specifies that a requested URL is to be redirected
to another page.
A redirect section must be introduced by the line:
@example
<Redirect NAME>
@end example
where @var{NAME} is the name of the page which should be redirected.
It only accepts the option @option{URL}, which specifies the redirection
URL.
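For example, to redirect requests for @file{index.html} to the FFmpeg
website, as in the sample configuration file:
@example
<Redirect index.html>
URL http://www.ffmpeg.org/
</Redirect>
@end example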
@chapter Stream examples
@itemize
@item
Multipart JPEG
@example
<Stream test.mjpg>
Feed feed1.ffm
Format mpjpeg
VideoFrameRate 2
VideoIntraOnly
NoAudio
Strict -1
</Stream>
@end example
@item
Single JPEG
@example
<Stream test.jpg>
Feed feed1.ffm
Format jpeg
VideoFrameRate 2
VideoIntraOnly
VideoSize 352x240
NoAudio
Strict -1
</Stream>
@end example
@item
Flash
@example
<Stream test.swf>
Feed feed1.ffm
Format swf
VideoFrameRate 2
VideoIntraOnly
NoAudio
</Stream>
@end example
@item
ASF compatible
@example
<Stream test.asf>
Feed feed1.ffm
Format asf
VideoFrameRate 15
VideoSize 352x240
VideoBitRate 256
VideoBufferSize 40
VideoGopSize 30
AudioBitRate 64
StartSendOnKey
</Stream>
@end example
@item
MP3 audio
@example
<Stream test.mp3>
Feed feed1.ffm
Format mp2
AudioCodec mp3
AudioBitRate 64
AudioChannels 1
AudioSampleRate 44100
NoVideo
</Stream>
@end example
@item
Ogg Vorbis audio
@example
<Stream test.ogg>
Feed feed1.ffm
Metadata title "Stream title"
AudioBitRate 64
AudioChannels 2
AudioSampleRate 44100
NoVideo
</Stream>
@end example
@item
Real with audio only at 32 kbits
@example
<Stream test.ra>
Feed feed1.ffm
Format rm
AudioBitRate 32
NoVideo
</Stream>
@end example
@item
Real with audio and video at 64 kbits
@example
<Stream test.rm>
Feed feed1.ffm
Format rm
AudioBitRate 32
VideoBitRate 128
VideoFrameRate 25
VideoGopSize 25
</Stream>
@end example
@item
For a stream coming from a file, you only need to set the input filename
and optionally a new format.
@example
<Stream file.rm>
File "/usr/local/httpd/htdocs/tlive.rm"
NoAudio
</Stream>
@end example
@example
<Stream file.asf>
File "/usr/local/httpd/htdocs/test.asf"
NoAudio
Metadata author "Me"
Metadata copyright "Super MegaCorp"
Metadata title "Test stream from disk"
Metadata comment "Test comment"
</Stream>
@end example
@end itemize
@c man end
@include config.texi
@ifset config-all
@ifset config-avutil
@include utils.texi
@end ifset
@ifset config-avcodec
@include codecs.texi
@include bitstream_filters.texi
@end ifset
@ifset config-avformat
@include formats.texi
@include protocols.texi
@end ifset
@ifset config-avdevice
@include devices.texi
@end ifset
@ifset config-swresample
@include resampler.texi
@end ifset
@ifset config-swscale
@include scaler.texi
@end ifset
@ifset config-avfilter
@include filters.texi
@end ifset
@end ifset
@chapter See Also
@ifhtml
@ifset config-all
@url{ffserver.html,ffserver},
@end ifset
@ifset config-not-all
@url{ffserver-all.html,ffserver-all},
@end ifset
the @file{doc/ffserver.conf} example,
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml
@ifnothtml
@ifset config-all
ffserver(1),
@end ifset
@ifset config-not-all
ffserver-all(1),
@end ifset
the @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml
@include authors.texi
@ignore
@setfilename ffserver
@settitle ffserver video server
@end ignore
@bye

View File

@@ -0,0 +1,389 @@
All the numerical options, if not specified otherwise, accept a string
representing a number as input, which may be followed by one of the SI
unit prefixes, for example: 'K', 'M', or 'G'.
If 'i' is appended to the SI unit prefix, the complete prefix will be
interpreted as a unit prefix for binary multiples, which are based on
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
prefix multiplies the value by 8. This allows using, for example:
'KB', 'MiB', 'G' and 'B' as number suffixes.
Options which do not take arguments are boolean options, and set the
corresponding value to true. They can be set to false by prefixing
the option name with "no". For example using "-nofoo"
will set the boolean option with name "foo" to false.
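For example, the following (illustrative) command sets the video bitrate
using an SI suffix and uses the "no" prefix form to disable the boolean
@option{stats} option:
@example
ffmpeg -i input.avi -b:v 1.5M -nostats output.avi
@end example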
@anchor{Stream specifiers}
@section Stream specifiers
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
are used to precisely specify which stream(s) a given option belongs to.
A stream specifier is a string generally appended to the option name and
separated from it by a colon. E.g. @code{-codec:a:1 ac3} contains the
@code{a:1} stream specifier, which matches the second audio stream. Therefore, it
would select the ac3 codec for the second audio stream.
A stream specifier can match several streams, so that the option is applied to all
of them. E.g. the stream specifier in @code{-b:a 128k} matches all audio
streams.
An empty stream specifier matches all streams. For example, @code{-codec copy}
or @code{-codec: copy} would copy all the streams without reencoding.
Possible forms of stream specifiers are:
@table @option
@item @var{stream_index}
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
thread count for the second stream to 4.
@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of following: 'v' or 'V' for video, 'a' for audio, 's'
for subtitle, 'd' for data, and 't' for attachments. 'v' matches all video
streams, 'V' only matches video streams which are not attached pictures, video
thumbnails or cover art. If @var{stream_index} is given, then it matches
stream number @var{stream_index} of this type. Otherwise, it matches all
streams of this type.
@item p:@var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then it matches the stream with number @var{stream_index}
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
program.
@item #@var{stream_id} or i:@var{stream_id}
Match the stream by stream id (e.g. PID in MPEG-TS container).
@item m:@var{key}[:@var{value}]
Matches streams with the metadata tag @var{key} having the specified value. If
@var{value} is not given, matches streams that contain the given tag with any
value.
@item u
Matches streams with a usable configuration: the codec must be defined, and
essential information such as the video dimensions or the audio sample rate must be present.
Note that in @command{ffmpeg}, matching by metadata will only work properly for
input files.
@end table
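For example, the following (illustrative) command combines several of
these forms: it copies all streams, then re-encodes all audio streams
with the ac3 codec and sets the bitrate of the second audio stream only:
@example
ffmpeg -i input.mkv -c copy -c:a ac3 -b:a:1 128k output.mkv
@end example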
@section Generic options
These options are shared amongst the ff* tools.
@table @option
@item -L
Show license.
@item -h, -?, -help, --help [@var{arg}]
Show help. An optional parameter may be specified to print help about a specific
item. If no argument is specified, only basic (non advanced) tool
options are shown.
Possible values of @var{arg} are:
@table @option
@item long
Print advanced tool options in addition to the basic tool options.
@item full
Print complete list of options, including shared and private options
for encoders, decoders, demuxers, muxers, filters, etc.
@item decoder=@var{decoder_name}
Print detailed information about the decoder named @var{decoder_name}. Use the
@option{-decoders} option to get a list of all decoders.
@item encoder=@var{encoder_name}
Print detailed information about the encoder named @var{encoder_name}. Use the
@option{-encoders} option to get a list of all encoders.
@item demuxer=@var{demuxer_name}
Print detailed information about the demuxer named @var{demuxer_name}. Use the
@option{-formats} option to get a list of all demuxers and muxers.
@item muxer=@var{muxer_name}
Print detailed information about the muxer named @var{muxer_name}. Use the
@option{-formats} option to get a list of all muxers and demuxers.
@item filter=@var{filter_name}
Print detailed information about the filter name @var{filter_name}. Use the
@option{-filters} option to get a list of all filters.
@end table
@item -version
Show version.
@item -formats
Show available formats (including devices).
@item -devices
Show available devices.
@item -codecs
Show all codecs known to libavcodec.
Note that the term 'codec' is used throughout this documentation as a shortcut
for what is more correctly called a media bitstream format.
@item -decoders
Show available decoders.
@item -encoders
Show all available encoders.
@item -bsfs
Show available bitstream filters.
@item -protocols
Show available protocols.
@item -filters
Show available libavfilter filters.
@item -pix_fmts
Show available pixel formats.
@item -sample_fmts
Show available sample formats.
@item -layouts
Show channel names and standard channel layouts.
@item -colors
Show recognized color names.
@item -sources @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
Show autodetected sources of the input device.
Some devices may provide system-dependent source names that cannot be autodetected.
The returned list cannot be assumed to be always complete.
@example
ffmpeg -sources pulse,server=192.168.0.4
@end example
@item -sinks @var{device}[,@var{opt1}=@var{val1}[,@var{opt2}=@var{val2}]...]
Show autodetected sinks of the output device.
Some devices may provide system-dependent sink names that cannot be autodetected.
The returned list cannot be assumed to be always complete.
@example
ffmpeg -sinks pulse,server=192.168.0.4
@end example
@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
Set the logging level used by the library.
Adding "repeat+" indicates that repeated log output should not be compressed
to the first line and the "Last message repeated n times" line will be
omitted. "repeat" can also be used alone.
If "repeat" is used alone, and with no prior loglevel set, the default
loglevel will be used. If multiple loglevel parameters are given, using
'repeat' will not change the loglevel.
@var{loglevel} is a string or a number containing one of the following values:
@table @samp
@item quiet, -8
Show nothing at all; be silent.
@item panic, 0
Only show fatal errors which could lead the process to crash, such as
an assert failure. This is not currently used for anything.
@item fatal, 8
Only show fatal errors. These are errors after which the process
absolutely cannot continue.
@item error, 16
Show all errors, including ones which can be recovered from.
@item warning, 24
Show all warnings and errors. Any message related to possibly
incorrect or unexpected events will be shown.
@item info, 32
Show informative messages during processing. This is in addition to
warnings and errors. This is the default value.
@item verbose, 40
Same as @code{info}, except more verbose.
@item debug, 48
Show everything, including debugging information.
@item trace, 56
@end table
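For example, to show verbose output without collapsing repeated
messages:
@example
ffmpeg -loglevel repeat+verbose -i input output
@end example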
By default the program logs to stderr. If coloring is supported by the
terminal, colors are used to mark errors and warnings. Log coloring
can be disabled by setting the environment variable
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced by setting
the environment variable @env{AV_LOG_FORCE_COLOR}.
The use of the environment variable @env{NO_COLOR} is deprecated and
will be dropped in a future FFmpeg version.
@item -report
Dump full command line and console output to a file named
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
directory.
This file can be useful for bug reports.
It also implies @code{-loglevel verbose}.
Setting the environment variable @env{FFREPORT} to any value has the
same effect. If the value is a ':'-separated key=value sequence, these
options will affect the report; option values must be escaped if they
contain special characters or the options delimiter ':' (see the
``Quoting and escaping'' section in the ffmpeg-utils manual).
The following options are recognized:
@table @option
@item file
set the file name to use for the report; @code{%p} is expanded to the name
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
to a plain @code{%}
@item level
set the log verbosity level using a numerical value (see @code{-loglevel}).
@end table
For example, to output a report to a file named @file{ffreport.log}
using a log level of @code{32} (alias for log level @code{info}):
@example
FFREPORT=file=ffreport.log:level=32 ffmpeg -i input output
@end example
Errors in parsing the environment variable are not fatal, and will not
appear in the report.
@item -hide_banner
Suppress printing banner.
All FFmpeg tools will normally show a copyright notice, build options
and library versions. This option can be used to suppress printing
this information.
@item -cpuflags flags (@emph{global})
Allows setting and clearing cpu flags. This option is intended
for testing. Do not use it unless you know what you're doing.
@example
ffmpeg -cpuflags -sse+mmx ...
ffmpeg -cpuflags mmx ...
ffmpeg -cpuflags 0 ...
@end example
Possible flags for this option are:
@table @samp
@item x86
@table @samp
@item mmx
@item mmxext
@item sse
@item sse2
@item sse2slow
@item sse3
@item sse3slow
@item ssse3
@item atom
@item sse4.1
@item sse4.2
@item avx
@item avx2
@item xop
@item fma3
@item fma4
@item 3dnow
@item 3dnowext
@item bmi1
@item bmi2
@item cmov
@end table
@item ARM
@table @samp
@item armv5te
@item armv6
@item armv6t2
@item vfp
@item vfpv3
@item neon
@item setend
@end table
@item AArch64
@table @samp
@item armv8
@item vfp
@item neon
@end table
@item PowerPC
@table @samp
@item altivec
@end table
@item Specific Processors
@table @samp
@item pentium2
@item pentium3
@item pentium4
@item k6
@item k62
@item athlon
@item athlonxp
@item k8
@end table
@end table
@item -opencl_bench
This option is used to benchmark all available OpenCL devices and print the
results. This option is only available when FFmpeg has been compiled with
@code{--enable-opencl}.
When FFmpeg is configured with @code{--enable-opencl}, the options for the
global OpenCL context are set via @option{-opencl_options}. See the
"OpenCL Options" section in the ffmpeg-utils manual for the complete list of
supported options. Amongst others, these options include the ability to select
a specific platform and device to run the OpenCL code on. By default, FFmpeg
will run on the first device of the first platform. While the options for the
global OpenCL context provide flexibility to the user in selecting the OpenCL
device of their choice, most users would probably want to select the fastest
OpenCL device for their system.
This option assists the selection of the most efficient configuration by
identifying the appropriate device for the user's system. The built-in
benchmark is run on all the OpenCL devices and the performance is measured for
each device. The devices in the results list are sorted based on their
performance with the fastest device listed first. The user can subsequently
invoke @command{ffmpeg} using the device deemed most appropriate via
@option{-opencl_options} to obtain the best performance for the OpenCL
accelerated code.
Typical usage of the fastest OpenCL device involves the following steps.
Run the command:
@example
ffmpeg -opencl_bench
@end example
Note down the platform ID (@var{pidx}) and device ID (@var{didx}) of the first,
i.e. fastest, device in the list.
Select the platform and device using the command:
@example
ffmpeg -opencl_options platform_idx=@var{pidx}:device_idx=@var{didx} ...
@end example
@item -opencl_options options (@emph{global})
Set OpenCL environment options. This option is only available when
FFmpeg has been compiled with @code{--enable-opencl}.
@var{options} must be a list of @var{key}=@var{value} option pairs
separated by ':'. See the ``OpenCL Options'' section in the
ffmpeg-utils manual for the list of supported options.
@end table
@section AVOptions
These options are provided directly by the libavformat, libavdevice and
libavcodec libraries. To see the list of available AVOptions, use the
@option{-help} option. They are separated into two categories:
@table @option
@item generic
These options can be set for any container, codec or device. Generic options
are listed under AVFormatContext options for containers/devices and under
AVCodecContext options for codecs.
@item private
These options are specific to the given container, device or codec. Private
options are listed under their corresponding containers/devices/codecs.
@end table
For example to write an ID3v2.3 header instead of a default ID3v2.4 to
an MP3 file, use the @option{id3v2_version} private option of the MP3
muxer:
@example
ffmpeg -i input.flac -id3v2_version 3 out.mp3
@end example
All codec AVOptions are per-stream, and thus a stream specifier
should be attached to them.
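For example, to set the generic @option{profile} codec AVOption only on
the video stream (the codec and profile names are illustrative):
@example
ffmpeg -i input.mov -c:v libx264 -profile:v high output.mp4
@end example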
Note: the @option{-nooption} syntax cannot be used for boolean
AVOptions, use @option{-option 0}/@option{-option 1}.
Note: the old undocumented way of specifying per-stream AVOptions by
prepending v/a/s to the options name is now obsolete and will be
removed soon.

View File

@@ -0,0 +1,270 @@
Filter design
=============
This document explains guidelines that should be observed (or ignored with
good reason) when writing filters for libavfilter.
In this document, the word “frame” indicates either a video frame or a group
of audio samples, as stored in an AVFilterBuffer structure.
Format negotiation
==================
The query_formats method should set, for each input and each output link,
the list of supported formats.
For video links, that means pixel format. For audio links, that means
channel layout, sample format (the sample packing is implied by the sample
format) and sample rate.
The lists are not just lists; they are references to shared objects. When
the negotiation mechanism computes the intersection of the formats
supported at each end of a link, all references to both lists are replaced
with a reference to the intersection. And when a single format is
eventually chosen for a link amongst the remaining list, again, all
references to the list are updated.
That means that if a filter requires that its input and output have the
same format amongst a supported list, all it has to do is use a reference
to the same list of formats.
query_formats can leave some formats unset and return AVERROR(EAGAIN) to
cause the negotiation mechanism to try again later. That can be used by
filters with complex requirements to use the format negotiated on one link
to set the formats supported on another.
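As a sketch, a query_formats implementation for a hypothetical video
filter that forces the same pixel format on its input and output could
look like this (ff_make_format_list and ff_set_common_formats are the
internal helpers from libavfilter/formats.h):

    static int query_formats(AVFilterContext *ctx)
    {
        /* formats supported by this (hypothetical) filter,
           terminated by AV_PIX_FMT_NONE */
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE
        };

        /* one shared list is attached to all inputs and outputs, so
           the format eventually negotiated is the same at both ends */
        ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
        return 0;
    }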
Buffer references ownership and permissions
===========================================
Principle
---------
Audio and video data are voluminous; the buffer and buffer reference
mechanism is intended to avoid, as much as possible, expensive copies of
that data while still allowing the filters to produce correct results.
The data is stored in buffers represented by AVFilterBuffer structures.
They must not be accessed directly, but through references stored in
AVFilterBufferRef structures. Several references can point to the
same buffer; the buffer is automatically deallocated once all
corresponding references have been destroyed.
The characteristics of the data (resolution, sample rate, etc.) are
stored in the reference; different references for the same buffer can
show different characteristics. In particular, a video reference can
point to only a part of a video buffer.
A reference is usually obtained as input to the start_frame or
filter_frame method or requested using the ff_get_video_buffer or
ff_get_audio_buffer functions. A new reference on an existing buffer can
be created with the avfilter_ref_buffer function. A reference is destroyed
using the avfilter_unref_bufferp function.
Reference ownership
-------------------
At any time, a reference “belongs” to a particular piece of code,
usually a filter. With a few caveats that will be explained below, only
that piece of code is allowed to access it. It is also responsible for
destroying it, although this is sometimes done automatically (see the
section on link reference fields).
Here are the (fairly obvious) rules for reference ownership:
* A reference received by the filter_frame method (or its deprecated
start_frame version) belongs to the corresponding filter.
Special exception for video references: the reference may be used
internally for automatic copying and must not be destroyed before
end_frame; it can be given away to ff_start_frame.
* A reference passed to ff_filter_frame (or the deprecated
ff_start_frame) is given away and must no longer be used.
* A reference created with avfilter_ref_buffer belongs to the code that
created it.
* A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
belongs to the code that requested it.
* A reference given as return value by the get_video_buffer or
get_audio_buffer method is given away and must no longer be used.
Link reference fields
---------------------
The AVFilterLink structure has a few AVFilterBufferRef fields. The
cur_buf and out_buf fields were used with the deprecated
start_frame/draw_slice/end_frame API and should no longer be used.
src_buf and partial_buf are used by libavfilter internally
and must not be accessed by filters.
Reference permissions
---------------------
The AVFilterBufferRef structure has a perms field that describes what
the code that owns the reference is allowed to do to the buffer data.
Different references for the same buffer can have different permissions.
For video filters that implement the deprecated
start_frame/draw_slice/end_frame API, the permissions only apply to the
parts of the buffer that have already been covered by the draw_slice
method.
The value is a binary OR of the following constants:
* AV_PERM_READ: the owner can read the buffer data; this is essentially
always true and is there for self-documentation.
* AV_PERM_WRITE: the owner can modify the buffer data.
* AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
will not be modified by previous filters.
* AV_PERM_REUSE: the owner can output the buffer several times, without
modifying the data in between.
* AV_PERM_REUSE2: the owner can output the buffer several times and
modify the data in between (useless without the WRITE permission).
* AV_PERM_ALIGN: the owner can access the data using fast operations
that require data alignment.
The READ, WRITE and PRESERVE permissions are about sharing the same
buffer between several filters to avoid expensive copies without them
doing conflicting changes on the data.
The REUSE and REUSE2 permissions are about special memory for direct
rendering. For example, a buffer directly allocated in video memory must
not be modified once it is displayed on screen, or it will cause tearing;
it will therefore not have the REUSE2 permission.
The ALIGN permission is about extracting part of the buffer, for
copy-less padding or cropping for example.
References received on input pads are guaranteed to have all the
permissions stated in the min_perms field and none of the permissions
stated in the rej_perms field.
References obtained by ff_get_video_buffer and ff_get_audio_buffer are
guaranteed to have at least all the permissions requested as argument.
References created by avfilter_ref_buffer have the same permissions as
the original reference minus the ones explicitly masked; the mask is
usually ~0 to keep the same permissions.
Filters should remove permissions on references they give to output
whenever necessary. It can be automatically done by setting the
rej_perms field on the output pad.
Here are a few guidelines corresponding to common situations:
* Filters that modify and forward their frame (like drawtext) need the
WRITE permission.
* Filters that read their input to produce a new frame on output (like
scale) need the READ permission on input and must request a buffer
with the WRITE permission.
* Filters that intend to keep a reference after the filtering process
is finished (after filter_frame returns) must have the PRESERVE
permission on it and remove the WRITE permission if they create a new
reference to give it away.
* Filters that intend to modify a reference they have kept after the end
of the filtering process need the REUSE2 permission and must remove
the PRESERVE permission if they create a new reference to give it
away.
Frame scheduling
================
The purpose of these rules is to ensure that frames flow in the filter
graph without getting stuck and accumulating somewhere.
Simple filters that output one frame for each input frame should not have
to worry about it.
filter_frame
------------
This method is called when a frame is pushed to the filter's input. It
can be called at any time except in a reentrant way.
If the input frame is enough to produce output, then the filter should
push the output frames on the output link immediately.
As an exception to the previous rule, if the input frame is enough to
produce several output frames, then the filter need only output at
least one frame per link. The additional frames can be left buffered in the
filter; these buffered frames must be flushed immediately if a new input
produces new output.
(Example: frame rate-doubling filter: filter_frame must (1) flush the
second copy of the previous frame, if it is still there, (2) push the
first copy of the incoming frame, (3) keep the second copy for later.)
If the input frame is not enough to produce output, the filter must not
call request_frame to get more. It must just process the frame or queue
it. The task of requesting more frames is left to the filter's
request_frame method or the application.
If a filter has several inputs, the filter must be ready for frames
arriving randomly on any input. Therefore, any filter with several inputs
will most likely require some kind of queuing mechanism. It is perfectly
acceptable to have a limited queue and to drop frames when the inputs
are too unbalanced.
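As a minimal sketch, the filter_frame method of a filter that outputs
exactly one frame per input frame could look like this (the actual
processing step is left out; ff_filter_frame is the internal helper
that pushes a frame to the next filter):

    static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
    {
        AVFilterContext *ctx     = inlink->dst;
        AVFilterLink    *outlink = ctx->outputs[0];

        /* ... examine or modify the frame data here ... */

        /* the reference is given away; it must not be used by this
           filter afterwards */
        return ff_filter_frame(outlink, frame);
    }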
request_frame
-------------
This method is called when a frame is wanted on an output.
For an input, it should directly call filter_frame on the corresponding
output.
For a filter, if there are queued frames already ready, one of these
frames should be pushed. If not, the filter should request a frame on
one of its inputs, repeatedly until at least one frame has been pushed.
Return values:
if request_frame could produce a frame, it should return 0;
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
if it could not because there are no more frames, it should return
AVERROR_EOF.
The typical implementation of request_frame for a filter with several
inputs will look like this:
    if (frames_queued) {
        push_one_frame();
        return 0;
    }
    while (!frame_pushed) {
        input = input_where_a_frame_is_most_needed();
        ret = ff_request_frame(input);
        if (ret == AVERROR_EOF) {
            process_eof_on_input();
        } else if (ret < 0) {
            return ret;
        }
    }
    return 0;
Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them to its input, and as a reaction,
the filter_frame method will be called and do the work.
Legacy API
==========
Until libavfilter 3.23, the filter_frame method was split:
- for video filters, it was made of start_frame, draw_slice (that could be
called several times on distinct parts of the frame) and end_frame;
- for audio filters, it was called filter_samples.

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,249 @@
@chapter Format Options
@c man begin FORMAT OPTIONS
The libavformat library provides some generic global options, which
can be set on all the muxers and demuxers. In addition each muxer or
demuxer may support so-called private options, which are specific for
that component.
Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, or by setting the value explicitly in the
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
for programmatic use.
The list of supported options follows:
@table @option
@item avioflags @var{flags} (@emph{input/output})
Possible values:
@table @samp
@item direct
Reduce buffering.
@end table
@item probesize @var{integer} (@emph{input})
Set probing size in bytes, i.e. the size of the data to analyze to get
stream information. A higher value will enable detecting more
information in case it is dispersed into the stream, but will increase
latency. Must be an integer not less than 32. It is 5000000 by default.
@item packetsize @var{integer} (@emph{output})
Set packet size.
@item fflags @var{flags} (@emph{input/output})
Set format flags.
Possible values:
@table @samp
@item ignidx
Ignore index.
@item fastseek
Enable fast, but inaccurate seeks for some formats.
@item genpts
Generate PTS.
@item nofillin
Do not fill in missing values that can be exactly calculated.
@item noparse
Disable AVParsers; this needs @code{+nofillin} too.
@item igndts
Ignore DTS.
@item discardcorrupt
Discard corrupted frames.
@item sortdts
Try to interleave output packets by DTS.
@item keepside
Do not merge side data.
@item latm
Enable RTP MP4A-LATM payload.
@item nobuffer
Reduce the latency introduced by optional buffering.
@item bitexact
Only write platform-, build- and time-independent data.
This ensures that file and data checksums are reproducible and match between
platforms. Its primary use is for regression testing.
@end table
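Flags can be set with the usual '+'/'-' AVOption flag syntax. For
example, to generate missing PTS while demuxing and stream-copying a
file (illustrative filenames):
@example
ffmpeg -fflags +genpts -i input.ts -c copy output.mkv
@end example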
@item seek2any @var{integer} (@emph{input})
Allow seeking to non-keyframes on demuxer level when supported if set to 1.
Default is 0.
@item analyzeduration @var{integer} (@emph{input})
Specify how many microseconds are analyzed to probe the input. A
higher value will enable detecting more accurate information, but will
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.
@item cryptokey @var{hexadecimal string} (@emph{input})
Set decryption key.
@item indexmem @var{integer} (@emph{input})
Set max memory used for timestamp index (per stream).
@item rtbufsize @var{integer} (@emph{input})
Set max memory used for buffering real-time frames.
@item fdebug @var{flags} (@emph{input/output})
Print specific debug info.
Possible values:
@table @samp
@item ts
@end table
@item max_delay @var{integer} (@emph{input/output})
Set maximum muxing or demuxing delay in microseconds.
@item fpsprobesize @var{integer} (@emph{input})
Set number of frames used to probe fps.
@item audio_preload @var{integer} (@emph{output})
Set microseconds by which audio packets should be interleaved earlier.
@item chunk_duration @var{integer} (@emph{output})
Set microseconds for each chunk.
@item chunk_size @var{integer} (@emph{output})
Set size in bytes for each chunk.
@item err_detect, f_err_detect @var{flags} (@emph{input})
Set error detection flags. @code{f_err_detect} is deprecated and
should be used only via the @command{ffmpeg} tool.
Possible values:
@table @samp
@item crccheck
Verify embedded CRCs.
@item bitstream
Detect bitstream specification deviations.
@item buffer
Detect improper bitstream length.
@item explode
Abort decoding on minor error detection.
@item careful
Consider things that violate the spec and have not been seen in the
wild as errors.
@item compliant
Consider all spec non-compliances as errors.
@item aggressive
Consider things that a sane encoder should not do as errors.
@end table
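For example, a sketch that verifies CRCs and aborts decoding on errors while
discarding the decoded output (@file{INPUT} is a placeholder):
@example
ffmpeg -err_detect +crccheck+explode -i INPUT -f null -
@end example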
@item max_interleave_delta @var{integer} (@emph{output})
Set maximum buffering duration for interleaving. The duration is
expressed in microseconds, and defaults to 1000000 (1 second).
To ensure all the streams are interleaved correctly, libavformat will
wait until it has at least one packet for each stream before actually
writing any packets to the output file. When some streams are
"sparse" (i.e. there are large gaps between successive packets), this
can result in excessive buffering.
This field specifies the maximum difference between the timestamps of the
first and the last packet in the muxing queue, above which libavformat
will output a packet regardless of whether it has queued a packet for all
the streams.
If set to 0, libavformat will continue buffering packets until it has
a packet for each stream, regardless of the maximum timestamp
difference between the buffered packets.
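For example, to remove the limit entirely, so that libavformat keeps
buffering until it has a packet for every stream (@file{INPUT} and
@file{OUTPUT} are placeholders):
@example
ffmpeg -i INPUT -c copy -max_interleave_delta 0 OUTPUT
@end example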
@item use_wallclock_as_timestamps @var{integer} (@emph{input})
Use wallclock as timestamps.
@item avoid_negative_ts @var{integer} (@emph{output})
Possible values:
@table @samp
@item make_non_negative
Shift timestamps to make them non-negative.
Also note that this affects only leading negative timestamps, and not
non-monotonic negative timestamps.
@item make_zero
Shift timestamps so that the first timestamp is 0.
@item auto (default)
Enables shifting when required by the target format.
@item disabled
Disables shifting of timestamps.
@end table
When shifting is enabled, all output timestamps are shifted by the
same amount. Audio, video, and subtitle desynching and relative
timestamp differences are preserved compared to how they would have
been without shifting.
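For example, to shift timestamps so that the output starts at zero while
stream-copying (@file{INPUT} and @file{OUTPUT} are placeholders):
@example
ffmpeg -i INPUT -c copy -avoid_negative_ts make_zero OUTPUT
@end example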
@item skip_initial_bytes @var{integer} (@emph{input})
Set number of bytes to skip before reading header and frames.
Default is 0.
@item correct_ts_overflow @var{integer} (@emph{input})
Correct single timestamp overflows if set to 1. Default is 1.
@item flush_packets @var{integer} (@emph{output})
Flush the underlying I/O stream after each packet. Default 1 enables it, and
has the effect of reducing the latency; 0 disables it and may slightly
increase performance in some cases.
@item output_ts_offset @var{offset} (@emph{output})
Set the output time offset.
@var{offset} must be a time duration specification,
see @ref{time duration syntax,,the Time duration section in the ffmpeg-utils(1) manual,ffmpeg-utils}.
The offset is added by the muxer to the output timestamps.
Specifying a positive offset means that the corresponding streams are
delayed by the time duration specified in @var{offset}. Default value
is @code{0} (meaning that no offset is applied).
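For example, to delay all output timestamps by 10 seconds while
stream-copying (@file{INPUT} and @file{OUTPUT} are placeholders):
@example
ffmpeg -i INPUT -c copy -output_ts_offset 10 OUTPUT
@end example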
@item format_whitelist @var{list} (@emph{input})
"," separated List of allowed demuxers. By default all are allowed.
@item dump_separator @var{string} (@emph{input})
Separator used to separate the fields printed on the command line about the
stream parameters.
For example, to separate the fields with newlines and indentation:
@example
ffprobe -dump_separator "
" -i ~/videos/matrixbench_mpeg2.mpg
@end example
@end table
@c man end FORMAT OPTIONS
@anchor{Format stream specifiers}
@section Format stream specifiers
Format stream specifiers allow selection of one or more streams that
match specific properties.
Possible forms of stream specifiers are:
@table @option
@item @var{stream_index}
Matches the stream with this index.
@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of the following: 'v' for video, 'a' for audio,
's' for subtitle, 'd' for data, and 't' for attachments. If
@var{stream_index} is given, then it matches the stream number
@var{stream_index} of this type. Otherwise, it matches all streams of
this type.
@item p:@var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then it matches the stream with number
@var{stream_index} in the program with the id
@var{program_id}. Otherwise, it matches all streams in the program.
@item #@var{stream_id}
Matches the stream by a format-specific ID.
@end table
The exact semantics of stream specifiers are defined by the
@code{avformat_match_stream_specifier()} function declared in the
@file{libavformat/avformat.h} header.
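For example, the @command{ffmpeg} @option{-map} option accepts this specifier
syntax; the following selects the first video stream and the second audio
stream of an input (@file{INPUT} and @file{OUTPUT} are placeholders):
@example
ffmpeg -i INPUT -map 0:v:0 -map 0:a:1 -c copy OUTPUT
@end example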
@ifclear config-writeonly
@include demuxers.texi
@end ifclear
@ifclear config-readonly
@include muxers.texi
@end ifclear
@include metadata.texi

@@ -0,0 +1,420 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle Using Git to develop FFmpeg
@titlepage
@center @titlefont{Using Git to develop FFmpeg}
@end titlepage
@top
@contents
@chapter Introduction
This document aims to give some quick references on a set of useful Git
commands. You should always use the extensive and detailed documentation
provided directly by Git:
@example
git --help
man git
@end example
shows you the available subcommands,
@example
git <command> --help
man git-<command>
@end example
shows information about the subcommand <command>.
Additional information can be found on the
@url{http://gitref.org, Git Reference} website.
For more information about the Git project, visit the
@url{http://git-scm.com/, Git website}.
Consult these resources whenever you have problems; they are quite exhaustive.
What follows now is a basic introduction to Git and some FFmpeg-specific
guidelines to ease the contribution to the project.
@chapter Basic Usage
@section Get Git
You can get Git from @url{http://git-scm.com/}
Most distributions and operating systems provide a package for it.
@section Cloning the source tree
@example
git clone git://source.ffmpeg.org/ffmpeg <target>
@end example
This will put the FFmpeg sources into the directory @var{<target>}.
@example
git clone git@@source.ffmpeg.org:ffmpeg <target>
@end example
This will put the FFmpeg sources into the directory @var{<target>} and let
you push back your changes to the remote repository.
Make sure that you do not have Windows line endings in your checkouts,
otherwise you may experience spurious compilation failures. One way to
achieve this is to run
@example
git config --global core.autocrlf false
@end example
@anchor{Updating the source tree to the latest revision}
@section Updating the source tree to the latest revision
@example
git pull (--rebase)
@end example
pulls in the latest changes from the tracked branch. The tracked branch
can be remote. By default the master branch tracks the branch master in
the remote origin.
@float IMPORTANT
@option{--rebase} (see below) is recommended.
@end float
@section Rebasing your local branches
@example
git pull --rebase
@end example
fetches the changes from the main repository and replays your local commits
over it. This is required to keep all your local changes at the top of
FFmpeg's master tree. The master tree will reject pushes with merge commits.
@section Adding/removing files/directories
@example
git add [-A] <filename/dirname>
git rm [-r] <filename/dirname>
@end example
Git needs to be notified of all changes you make to your working
directory that make files appear or disappear.
Line moves across files are automatically tracked.
@section Showing modifications
@example
git diff <filename(s)>
@end example
will show all local modifications in your working directory as a unified diff.
@section Inspecting the changelog
@example
git log <filename(s)>
@end example
You may also use graphical tools like @command{gitview} or @command{gitk}
or the web interface available at @url{http://source.ffmpeg.org/}.
@section Checking source tree status
@example
git status
@end example
detects all the changes you made and lists what actions will be taken in case
of a commit (additions, modifications, deletions, etc.).
@section Committing
@example
git diff --check
@end example
to double-check your changes before committing them and avoid trouble later
on. All experienced developers do this on each and every commit, no matter
how small.
Every one of them has been saved from looking like a fool by this many times.
It's very easy for stray debug output or cosmetic modifications to slip in;
please avoid such problems through this extra level of scrutiny.
For cosmetics-only commits you should get (almost) empty output from
@example
git diff -w -b <filename(s)>
@end example
Also check the output of
@example
git status
@end example
to make sure you don't have untracked files or deletions.
@example
git add [-i|-p|-A] <filenames/dirnames>
@end example
Make sure you have told Git your name and email address
@example
git config --global user.name "My Name"
git config --global user.email my@@email.invalid
@end example
Use @option{--global} to set the global configuration for all your Git checkouts.
Git will select the changes to the files for commit. Optionally you can use
the interactive or the patch mode to select hunk by hunk what should be
added to the commit.
@example
git commit
@end example
Git will commit the selected changes to your current local branch.
You will be prompted for a log message in an editor, which is either
set in your personal configuration file through
@example
git config --global core.editor
@end example
or set by one of the following environment variables:
@var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}.
Log messages should be concise but descriptive. Explain why you made a change,
what you did will be obvious from the changes themselves most of the time.
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
levels look at your code and educate themselves while reading through it. Don't
include filenames in log messages; Git provides that information.
Preferably make the commit message have a terse, descriptive first line, an
empty line and then a full description. The first line will be used to name
the patch by @command{git format-patch}.
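A hypothetical commit message following this convention:
@example
avutil/frobnicate: fix off-by-one in buffer size check

The check rejected valid buffers of exactly the required size.
Also add a FATE test covering the boundary case.
@end example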
@section Preparing a patchset
@example
git format-patch <commit> [-o directory]
@end example
will generate a set of patches for each commit between @var{<commit>} and
the current @var{HEAD}. E.g.
@example
git format-patch origin/master
@end example
will generate patches for all commits on the current branch which are not
present in upstream.
A useful shortcut is also
@example
git format-patch -n
@end example
which will generate patches from the last @var{n} commits.
By default the patches are created in the current directory.
@section Sending patches for review
@example
git send-email <commit list|directory>
@end example
will send the patches created by @command{git format-patch} or directly
generate them. All the email fields can be configured in the global/local
configuration or overridden on the command line.
Note that this tool must often be installed separately (e.g. @var{git-email}
package on Debian-based distros).
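A hypothetical end-to-end workflow might look like (the @file{outgoing}
directory name is arbitrary):
@example
git format-patch -o outgoing origin/master
git send-email outgoing/*.patch
@end example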
@section Renaming/moving/copying files or contents of files
Git automatically tracks such changes, making those normal commits.
@example
mv/cp path/file otherpath/otherfile
git add [-A] .
git commit
@end example
@chapter Git configuration
In order to simplify a few workflows, it is advisable to configure both
your personal Git installation and your local FFmpeg repository.
@section Personal Git installation
Add the following to your @file{~/.gitconfig} to help @command{git send-email}
and @command{git format-patch} detect renames:
@example
[diff]
renames = copy
@end example
@section Repository configuration
In order to have @command{git send-email} automatically send patches
to the ffmpeg-devel mailing list, add the following stanza
to @file{/path/to/ffmpeg/repository/.git/config}:
@example
[sendemail]
to = ffmpeg-devel@@ffmpeg.org
@end example
@chapter FFmpeg specific
@section Reverting broken commits
@example
git reset <commit>
@end example
@command{git reset} will uncommit the changes up to @var{<commit>}, rewriting
the current branch history.
@example
git commit --amend
@end example
allows one to amend the last commit details quickly.
@example
git rebase -i origin/master
@end example
will replay local commits over the main repository allowing you to edit, merge
or remove some of them in the process.
@float NOTE
@command{git reset}, @command{git commit --amend} and @command{git rebase}
rewrite history, so you should use them ONLY on your local or topic branches.
The main repository will reject those changes.
@end float
@example
git revert <commit>
@end example
@command{git revert} will generate a revert commit. This will not make the
faulty commit disappear from the history.
@section Pushing changes to remote trees
@example
git push origin master --dry-run
@end example
will simulate a push of the local master branch to the default remote
(@var{origin}), and list which branches and ranges of commits would have been
pushed.
Git will prevent you from pushing changes if the local and remote trees are
out of sync. Refer to @ref{Updating the source tree to the latest revision}.
@example
git remote add <name> <url>
@end example
will add an additional remote with a name reference; this is useful if you want
to push your local branch for review to a remote host.
@example
git push <remote> <refspec>
@end example
will push the changes to the @var{<remote>} repository.
Omitting @var{<refspec>} makes @command{git push} update all the remote
branches matching the local ones.
@section Finding a specific svn revision
Since version 1.7.1, Git supports the @samp{:/foo} syntax for specifying commits
based on a regular expression; see @command{man gitrevisions}.
@example
git show :/'as revision 23456'
@end example
will show the svn changeset @samp{r23456}. With older Git versions searching in
the @command{git log} output is the easiest option (especially if a pager with
search capabilities is used).
This commit can be checked out with
@example
git checkout -b svn_23456 :/'as revision 23456'
@end example
or for Git < 1.7.1 with
@example
git checkout -b svn_23456 $SHA1
@end example
where @var{$SHA1} is the commit hash from the @command{git log} output.
@chapter Pre-push checklist
Once you have a set of commits that you feel are ready for pushing,
work through the following checklist to double-check that everything is in
proper order. This list tries to be exhaustive. In case you are just
pushing a typo in a comment, some of the steps may be unnecessary.
Apply your common sense, but if in doubt, err on the side of caution.
First, make sure that the commits and branches you are going to push
match what you want pushed and that nothing is missing, extraneous or
wrong. You can see what will be pushed by running the @command{git push}
command with @option{--dry-run} first, and then inspecting the commits listed
with @command{git log -p 1234567..987654}. The @command{git status} command
may help in finding local changes that you have forgotten to add.
Next, let the code pass through a full run of our test suite.
@itemize
@item @command{make distclean}
@item @command{/path/to/ffmpeg/configure}
@item @command{make fate}
@item if fate fails due to missing samples, run @command{make fate-rsync} and retry
@end itemize
Make sure all your changes have been checked before pushing them; the
test suite only checks against regressions, and only to some extent. It
obviously does not check that newly added features/code work, unless you
have added a test for that (which is recommended).
Also note that every single commit should pass the test suite, not just
the result of a series of patches.
Once everything has passed, push the changes to your public ffmpeg clone and
post a merge request to ffmpeg-devel. You can also push them directly, but this
is not recommended.
@chapter Server Issues
Contact the project admins at @email{root@@ffmpeg.org} if you have technical
problems with the Git server.

@@ -0,0 +1,194 @@
FFmpeg's bug/feature request tracker manual
=================================================
NOTE: This is a draft.
Overview:
---------
FFmpeg uses Trac for tracking issues; new issues and changes to
existing issues can be submitted through a web interface.
Issues can be different kinds of things we want to keep track of
but that do not belong in the source tree itself. This includes
bug reports, feature requests and license violations. We
might add more items to this list in the future, so feel free to
propose a new `type of issue' on the ffmpeg-devel mailing list if
you feel it is worth tracking.
It is possible to subscribe to individual issues by adding yourself to the
Cc list or to subscribe to the ffmpeg-trac mailing list which receives
a mail for every change to every issue.
(all of the above already works, after light testing)
The subscription URL for the ffmpeg-trac list is:
http(s)://lists.ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the web interface of the tracker is:
http(s)://trac.ffmpeg.org
Type:
-----
art
Artwork such as photos, music, banners, and logos.
bug / defect
An error, flaw, mistake, failure, or fault in FFmpeg or libav* that
prevents it from behaving as intended.
feature request / enhancement
Requests for support of encoding or decoding a new codec, container
or variant.
Requests for more, less or plainly different output or behavior
where the current implementation cannot be considered wrong.
license violation
Tickets to keep track of (L)GPL violations of FFmpeg by others.
sponsoring request
Developer requests for hardware, software, specifications, money,
refunds, etc.
Priority:
---------
critical
Bugs about data loss and security issues.
No feature request can be critical.
important
Bugs which make FFmpeg unusable for a significant number of users.
Examples here might be completely broken MPEG-4 decoding or a build issue
on Linux.
While broken 4xm decoding or a broken OS/2 build would not be important,
the separation from normal is somewhat fuzzy.
For feature requests this priority would be used for things many people
want.
Regressions should also be marked as important; regressions are bugs that
do not exist in a past revision or another branch.
normal
minor
Bugs about things like spelling errors, "mp2" instead of
"mp3" being shown and such.
Feature requests about things few people want or which do not make a big
difference.
wish
Something that is desirable to have but that there is no urgency at
all to implement, e.g. something completely cosmetic like a website
restyle or a personalized doxy template or the FFmpeg logo.
This priority is not valid for bugs.
Status:
-------
new
initial state
open
intermediate states
closed
final state
Analyzed flag:
--------------
Bugs which have been analyzed and where it is understood what causes them
and which exact chain of events triggers them. This analysis should be
available as a message in the bug report.
Note, do not change the status to analyzed without also providing a clear
and understandable analysis.
This state implies that the bug either has been reproduced or that
reproduction is not needed as the bug is already understood.
Type/Status:
------------
*/new
Initial state of new bugs and feature requests submitted by
users.
*/open
Issues which have been briefly looked at and which did not look outright
invalid.
This implies that no more detailed state applies yet. Conversely,
the more detailed states below imply that the issue has been briefly
looked at.
*/closed/duplicate
Bugs or feature requests which are duplicates.
Note, if you mark something as duplicate, do not forget to set the
superseder so bug reports are properly linked.
*/closed/invalid
Bugs caused by user errors, or random, illegible, or otherwise nonsensical submissions.
*/closed/needs_more_info
Issues for which some information has been requested by the developers,
but which has not been provided by anyone within reasonable time.
bug/closed/fixed
Bugs which have to the best of our knowledge been fixed.
bug/closed/wontfix
Bugs which we will not fix. Possible reasons include legality, high
complexity for the sake of supporting obscure corner cases, speed loss
for similarly esoteric purposes, et cetera.
This also means that we would reject a patch.
If we are just too lazy to fix a bug, then the correct state is open
and unassigned. Closed means that the case is closed, which is not
true if we are just waiting for a patch.
bug/closed/works_for_me
Bugs for which sufficient information to reproduce was provided, but
reproduction failed; that is, the code seems to work correctly to the
best of our knowledge.
feature_request/closed/fixed
Feature requests which have been implemented.
feature_request/closed/wontfix
Feature requests which will not be implemented. The reasons here could
be legal, philosophical or others.
Note 2: if you provide the requested info, do not forget to remove the
needs_more_info resolution.
Component:
----------
avcodec
issues in libavcodec/*
avformat
issues in libavformat/*
avutil
issues in libavutil/*
regression test
issues in tests/*
ffmpeg
issues in or related to ffmpeg.c
ffplay
issues in or related to ffplay.c
ffprobe
issues in or related to ffprobe.c
ffserver
issues in or related to ffserver.c
build system
issues in or related to configure/Makefile
regression
bugs which were not present in a past revision
trac
issues related to our issue tracker


@@ -0,0 +1,159 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "doc::libavcodec 3"
.TH doc::libavcodec 3 " " " " " "
.SH "NAME"
libavcodec \- media streams decoding and encoding library
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
The libavcodec library provides a generic encoding/decoding framework
and contains multiple decoders and encoders for audio, video and
subtitle streams, and several bitstream filters.
.PP
The shared architecture provides various services ranging from bit
stream I/O to \s-1DSP\s0 optimizations, and makes it suitable for
implementing robust and fast codecs as well as for experimentation.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1),
\&\fIffmpeg\-codecs\fR\|(1), \fIffmpeg\-bitstream\-filters\fR\|(1),
\&\fIlibavutil\fR\|(3)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.


@@ -0,0 +1,43 @@
=encoding utf8
=head1 NAME
libavcodec - media streams decoding and encoding library
=head1 DESCRIPTION
The libavcodec library provides a generic encoding/decoding framework
and contains multiple decoders and encoders for audio, video and
subtitle streams, and several bitstream filters.
The shared architecture provides various services ranging from bit
stream I/O to DSP optimizations, and makes it suitable for
implementing robust and fast codecs as well as for experimentation.
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
libavutil(3)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.


@@ -0,0 +1,49 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle Libavcodec Documentation
@titlepage
@center @titlefont{Libavcodec Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavcodec library provides a generic encoding/decoding framework
and contains multiple decoders and encoders for audio, video and
subtitle streams, and several bitstream filters.
The shared architecture provides various services ranging from bit
stream I/O to DSP optimizations, and makes it suitable for
implementing robust and fast codecs as well as for experimentation.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-codecs.html,ffmpeg-codecs}, @url{ffmpeg-bitstream-filters.html,bitstream-filters},
@url{libavutil.html,libavutil}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
libavutil(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavcodec
@settitle media streams decoding and encoding library
@end ignore
@bye


@@ -0,0 +1,156 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "doc::libavdevice 3"
.TH doc::libavdevice 3 " " " " " "
.SH "NAME"
libavdevice \- multimedia device handling library
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
The libavdevice library provides a generic framework for grabbing from
and rendering to many common multimedia input/output devices, and
supports several input and output devices, including Video4Linux2,
VfW, DShow, and \s-1ALSA\s0.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1),
\&\fIffmpeg\-devices\fR\|(1),
\&\fIlibavutil\fR\|(3), \fIlibavcodec\fR\|(3), \fIlibavformat\fR\|(3)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.


@@ -0,0 +1,40 @@
=encoding utf8
=head1 NAME
libavdevice - multimedia device handling library
=head1 DESCRIPTION
The libavdevice library provides a generic framework for grabbing from
and rendering to many common multimedia input/output devices, and
supports several input and output devices, including Video4Linux2,
VfW, DShow, and ALSA.
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-devices(1),
libavutil(3), libavcodec(3), libavformat(3)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.


@@ -0,0 +1,46 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle Libavdevice Documentation
@titlepage
@center @titlefont{Libavdevice Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavdevice library provides a generic framework for grabbing from
and rendering to many common multimedia input/output devices, and
supports several input and output devices, including Video4Linux2,
VfW, DShow, and ALSA.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-devices(1),
libavutil(3), libavcodec(3), libavformat(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavdevice
@settitle multimedia device handling library
@end ignore
@bye


@@ -0,0 +1,45 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle Libavfilter Documentation
@titlepage
@center @titlefont{Libavfilter Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavfilter library provides a generic audio/video filtering
framework containing several filters, sources and sinks.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-filters.html,ffmpeg-filters},
@url{libavutil.html,libavutil}, @url{libswscale.html,libswscale}, @url{libswresample.html,libswresample},
@url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}, @url{libavdevice.html,libavdevice}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-filters(1),
libavutil(3), libswscale(3), libswresample(3), libavcodec(3), libavformat(3), libavdevice(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavfilter
@settitle multimedia filtering library
@end ignore
@bye


@@ -0,0 +1,159 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "doc::libavformat 3"
.TH doc::libavformat 3 " " " " " "
.SH "NAME"
libavformat \- multimedia muxing and demuxing library
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.
.PP
It also supports several input and output protocols to access a media
resource.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1),
\&\fIffmpeg\-formats\fR\|(1), \fIffmpeg\-protocols\fR\|(1),
\&\fIlibavutil\fR\|(3), \fIlibavcodec\fR\|(3)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.


@@ -0,0 +1,43 @@
=encoding utf8
=head1 NAME
libavformat - multimedia muxing and demuxing library
=head1 DESCRIPTION
The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.
It also supports several input and output protocols to access a media
resource.
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-formats(1), ffmpeg-protocols(1),
libavutil(3), libavcodec(3)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.


@@ -0,0 +1,49 @@
\input texinfo @c -*- texinfo -*-
@documentencoding UTF-8
@settitle Libavformat Documentation
@titlepage
@center @titlefont{Libavformat Documentation}
@end titlepage
@top
@contents
@chapter Description
@c man begin DESCRIPTION
The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.
It also supports several input and output protocols to access a media
resource.
@c man end DESCRIPTION
@chapter See Also
@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-formats.html,ffmpeg-formats}, @url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}
@end ifhtml
@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-formats(1), ffmpeg-protocols(1),
libavutil(3), libavcodec(3)
@end ifnothtml
@include authors.texi
@ignore
@setfilename libavformat
@settitle multimedia muxing and demuxing library
@end ignore
@bye


@@ -0,0 +1,172 @@
.\" Automatically generated by Pod::Man v1.37, Pod::Parser v1.32
.\"
.\" Standard preamble:
.\" ========================================================================
.de Sh \" Subsection heading
.br
.if t .Sp
.ne 5
.PP
\fB\\$1\fR
.PP
..
.de Sp \" Vertical space (when we can't use .PP)
.if t .sp .5v
.if n .sp
..
.de Vb \" Begin verbatim text
.ft CW
.nf
.ne \\$1
..
.de Ve \" End verbatim text
.ft R
.fi
..
.\" Set up some character translations and predefined strings. \*(-- will
.\" give an unbreakable dash, \*(PI will give pi, \*(L" will give a left
.\" double quote, and \*(R" will give a right double quote. | will give a
.\" real vertical bar. \*(C+ will give a nicer C++. Capital omega is used to
.\" do unbreakable dashes and therefore won't be available. \*(C` and \*(C'
.\" expand to `' in nroff, nothing in troff, for use with C<>.
.tr \(*W-|\(bv\*(Tr
.ds C+ C\v'-.1v'\h'-1p'\s-2+\h'-1p'+\s0\v'.1v'\h'-1p'
.ie n \{\
. ds -- \(*W-
. ds PI pi
. if (\n(.H=4u)&(1m=24u) .ds -- \(*W\h'-12u'\(*W\h'-12u'-\" diablo 10 pitch
. if (\n(.H=4u)&(1m=20u) .ds -- \(*W\h'-12u'\(*W\h'-8u'-\" diablo 12 pitch
. ds L" ""
. ds R" ""
. ds C` ""
. ds C' ""
'br\}
.el\{\
. ds -- \|\(em\|
. ds PI \(*p
. ds L" ``
. ds R" ''
'br\}
.\"
.\" If the F register is turned on, we'll generate index entries on stderr for
.\" titles (.TH), headers (.SH), subsections (.Sh), items (.Ip), and index
.\" entries marked with X<> in POD. Of course, you'll have to process the
.\" output yourself in some meaningful fashion.
.if \nF \{\
. de IX
. tm Index:\\$1\t\\n%\t"\\$2"
..
. nr % 0
. rr F
.\}
.\"
.\" For nroff, turn off justification. Always turn off hyphenation; it makes
.\" way too many mistakes in technical documents.
.hy 0
.if n .na
.\"
.\" Accent mark definitions (@(#)ms.acc 1.5 88/02/08 SMI; from UCB 4.2).
.\" Fear. Run. Save yourself. No user-serviceable parts.
. \" fudge factors for nroff and troff
.if n \{\
. ds #H 0
. ds #V .8m
. ds #F .3m
. ds #[ \f1
. ds #] \fP
.\}
.if t \{\
. ds #H ((1u-(\\\\n(.fu%2u))*.13m)
. ds #V .6m
. ds #F 0
. ds #[ \&
. ds #] \&
.\}
. \" simple accents for nroff and troff
.if n \{\
. ds ' \&
. ds ` \&
. ds ^ \&
. ds , \&
. ds ~ ~
. ds /
.\}
.if t \{\
. ds ' \\k:\h'-(\\n(.wu*8/10-\*(#H)'\'\h"|\\n:u"
. ds ` \\k:\h'-(\\n(.wu*8/10-\*(#H)'\`\h'|\\n:u'
. ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'^\h'|\\n:u'
. ds , \\k:\h'-(\\n(.wu*8/10)',\h'|\\n:u'
. ds ~ \\k:\h'-(\\n(.wu-\*(#H-.1m)'~\h'|\\n:u'
. ds / \\k:\h'-(\\n(.wu*8/10-\*(#H)'\z\(sl\h'|\\n:u'
.\}
. \" troff and (daisy-wheel) nroff accents
.ds : \\k:\h'-(\\n(.wu*8/10-\*(#H+.1m+\*(#F)'\v'-\*(#V'\z.\h'.2m+\*(#F'.\h'|\\n:u'\v'\*(#V'
.ds 8 \h'\*(#H'\(*b\h'-\*(#H'
.ds o \\k:\h'-(\\n(.wu+\w'\(de'u-\*(#H)/2u'\v'-.3n'\*(#[\z\(de\v'.3n'\h'|\\n:u'\*(#]
.ds d- \h'\*(#H'\(pd\h'-\w'~'u'\v'-.25m'\f2\(hy\fP\v'.25m'\h'-\*(#H'
.ds D- D\\k:\h'-\w'D'u'\v'-.11m'\z\(hy\v'.11m'\h'|\\n:u'
.ds th \*(#[\v'.3m'\s+1I\s-1\v'-.3m'\h'-(\w'I'u*2/3)'\s-1o\s+1\*(#]
.ds Th \*(#[\s+2I\s-2\h'-\w'I'u*3/5'\v'-.3m'o\v'.3m'\*(#]
.ds ae a\h'-(\w'a'u*4/10)'e
.ds Ae A\h'-(\w'A'u*4/10)'E
. \" corrections for vroff
.if v .ds ~ \\k:\h'-(\\n(.wu*9/10-\*(#H)'\s-2\u~\d\s+2\h'|\\n:u'
.if v .ds ^ \\k:\h'-(\\n(.wu*10/11-\*(#H)'\v'-.4m'^\v'.4m'\h'|\\n:u'
. \" for low resolution devices (crt and lpr)
.if \n(.H>23 .if \n(.V>19 \
\{\
. ds : e
. ds 8 ss
. ds o a
. ds d- d\h'-1'\(ga
. ds D- D\h'-1'\(hy
. ds th \o'bp'
. ds Th \o'LP'
. ds ae ae
. ds Ae AE
.\}
.rm #[ #] #H #V #F C
.\" ========================================================================
.\"
.IX Title "doc::libavutil 3"
.TH doc::libavutil 3 " " " " " "
.SH "NAME"
libavutil \- multimedia\-biased utility library
.SH "DESCRIPTION"
.IX Header "DESCRIPTION"
The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats). It is not a library for
code needed by both libavcodec and libavformat.
.PP
The goals for this library are to be:
.IP "\fBModular\fR" 4
.IX Item "Modular"
It should have few interdependencies and the possibility of disabling individual
parts during \fB./configure\fR.
.IP "\fBSmall\fR" 4
.IX Item "Small"
Both sources and objects should be small.
.IP "\fBEfficient\fR" 4
.IX Item "Efficient"
It should have low \s-1CPU\s0 and memory usage.
.IP "\fBUseful\fR" 4
.IX Item "Useful"
It should avoid useless features that almost no one needs.
.SH "SEE ALSO"
.IX Header "SEE ALSO"
\&\fIffmpeg\fR\|(1), \fIffplay\fR\|(1), \fIffprobe\fR\|(1), \fIffserver\fR\|(1),
\&\fIffmpeg\-utils\fR\|(1)
.SH "AUTHORS"
.IX Header "AUTHORS"
The FFmpeg developers.
.PP
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
\&\fBgit log\fR in the FFmpeg source directory, or browsing the
online repository at <\fBhttp://source.ffmpeg.org\fR>.
.PP
Maintainers for the specific components are listed in the file
\&\fI\s-1MAINTAINERS\s0\fR in the source code tree.


@@ -0,0 +1,70 @@
=encoding utf8
=head1 NAME
libavutil - multimedia-biased utility library
=head1 DESCRIPTION
The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats). It is not a library for
code needed by both libavcodec and libavformat.
The goals for this library are to be:
=over 4
=item B<Modular>
It should have few interdependencies and the possibility of disabling individual
parts during B<./configure>.
=item B<Small>
Both sources and objects should be small.
=item B<Efficient>
It should have low CPU and memory usage.
=item B<Useful>
It should avoid useless features that almost no one needs.
=back
=head1 SEE ALSO
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1)
=head1 AUTHORS
The FFmpeg developers.
For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.
