sdk: update Makefiles
git-svn-id: svn://kolibrios.org@4866 a494cfbc-eb01-0410-851d-a64ba20cac60
parent bb8df7102c
commit b3031965cc
@@ -1,15 +1,14 @@
CC=gcc
LD = ld
AR= ar

LIBRARY= pixlib

CC = kos32-gcc
AR = kos32-ar
LD = kos32-ld
STRIP = kos32-strip

CFLAGS = -U_Win32 -U_WIN32 -U__MINGW32__ -c -O2 -fomit-frame-pointer

STRIP = $(PREFIX)strip

LDFLAGS:= -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0
PXFLAGS:= --version-script pixlib.ver --output-def $(LIBRARY).orig.def --out-implib $(LIBRARY).dll.a
PXFLAGS:= --version-script pixlib.ver --out-implib $(LIBRARY).dll.a
SNAFLAGS:= --version-script sna.ver --output-def sna.def
UXAFLAGS:= --version-script uxa.ver --output-def uxa.def

@@ -21,7 +20,6 @@ LIBS:= -ldll -lc.dll

DEFINES:= -DHAS_DEBUG_FULL=0 -DSHOW_BATCH=0 -DDEBUG_DUMP=0


SRC_PIXLIB = pixlib.c

SRC_SNA = \
@@ -66,6 +64,7 @@ else
OBJECTS= $(OBJ_SNA)
DEFINES+= -DBUILD_SNA
LIBS+= -lgcc
LIBPATH+= -L/home/autobuild/tools/win32/mingw32/lib
endif
endif

@@ -78,13 +77,11 @@ ebox:$(LIBRARY).dll

$(LIBRARY).dll: $(OBJ_PIXLIB) Makefile
	$(LD) $(LDFLAGS) $(PXFLAGS) $(LIBPATH) -o $@ $(OBJ_PIXLIB) $(LIBS)
	$(STRIP) $@
	mv -f $@ ../../bin
	mv -f $(LIBRARY).dll.a ../../lib

intel-sna.drv: $(OBJ_SNA) Makefile
	$(LD) $(LDFLAGS) $(SNAFLAGS) $(LIBPATH) -o $@ $(OBJ_SNA) $(LIBS)
	$(STRIP) $@
	mv -f $@ ../../bin

intel-uxa.drv: $(OBJ_UXA) Makefile

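The pixlib hunks above show the pattern this commit repeats across the SDK Makefiles: the host toolchain (CC=gcc, LD=ld, AR=ar) is replaced by the kos32-* cross tools, $(PREFIX)strip becomes kos32-strip, and long linker option lists are split into += assignments. A minimal sketch of the resulting variable block, using only names that appear in the hunks above (the surrounding file layout, e.g. ../newlib/dll.lds, is assumed to match):

# Sketch only: condensed from the pixlib hunks above, not a new file in the commit.
LIBRARY = pixlib

CC    = kos32-gcc
AR    = kos32-ar
LD    = kos32-ld
STRIP = kos32-strip

CFLAGS  = -U_Win32 -U_WIN32 -U__MINGW32__ -c -O2 -fomit-frame-pointer

LDFLAGS := -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0
PXFLAGS := --version-script pixlib.ver --out-implib $(LIBRARY).dll.a
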
@@ -37,7 +37,7 @@ USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "config.h"
#endif

#include <memory.h>
//#include <memory.h>
#include <malloc.h>
#include "i915_pciids.h"

@@ -46,7 +46,7 @@ SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#include "compiler.h"


#include <memory.h>
//#include <memory.h>
#include <malloc.h>
#include <errno.h>
#include <kos32sys.h>

@@ -1,23 +1,24 @@

export CC = kos32-gcc
export AR = kos32-ar
export LD = kos32-ld
export STRIP = kos32-strip

export SDK_DIR:= $(abspath ../..)

EGL = egl

CC = gcc

CFLAGS_OPT = -U_Win32 -U_WIN32 -U__MINGW32__ -UWIN32 -U_MSC_VER -Os
CFLAGS_OPT+= -fomit-frame-pointer -mno-ms-bitfields
CFLAGS = -c $(CFLAGS_OPT)

LD = ld
LDFLAGS = -nostdlib -shared -s --image-base 0 -T ../newlib/dll.lds --out-implib lib$(EGL).dll.a -e _DllStartup

STRIP = $(PREFIX)strip

export SDK_DIR:= $(abspath ../..)
LDFLAGS = -nostdlib -shared -s --image-base 0 -T ../newlib/dll.lds -e _DllStartup
LDFLAGS+= --out-implib lib$(EGL).dll.a

INC_MESA= -I../newlib/include -I./include -I./src -I./src/glsl -I./src/mesa -I./src/mapi
INC_EGL= -I../newlib/include -I../../includes -I./include -I../libdrm -I../libdrm/include/drm -I./src/egl/main -I./src/gbm/backends/dri -I./src/gbm/main

LIBPATH:= -L../../lib
LIBPATH:= -L../../lib -L/home/autobuild/tools/win32/mingw32/lib

LIBS:= -ldll -ldrm.dll -lc.dll -lgcc

@@ -40,18 +41,18 @@ all:

$(EGL).dll: $(EGL_OBJS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ egl.def $(EGL_OBJS) $(LIBS)
	$(STRIP) $@
# $(STRIP) $@
# sed -e "s/ @[^ ]*//" egl1.def > egl.def
	mv -f $@ ../../bin
	mv -f lib$(EGL).dll.a ../../lib

libglsl.a : $(GLSL_OBJS) Makefile
	ar cvrs libglsl.a $(GLSL_OBJS)
	$(AR) libglsl.a $(GLSL_OBJS)
	mv -f libglsl.a ../../lib

libGL.dll: $(LIBGL_OBJS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(LIBGL_OBJS) $(LIBS)
	$(STRIP) $@
# $(STRIP) $@
# sed -e "s/ @[^ ]*//" egl1.def > egl.def
	mv -f $@ ../../bin
	mv -f libGL.dll.a ../../lib

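The Mesa hunk above exports the kos32 toolchain and SDK_DIR := $(abspath ../..) from the top-level Makefile, which is why the driver Makefiles later in this diff can drop their local CC = gcc lines and derive their paths from SDK_DIR instead. A hedged sketch of how the exported values reach a sub-make (the recursive invocation and target name are illustrative, not taken from the diff):

# Top-level Makefile: exported values are inherited by every $(MAKE) child.
export CC = kos32-gcc
export SDK_DIR := $(abspath ../..)

libegl.dll:
	$(MAKE) -C egl        # illustrative subdirectory; CC and SDK_DIR arrive via the environment

# Sub-Makefile: no local CC needed; paths are derived from the exported SDK_DIR.
SRC_DIR := $(SDK_DIR)/sources
MESA_SRC := $(SRC_DIR)/Mesa/src
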
@@ -1,4 +1,3 @@
CC = gcc

SRC_DIR:=$(SDK_DIR)/sources
MESA_SRC:= $(SRC_DIR)/Mesa/src
@@ -7,17 +6,13 @@ CFLAGS_OPT = -U_Win32 -U_WIN32 -U__MINGW32__ -UWIN32 -U_MSC_VER -Os
CFLAGS_OPT+= -fomit-frame-pointer -mno-ms-bitfields
CFLAGS = -c $(CFLAGS_OPT)

LD = ld
LDFLAGS = -nostdlib -shared -s --image-base 0 -T $(SRC_DIR)/newlib/dll.lds --out-implib libegl.dll.a -e _DllStartup

STRIP = $(PREFIX)strip


INC_EGL:= -I$(SRC_DIR)/newlib/include -I$(SDK_DIR)/includes -I$(SRC_DIR)/Mesa/include
INC_EGL+= -I$(SRC_DIR)/libdrm -I$(SRC_DIR)/libdrm/include/drm -I./main
INC_EGL+= -I$(MESA_SRC)/gbm/backends/dri -I$(MESA_SRC)/gbm/main

LIBPATH:= -L$(SDK_DIR)/lib
LIBPATH:= -L$(SDK_DIR)/lib -L/home/autobuild/tools/win32/mingw32/lib

LIBS:= -ldll -ldrm.dll -lc.dll -lgcc

@@ -63,7 +58,7 @@ all: libegl.dll

libegl.dll: $(EGL_OBJS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(MESA_SRC)/../egl.def $(EGL_OBJS) $(LIBS)
	$(STRIP) $@
# $(STRIP) $@
# sed -e "s/ @[^ ]*//" egl1.def > egl.def
	mv -f $@ $(SDK_DIR)/bin
	mv -f libegl.dll.a $(SDK_DIR)/lib

@@ -1,4 +1,3 @@
CC = gcc

SRC_DIR:=$(SDK_DIR)/sources
MESA_SRC:= $(SRC_DIR)/Mesa/src
@@ -114,7 +113,7 @@ GLSL_OBJS = $(patsubst %.cpp, %.o, $(patsubst %.c, %.o, $(GLSL_SRC)))
all: libglsl.a

libglsl.a : $(GLSL_OBJS) Makefile
	ar cvrs libglsl.a $(GLSL_OBJS)
	$(AR) crs libglsl.a $(GLSL_OBJS)
	mv -f libglsl.a $(SDK_DIR)/lib

%.o : %.c Makefile

@@ -1,4 +1,3 @@
CC = gcc

SRC_DIR:=$(SDK_DIR)/sources
MESA_SRC:= $(SRC_DIR)/Mesa/src
@@ -7,16 +6,13 @@ CFLAGS_OPT = -U_Win32 -U_WIN32 -U__MINGW32__ -UWIN32 -U_MSC_VER -Os
CFLAGS_OPT+= -fomit-frame-pointer -mno-ms-bitfields
CFLAGS = -c $(CFLAGS_OPT)

LD = ld
LDFLAGS = -nostdlib -shared -s --image-base 0 -T $(SRC_DIR)/newlib/dll.lds --out-implib libGL.dll.a -e _DllStartup

STRIP = $(PREFIX)strip

INC_MESA:= -I$(SRC_DIR)/newlib/include -I$(SRC_DIR)/Mesa/include
INC_MESA+= -I./src -I$(MESA_SRC)/glsl -I$(MESA_SRC)/mesa -I$(MESA_SRC)/mapi


LIBPATH:= -L$(SDK_DIR)/lib
LIBPATH:= -L$(SDK_DIR)/lib -L/home/autobuild/tools/win32/mingw32/lib

LIBS:= -ldll -ldrm.dll -lc.dll -lgcc

@@ -45,7 +41,7 @@ all: libGL.dll

libGL.dll: $(LIBGL_OBJS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ mesa.def $(LIBGL_OBJS) $(LIBS)
	$(STRIP) $@
# $(STRIP) $@
	mv -f $@ $(SDK_DIR)/bin
	mv -f libGL.dll.a $(SDK_DIR)/lib

@@ -1,4 +1,3 @@
CC = gcc

SRC_DIR:=$(SDK_DIR)/sources
MESA_SRC:= $(SRC_DIR)/Mesa/src
@@ -7,17 +6,14 @@ CFLAGS_OPT = -U_Win32 -U_WIN32 -U__MINGW32__ -UWIN32 -U_MSC_VER -Os
CFLAGS_OPT+= -fomit-frame-pointer -mno-ms-bitfields
CFLAGS = -c $(CFLAGS_OPT)

LD = ld
LDFLAGS = -nostdlib -shared -s --image-base 0 -T $(SRC_DIR)/newlib/dll.lds -e _DllStartup

STRIP = $(PREFIX)strip

INC_MESA:= -I$(SRC_DIR)/newlib/include -I$(SRC_DIR)/Mesa/include
INC_MESA+= -I./ -I$(MESA_SRC)/glsl -I$(MESA_SRC)/mesa -I$(MESA_SRC)/mapi
INC_MESA+= -I$(SRC_DIR)/libdrm -I$(MESA_SRC) -I$(SRC_DIR)/expat/lib
INC_I965:= -I$(SRC_DIR)/libdrm/intel -I$(SRC_DIR)/libdrm/include/drm -I$(MESA_SRC)/mesa/drivers/dri/common

LIBPATH:= -L$(SDK_DIR)/lib
LIBPATH:= -L$(SDK_DIR)/lib -L/home/autobuild/tools/win32/mingw32/lib

LIBS:= -ldll -lglsl -lGL.dll -lsupc++ -lgcc_eh -ldrm.dll -lexpat -lc.dll -lgcc

@@ -348,7 +344,7 @@ I965_SRC = \
drivers/dri/i965/gen6_clip_state.c \
drivers/dri/i965/gen6_depthstencil.c \
drivers/dri/i965/gen6_gs_state.c \
drivers/dri/i965/gen6_multisample_state.c \
drivers/dri/i965/gen6_multisample_state.c \
drivers/dri/i965/gen6_queryobj.c \
drivers/dri/i965/gen6_sampler_state.c \
drivers/dri/i965/gen6_scissor_state.c \
@@ -381,7 +377,7 @@ all: i965_dri.drv

i965_dri.drv: $(I965_OBJS) $(MESA_OBJS) dri.def Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(I965_OBJS) $(MESA_OBJS) dri.def $(LIBS)
	$(STRIP) $@
# $(STRIP) $@
	mv -f $@ $(SDK_DIR)/bin

%.o : %.c Makefile

@@ -1,20 +1,22 @@

LIBRARY = cairo2

CC = gcc
CC = kos32-gcc
AR = kos32-ar
LD = kos32-ld
STRIP = kos32-strip

CFLAGS = -c -O2 -ffast-math -Wall -Winline -Wno-attributes -fomit-frame-pointer

LD = ld
LDFLAGS = -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0 --version-script cairo.ver --output-def $(LIBRARY).orig.def --out-implib lib$(LIBRARY).dll.a
LDFLAGS = -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0 --version-script cairo.ver --output-def $(LIBRARY).orig.def
LDFLAGS+= --out-implib lib$(LIBRARY).dll.a

STRIP = $(PREFIX)strip
ARFLAGS = crs

DEFINES = -U__WIN32__ -U_Win32 -U_WIN32 -U__MINGW32__ -U_MSC_VER -DHAVE_CONFIG_H=1 -DCAIRO_NO_MUTEX=1

INCLUDES = -I. -I../newlib/include -I../pixman -I../zlib -I../libpng -I../freetype/include


LIBPATH:= -L../../lib
LIBPATH:= -L../../lib -L/home/autobuild/tools/win32/mingw32/lib

LIBS:= -ldll -lgcc -lfreetype.dll -lz.dll -lpixman-1.dll -lpng16.dll -lc.dll

@@ -154,16 +156,15 @@ all: lib$(LIBRARY).a $(LIBRARY).dll
ebox: lib$(LIBRARY).a $(LIBRARY).dll

lib$(LIBRARY).a: $(OBJECTS) Makefile
	ar cvrs lib$(LIBRARY).a $(OBJECTS)
	$(AR) $(ARFLAGS) lib$(LIBRARY).a $(OBJECTS)
	mv -f lib$(LIBRARY).a ../../lib


$(LIBRARY).dll: $(OBJECTS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(OBJECTS) $(LIBS)
	$(STRIP) $@
	sed -e "s/ @[^ ]*//" $(LIBRARY).orig.def > $(LIBRARY).def
	sed -f ../newlib/cmd1.sed $(LIBRARY).def > mem
	sed -f ../newlib/cmd2.sed mem >$(LIBRARY).inc
#sed -e "s/ @[^ ]*//" $(LIBRARY).orig.def > $(LIBRARY).def
#sed -f ../newlib/cmd1.sed $(LIBRARY).def > mem
#sed -f ../newlib/cmd2.sed mem >$(LIBRARY).inc
	mv -f $@ ../../bin
	mv -f lib$(LIBRARY).dll.a ../../lib

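The cairo hunk above splits --out-implib onto its own LDFLAGS += line and comments out the sed post-processing of the generated export list. For reference, that pipeline took the ld-generated $(LIBRARY).orig.def, dropped the trailing " @..." field from each export line, and fed the result through the newlib sed scripts to produce $(LIBRARY).inc; the sketch below is the same recipe with explanatory comments added (whether the .inc is still consumed elsewhere is not stated in the diff):

$(LIBRARY).dll: $(OBJECTS) Makefile
	$(LD) $(LDFLAGS) $(LIBPATH) -o $@ $(OBJECTS) $(LIBS)
	$(STRIP) $@
	# drop the trailing " @..." field that ld writes after each exported symbol
	sed -e "s/ @[^ ]*//" $(LIBRARY).orig.def > $(LIBRARY).def
	# rewrite the cleaned .def into an include file via the newlib helper scripts
	sed -f ../newlib/cmd1.sed $(LIBRARY).def > mem
	sed -f ../newlib/cmd2.sed mem > $(LIBRARY).inc
	mv -f $@ ../../bin
	mv -f lib$(LIBRARY).dll.a ../../lib
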
@@ -2,17 +2,19 @@
LIBRARY= libeglut
IMPLIB= libeglut.dll.a

CC = kos32-gcc
AR = kos32-ar
LD = kos32-ld

CC=gcc
CFLAGS = -U_Win32 -U_WIN32 -U__MINGW32__ -c -O2 -fomit-frame-pointer

AR= ar

INCLUDES= -I. -I../newlib/include -I../Mesa/include -I../Mesa/src/gbm/main -I../libdrm/include

LDFLAGS:= -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0 --out-implib $(IMPLIB)
LDFLAGS:= -shared -s -nostdlib -T ../newlib/dll.lds --entry _DllStartup --image-base=0
LDFLAGS+= --out-implib $(IMPLIB)

LIBPATH:= -L../../lib
LIBS:= -ldll -legl.dll -lgl.dll -lc.dll
LIBS:= -ldll -legl.dll -lGL.dll -lc.dll

DEFINES= -D__unix__ -DMESA_EGL_NO_X11_HEADERS

@@ -32,7 +34,7 @@ OBJECTS = $(patsubst %.c, %.o, $(SOURCES))
all:$(LIBRARY).a $(LIBRARY).dll

$(LIBRARY).a: $(OBJECTS) Makefile
	ar cvrs $(LIBRARY).a $(OBJECTS)
	$(AR) crs $(LIBRARY).a $(OBJECTS)
	mv -f $(LIBRARY).a ../../lib

$(LIBRARY).dll: $(OBJECTS) Makefile

@@ -1,11 +1,13 @@
CC=gcc
LD= ld
AR= ar

LIBRARY= libexpat

CC = kos32-gcc
AR = kos32-ar
LD = kos32-ld

CFLAGS = -U_Win32 -U_WIN32 -U__MINGW32__ -c -O2 -fomit-frame-pointer

ARFLAGS = crs

INCLUDES= -I. -I../newlib/include

DEFS = -DHAVE_EXPAT_CONFIG_H
@@ -26,7 +28,7 @@ OBJS = $(patsubst %.c, %.o, $(SRCS))
all:$(LIBRARY).a

$(LIBRARY).a: $(OBJS) Makefile
	ar cvrs $(LIBRARY).a $(OBJS)
	$(AR) $(ARFLAGS) $(LIBRARY).a $(OBJS)
	mv -f $(LIBRARY).a ../../lib

%.o : %.c Makefile

@@ -165,8 +165,8 @@ config:

check: all alltools examples testprogs fate

include $(SRC_PATH)/doc/Makefile
include $(SRC_PATH)/tests/Makefile
#include $(SRC_PATH)/doc/Makefile
#include $(SRC_PATH)/tests/Makefile

$(sort $(OBJDIRS)):
	$(Q)mkdir -p $@

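The ffmpeg hunk above disables the documentation and test rules by commenting out the two include lines, and the config.h hunk that follows records --disable-doc in the configuration string. A hedged alternative sketch, guarding the include on the CONFIG_DOC flag that appears in the removed doc/Makefile below (treating CONFIG_DOC as defined only when docs are enabled is an assumption):

# Sketch: pull in the doc rules only when the configure run enabled them.
ifdef CONFIG_DOC
include $(SRC_PATH)/doc/Makefile
endif
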
@@ -1,7 +1,7 @@
/* Automatically generated by configure - do not modify! */
#ifndef FFMPEG_CONFIG_H
#define FFMPEG_CONFIG_H
#define FFMPEG_CONFIGURATION "--disable-static --enable-shared --disable-network --disable-debug --enable-memalign-hack --enable-gpl --disable-avx --disable-fma4 --disable-programs --extra-cflags=-I/d/kos/kolibri/programs/develop/libraries/newlib/include --disable-pthreads --disable-w32threads --extra-cflags=-U_Win32 --extra-cflags=-U_WIN32 --extra-cflags=-U__MINGW32__ --enable-runtime-cpudetect --disable-encoders --disable-muxers --disable-protocols --enable-protocol=file --disable-devices --disable-postproc --disable-avfilter --disable-hwaccels"
#define FFMPEG_CONFIGURATION "--disable-static --enable-shared --disable-network --disable-debug --enable-memalign-hack --enable-gpl --disable-avx --disable-fma4 --disable-programs --extra-cflags=-I/d/kos/kolibri/programs/develop/libraries/newlib/include --disable-pthreads --disable-w32threads --extra-cflags=-U_Win32 --extra-cflags=-U_WIN32 --extra-cflags=-U__MINGW32__ --enable-runtime-cpudetect --disable-encoders --disable-muxers --disable-protocols --enable-protocol=file --disable-devices --disable-postproc --disable-avfilter --disable-hwaccels --disable-doc"
#define FFMPEG_LICENSE "GPL version 2 or later"
#define FFMPEG_DATADIR "/usr/local/share/ffmpeg"
#define AVCONV_DATADIR "/usr/local/share/ffmpeg"

@@ -16,25 +16,25 @@ SRC_PATH:=$(SRC_PATH:.%=..%)
endif
CC_IDENT=gcc 4.8.1 (GCC)
ARCH=x86
CC=gcc
CXX=g++
AS=gcc
LD=ld
CC = kos32-gcc
CXX= kos32-g++
AS = kos32-gcc
LD = kos32-ld
DEPCC=gcc
DEPCCFLAGS= $(CPPFLAGS)
DEPAS=gcc
DEPASFLAGS= $(CPPFLAGS)
YASM=nasm
DEPYASM=nasm
AR=ar
AR=kos32-ar
ARFLAGS=rc
AR_O=$@
RANLIB=:
STRIP=strip
STRIP=kos32-strip
CP=cp -p
LN_S=ln -s -f
CPPFLAGS= -D_ISOC99_SOURCE -D_FILE_OFFSET_BITS=32 -D_LARGEFILE_SOURCE -U__STRICT_ANSI__
CFLAGS= -I../newlib/include -U_Win32 -U_WIN32 -U__MINGW32__ -std=c99 -fomit-frame-pointer -Wdeclaration-after-statement -Wall -Wno-parentheses -Wno-switch -Wno-format-zero-length -Wdisabled-optimization -Wpointer-arith -Wredundant-decls -Wno-pointer-sign -Wwrite-strings -Wtype-limits -Wundef -Wmissing-prototypes -Wno-pointer-to-int-cast -Wstrict-prototypes -O2 -fno-math-errno -fno-signed-zeros -fno-tree-vectorize -Werror=implicit-function-declaration -Werror=missing-prototypes -Werror=return-type -Werror=vla
CFLAGS= -I../newlib/include -I../zlib -U_Win32 -U_WIN32 -U__MINGW32__ -std=c99 -fomit-frame-pointer -Wdeclaration-after-statement -Wall -Wno-parentheses -Wno-switch -Wno-format-zero-length -Wdisabled-optimization -Wpointer-arith -Wredundant-decls -Wno-pointer-sign -Wwrite-strings -Wtype-limits -Wundef -Wmissing-prototypes -Wno-pointer-to-int-cast -Wstrict-prototypes -O2 -fno-math-errno -fno-signed-zeros -fno-tree-vectorize -Werror=implicit-function-declaration -Werror=missing-prototypes -Werror=return-type -Werror=vla
CXXFLAGS= -D__STDC_CONSTANT_MACROS
ASFLAGS= -I../newlib/include -U_Win32 -U_WIN32 -U__MINGW32__
AS_C=-c
@@ -48,9 +48,9 @@ LD_O=-o $@
LD_LIB=-l%
LD_PATH=-L
DLLTOOL=dlltool
LDFLAGS=-L../../lib --output-def $$(@:$(SLIBSUF)=.orig.def) -nostdlib --enable-runtime-pseudo-reloc
LDFLAGS=-L../../lib -L/home/autobuild/tools/win32/mingw32/lib --output-def $$(@:$(SLIBSUF)=.orig.def) -nostdlib --enable-runtime-pseudo-reloc
LDFLAGS-ffserver=-Wl,-E
SHFLAGS=-shared -s -T../newlib/dll.lds -Map map -Bsymbolic --entry _DllStartup --image-base 0 --out-implib $(SUBDIR)lib$(SLIBNAME:$(SLIBSUF)=.dll.a) --version-script $(SUBDIR)lib$(NAME).ver -ldll
SHFLAGS=-shared -s -T../newlib/dll.lds -Bsymbolic --entry _DllStartup --image-base 0 --out-implib $(SUBDIR)lib$(SLIBNAME:$(SLIBSUF)=.dll.a) --version-script $(SUBDIR)lib$(NAME).ver -ldll
YASMFLAGS=-f win32 -DPREFIX
BUILDSUF=
PROGSSUF=

@ -1,137 +0,0 @@
|
||||
LIBRARIES-$(CONFIG_AVUTIL) += libavutil
|
||||
LIBRARIES-$(CONFIG_SWSCALE) += libswscale
|
||||
LIBRARIES-$(CONFIG_SWRESAMPLE) += libswresample
|
||||
LIBRARIES-$(CONFIG_AVCODEC) += libavcodec
|
||||
LIBRARIES-$(CONFIG_AVFORMAT) += libavformat
|
||||
LIBRARIES-$(CONFIG_AVDEVICE) += libavdevice
|
||||
LIBRARIES-$(CONFIG_AVFILTER) += libavfilter
|
||||
|
||||
COMPONENTS-$(CONFIG_AVUTIL) += ffmpeg-utils
|
||||
COMPONENTS-$(CONFIG_SWSCALE) += ffmpeg-scaler
|
||||
COMPONENTS-$(CONFIG_SWRESAMPLE) += ffmpeg-resampler
|
||||
COMPONENTS-$(CONFIG_AVCODEC) += ffmpeg-codecs ffmpeg-bitstream-filters
|
||||
COMPONENTS-$(CONFIG_AVFORMAT) += ffmpeg-formats ffmpeg-protocols
|
||||
COMPONENTS-$(CONFIG_AVDEVICE) += ffmpeg-devices
|
||||
COMPONENTS-$(CONFIG_AVFILTER) += ffmpeg-filters
|
||||
|
||||
MANPAGES1 = $(PROGS-yes:%=doc/%.1) $(PROGS-yes:%=doc/%-all.1) $(COMPONENTS-yes:%=doc/%.1)
|
||||
MANPAGES3 = $(LIBRARIES-yes:%=doc/%.3)
|
||||
MANPAGES = $(MANPAGES1) $(MANPAGES3)
|
||||
PODPAGES = $(PROGS-yes:%=doc/%.pod) $(PROGS-yes:%=doc/%-all.pod) $(COMPONENTS-yes:%=doc/%.pod) $(LIBRARIES-yes:%=doc/%.pod)
|
||||
HTMLPAGES = $(PROGS-yes:%=doc/%.html) $(PROGS-yes:%=doc/%-all.html) $(COMPONENTS-yes:%=doc/%.html) $(LIBRARIES-yes:%=doc/%.html) \
|
||||
doc/developer.html \
|
||||
doc/faq.html \
|
||||
doc/fate.html \
|
||||
doc/general.html \
|
||||
doc/git-howto.html \
|
||||
doc/nut.html \
|
||||
doc/platform.html \
|
||||
|
||||
TXTPAGES = doc/fate.txt \
|
||||
|
||||
|
||||
DOCS-$(CONFIG_HTMLPAGES) += $(HTMLPAGES)
|
||||
DOCS-$(CONFIG_PODPAGES) += $(PODPAGES)
|
||||
DOCS-$(CONFIG_MANPAGES) += $(MANPAGES)
|
||||
DOCS-$(CONFIG_TXTPAGES) += $(TXTPAGES)
|
||||
DOCS = $(DOCS-yes)
|
||||
|
||||
all-$(CONFIG_DOC): doc
|
||||
|
||||
doc: documentation
|
||||
|
||||
apidoc: doc/doxy/html
|
||||
documentation: $(DOCS)
|
||||
|
||||
TEXIDEP = awk '/^@(verbatim)?include/ { printf "$@: $(@D)/%s\n", $$2 }' <$< >$(@:%=%.d)
|
||||
|
||||
doc/%.txt: TAG = TXT
|
||||
doc/%.txt: doc/%.texi
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)makeinfo --force --no-headers -o $@ $< 2>/dev/null
|
||||
|
||||
GENTEXI = format codec
|
||||
GENTEXI := $(GENTEXI:%=doc/avoptions_%.texi)
|
||||
|
||||
$(GENTEXI): TAG = GENTEXI
|
||||
$(GENTEXI): doc/avoptions_%.texi: doc/print_options$(HOSTEXESUF)
|
||||
$(M)doc/print_options $* > $@
|
||||
|
||||
doc/%.html: TAG = HTML
|
||||
doc/%.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-not-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%-all.html: TAG = HTML
|
||||
doc/%-all.html: doc/%.texi $(SRC_PATH)/doc/t2h.init $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)texi2html -I doc -monolithic --D=config-all --init-file $(SRC_PATH)/doc/t2h.init --output $@ $<
|
||||
|
||||
doc/%.pod: TAG = POD
|
||||
doc/%.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-not-all=yes -Idoc $< $@
|
||||
|
||||
doc/%-all.pod: TAG = POD
|
||||
doc/%-all.pod: doc/%.texi $(SRC_PATH)/doc/texi2pod.pl $(GENTEXI)
|
||||
$(Q)$(TEXIDEP)
|
||||
$(M)perl $(SRC_PATH)/doc/texi2pod.pl -Dconfig-all=yes -Idoc $< $@
|
||||
|
||||
doc/%.1 doc/%.3: TAG = MAN
|
||||
doc/%.1: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=1 --center=" " --release=" " $< > $@
|
||||
doc/%.3: doc/%.pod $(GENTEXI)
|
||||
$(M)pod2man --section=3 --center=" " --release=" " $< > $@
|
||||
|
||||
$(DOCS) doc/doxy/html: | doc/
|
||||
|
||||
doc/doxy/html: $(SRC_PATH)/doc/Doxyfile $(INSTHEADERS)
|
||||
$(M)$(SRC_PATH)/doc/doxy-wrapper.sh $(SRC_PATH) $^
|
||||
|
||||
install-doc: install-html install-man
|
||||
|
||||
install-html:
|
||||
|
||||
install-man:
|
||||
|
||||
ifdef CONFIG_HTMLPAGES
|
||||
install-progs-$(CONFIG_DOC): install-html
|
||||
|
||||
install-html: $(HTMLPAGES)
|
||||
$(Q)mkdir -p "$(DOCDIR)"
|
||||
$(INSTALL) -m 644 $(HTMLPAGES) "$(DOCDIR)"
|
||||
endif
|
||||
|
||||
ifdef CONFIG_MANPAGES
|
||||
install-progs-$(CONFIG_DOC): install-man
|
||||
|
||||
install-man: $(MANPAGES)
|
||||
$(Q)mkdir -p "$(MANDIR)/man1"
|
||||
$(INSTALL) -m 644 $(MANPAGES1) "$(MANDIR)/man1"
|
||||
$(Q)mkdir -p "$(MANDIR)/man3"
|
||||
$(INSTALL) -m 644 $(MANPAGES3) "$(MANDIR)/man3"
|
||||
endif
|
||||
|
||||
uninstall: uninstall-doc
|
||||
|
||||
uninstall-doc: uninstall-html uninstall-man
|
||||
|
||||
uninstall-html:
|
||||
$(RM) -r "$(DOCDIR)"
|
||||
|
||||
uninstall-man:
|
||||
$(RM) $(addprefix "$(MANDIR)/man1/",$(PROGS-yes:%=%.1) $(PROGS-yes:%=%-all.1) $(COMPONENTS-yes:%=%.1))
|
||||
$(RM) $(addprefix "$(MANDIR)/man3/",$(LIBRARIES-yes:%=%.3))
|
||||
|
||||
clean:: docclean
|
||||
|
||||
distclean:: docclean
|
||||
$(RM) doc/config.texi
|
||||
|
||||
docclean:
|
||||
$(RM) $(TXTPAGES) doc/*.html doc/*.pod doc/*.1 doc/*.3 $(CLEANSUFFIXES:%=doc/%) doc/avoptions_*.texi
|
||||
$(RM) -r doc/doxy/html
|
||||
|
||||
-include $(wildcard $(DOCS:%=%.d))
|
||||
|
||||
.PHONY: apidoc doc documentation
|
@ -1,16 +0,0 @@
|
||||
Release Notes
|
||||
=============
|
||||
|
||||
* 2.1 "Fourier" October, 2013
|
||||
|
||||
|
||||
General notes
|
||||
-------------
|
||||
See the Changelog file for a list of significant changes. Note, there
|
||||
are many more new features and bugfixes than whats listed there.
|
||||
|
||||
Bugreports against FFmpeg git master or the most recent FFmpeg release are
|
||||
accepted. If you are experiencing issues with any formally released version of
|
||||
FFmpeg, please try git master to check if the issue still exists. If it does,
|
||||
make your report against the development code following the usual bug reporting
|
||||
guidelines.
|
@ -1,11 +0,0 @@
|
||||
@chapter Authors
|
||||
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
@command{git log} in the FFmpeg source directory, or browsing the
|
||||
online repository at @url{http://source.ffmpeg.org}.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
@file{MAINTAINERS} in the source code tree.
|
@ -1,876 +0,0 @@
|
||||
@c DO NOT EDIT THIS FILE!
|
||||
@c It was generated by print_options.
|
||||
|
||||
@section Codec AVOptions
|
||||
@table @option
|
||||
@item -b[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
set bitrate (in bits/s)
|
||||
@item -ab[:stream_specifier] @var{integer} (@emph{output,audio})
|
||||
set bitrate (in bits/s)
|
||||
@item -bt[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
Set video bitrate tolerance (in bits/s). In 1-pass mode, bitrate tolerance specifies how far ratecontrol is willing to deviate from the target average bitrate value. This is not related to minimum/maximum bitrate. Lowering tolerance too much has an adverse effect on quality.
|
||||
@item -flags[:stream_specifier] @var{flags} (@emph{input/output,audio,video,subtitles})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unaligned
|
||||
allow decoders to produce unaligned output
|
||||
@item mv4
|
||||
use four motion vectors per macroblock (MPEG-4)
|
||||
@item qpel
|
||||
use 1/4-pel motion compensation
|
||||
@item loop
|
||||
use loop filter
|
||||
@item qscale
|
||||
use fixed qscale
|
||||
@item gmc
|
||||
use gmc
|
||||
@item mv0
|
||||
always try a mb with mv=<0,0>
|
||||
@item input_preserved
|
||||
|
||||
@item pass1
|
||||
use internal 2-pass ratecontrol in first pass mode
|
||||
@item pass2
|
||||
use internal 2-pass ratecontrol in second pass mode
|
||||
@item gray
|
||||
only decode/encode grayscale
|
||||
@item emu_edge
|
||||
do not draw edges
|
||||
@item psnr
|
||||
error[?] variables will be set during encoding
|
||||
@item truncated
|
||||
|
||||
@item naq
|
||||
normalize adaptive quantization
|
||||
@item ildct
|
||||
use interlaced DCT
|
||||
@item low_delay
|
||||
force low delay
|
||||
@item global_header
|
||||
place global headers in extradata instead of every keyframe
|
||||
@item bitexact
|
||||
use only bitexact functions (except (I)DCT)
|
||||
@item aic
|
||||
H.263 advanced intra coding / MPEG-4 AC prediction
|
||||
@item ilme
|
||||
interlaced motion estimation
|
||||
@item cgop
|
||||
closed GOP
|
||||
@end table
|
||||
@item -me_method[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
set motion estimation method
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item zero
|
||||
zero motion estimation (fastest)
|
||||
@item full
|
||||
full motion estimation (slowest)
|
||||
@item epzs
|
||||
EPZS motion estimation (default)
|
||||
@item esa
|
||||
esa motion estimation (alias for full)
|
||||
@item tesa
|
||||
tesa motion estimation
|
||||
@item dia
|
||||
diamond motion estimation (alias for EPZS)
|
||||
@item log
|
||||
log motion estimation
|
||||
@item phods
|
||||
phods motion estimation
|
||||
@item x1
|
||||
X1 motion estimation
|
||||
@item hex
|
||||
hex motion estimation
|
||||
@item umh
|
||||
umh motion estimation
|
||||
@item iter
|
||||
iter motion estimation
|
||||
@end table
|
||||
@item -g[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
set the group of picture (GOP) size
|
||||
@item -ar[:stream_specifier] @var{integer} (@emph{input/output,audio})
|
||||
set audio sampling rate (in Hz)
|
||||
@item -ac[:stream_specifier] @var{integer} (@emph{input/output,audio})
|
||||
set number of audio channels
|
||||
@item -cutoff[:stream_specifier] @var{integer} (@emph{output,audio})
|
||||
set cutoff bandwidth
|
||||
@item -frame_size[:stream_specifier] @var{integer} (@emph{output,audio})
|
||||
@item -qcomp[:stream_specifier] @var{float} (@emph{output,video})
|
||||
video quantizer scale compression (VBR). Constant of ratecontrol equation. Recommended range for default rc_eq: 0.0-1.0
|
||||
@item -qblur[:stream_specifier] @var{float} (@emph{output,video})
|
||||
video quantizer scale blur (VBR)
|
||||
@item -qmin[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
minimum video quantizer scale (VBR)
|
||||
@item -qmax[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
maximum video quantizer scale (VBR)
|
||||
@item -qdiff[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
maximum difference between the quantizer scales (VBR)
|
||||
@item -bf[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
use 'frames' B frames
|
||||
@item -b_qfactor[:stream_specifier] @var{float} (@emph{output,video})
|
||||
QP factor between P- and B-frames
|
||||
@item -rc_strategy[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
ratecontrol method
|
||||
@item -b_strategy[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
strategy to choose between I/P/B-frames
|
||||
@item -ps[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
RTP payload size in bytes
|
||||
@item -bug[:stream_specifier] @var{flags} (@emph{input,video})
|
||||
work around not autodetected encoder bugs
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item autodetect
|
||||
|
||||
@item old_msmpeg4
|
||||
some old lavc-generated MSMPEG4v3 files (no autodetection)
|
||||
@item xvid_ilace
|
||||
Xvid interlacing bug (autodetected if FOURCC == XVIX)
|
||||
@item ump4
|
||||
(autodetected if FOURCC == UMP4)
|
||||
@item no_padding
|
||||
padding bug (autodetected)
|
||||
@item amv
|
||||
|
||||
@item ac_vlc
|
||||
illegal VLC bug (autodetected per FOURCC)
|
||||
@item qpel_chroma
|
||||
|
||||
@item std_qpel
|
||||
old standard qpel (autodetected per FOURCC/version)
|
||||
@item qpel_chroma2
|
||||
|
||||
@item direct_blocksize
|
||||
direct-qpel-blocksize bug (autodetected per FOURCC/version)
|
||||
@item edge
|
||||
edge padding bug (autodetected per FOURCC/version)
|
||||
@item hpel_chroma
|
||||
|
||||
@item dc_clip
|
||||
|
||||
@item ms
|
||||
work around various bugs in Microsoft's broken decoders
|
||||
@item trunc
|
||||
truncated frames
|
||||
@end table
|
||||
@item -strict[:stream_specifier] @var{integer} (@emph{input/output,audio,video})
|
||||
how strictly to follow the standards
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item very
|
||||
strictly conform to a older more strict version of the spec or reference software
|
||||
@item strict
|
||||
strictly conform to all the things in the spec no matter what the consequences
|
||||
@item normal
|
||||
|
||||
@item unofficial
|
||||
allow unofficial extensions
|
||||
@item experimental
|
||||
allow non-standardized experimental things
|
||||
@end table
|
||||
@item -b_qoffset[:stream_specifier] @var{float} (@emph{output,video})
|
||||
QP offset between P- and B-frames
|
||||
@item -err_detect[:stream_specifier] @var{flags} (@emph{input,audio,video})
|
||||
set error detection flags
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item crccheck
|
||||
verify embedded CRCs
|
||||
@item bitstream
|
||||
detect bitstream specification deviations
|
||||
@item buffer
|
||||
detect improper bitstream length
|
||||
@item explode
|
||||
abort decoding on minor error detection
|
||||
@item careful
|
||||
consider things that violate the spec, are fast to check and have not been seen in the wild as errors
|
||||
@item compliant
|
||||
consider all spec non compliancies as errors
|
||||
@item aggressive
|
||||
consider things that a sane encoder should not do as an error
|
||||
@end table
|
||||
@item -mpeg_quant[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
use MPEG quantizers instead of H.263
|
||||
@item -qsquish[:stream_specifier] @var{float} (@emph{output,video})
|
||||
how to keep quantizer between qmin and qmax (0 = clip, 1 = use differentiable function)
|
||||
@item -rc_qmod_amp[:stream_specifier] @var{float} (@emph{output,video})
|
||||
experimental quantizer modulation
|
||||
@item -rc_qmod_freq[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
experimental quantizer modulation
|
||||
@item -rc_eq[:stream_specifier] @var{string} (@emph{output,video})
|
||||
Set rate control equation. When computing the expression, besides the standard functions defined in the section 'Expression Evaluation', the following functions are available: bits2qp(bits), qp2bits(qp). Also the following constants are available: iTex pTex tex mv fCode iCount mcVar var isI isP isB avgQP qComp avgIITex avgPITex avgPPTex avgBPTex avgTex.
|
||||
@item -maxrate[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
Set maximum bitrate tolerance (in bits/s). Requires bufsize to be set.
|
||||
@item -minrate[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
Set minimum bitrate tolerance (in bits/s). Most useful in setting up a CBR encode. It is of little use otherwise.
|
||||
@item -bufsize[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
set ratecontrol buffer size (in bits)
|
||||
@item -rc_buf_aggressivity[:stream_specifier] @var{float} (@emph{output,video})
|
||||
currently useless
|
||||
@item -i_qfactor[:stream_specifier] @var{float} (@emph{output,video})
|
||||
QP factor between P- and I-frames
|
||||
@item -i_qoffset[:stream_specifier] @var{float} (@emph{output,video})
|
||||
QP offset between P- and I-frames
|
||||
@item -rc_init_cplx[:stream_specifier] @var{float} (@emph{output,video})
|
||||
initial complexity for 1-pass encoding
|
||||
@item -dct[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
DCT algorithm
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item auto
|
||||
autoselect a good one (default)
|
||||
@item fastint
|
||||
fast integer
|
||||
@item int
|
||||
accurate integer
|
||||
@item mmx
|
||||
|
||||
@item altivec
|
||||
|
||||
@item faan
|
||||
floating point AAN DCT
|
||||
@end table
|
||||
@item -lumi_mask[:stream_specifier] @var{float} (@emph{output,video})
|
||||
compresses bright areas stronger than medium ones
|
||||
@item -tcplx_mask[:stream_specifier] @var{float} (@emph{output,video})
|
||||
temporal complexity masking
|
||||
@item -scplx_mask[:stream_specifier] @var{float} (@emph{output,video})
|
||||
spatial complexity masking
|
||||
@item -p_mask[:stream_specifier] @var{float} (@emph{output,video})
|
||||
inter masking
|
||||
@item -dark_mask[:stream_specifier] @var{float} (@emph{output,video})
|
||||
compresses dark areas stronger than medium ones
|
||||
@item -idct[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
select IDCT implementation
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item auto
|
||||
|
||||
@item int
|
||||
|
||||
@item simple
|
||||
|
||||
@item simplemmx
|
||||
|
||||
@item arm
|
||||
|
||||
@item altivec
|
||||
|
||||
@item sh4
|
||||
|
||||
@item simplearm
|
||||
|
||||
@item simplearmv5te
|
||||
|
||||
@item simplearmv6
|
||||
|
||||
@item simpleneon
|
||||
|
||||
@item simplealpha
|
||||
|
||||
@item ipp
|
||||
|
||||
@item xvidmmx
|
||||
|
||||
@item faani
|
||||
floating point AAN IDCT
|
||||
@end table
|
||||
@item -ec[:stream_specifier] @var{flags} (@emph{input,video})
|
||||
set error concealment strategy
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item guess_mvs
|
||||
iterative motion vector (MV) search (slow)
|
||||
@item deblock
|
||||
use strong deblock filter for damaged MBs
|
||||
@end table
|
||||
@item -pred[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
prediction method
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item left
|
||||
|
||||
@item plane
|
||||
|
||||
@item median
|
||||
|
||||
@end table
|
||||
@item -aspect[:stream_specifier] @var{rational number} (@emph{output,video})
|
||||
sample aspect ratio
|
||||
@item -debug[:stream_specifier] @var{flags} (@emph{input/output,audio,video,subtitles})
|
||||
print specific debug info
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item pict
|
||||
picture info
|
||||
@item rc
|
||||
rate control
|
||||
@item bitstream
|
||||
|
||||
@item mb_type
|
||||
macroblock (MB) type
|
||||
@item qp
|
||||
per-block quantization parameter (QP)
|
||||
@item mv
|
||||
motion vector
|
||||
@item dct_coeff
|
||||
|
||||
@item skip
|
||||
|
||||
@item startcode
|
||||
|
||||
@item pts
|
||||
|
||||
@item er
|
||||
error recognition
|
||||
@item mmco
|
||||
memory management control operations (H.264)
|
||||
@item bugs
|
||||
|
||||
@item vis_qp
|
||||
visualize quantization parameter (QP), lower QP are tinted greener
|
||||
@item vis_mb_type
|
||||
visualize block types
|
||||
@item buffers
|
||||
picture buffer allocations
|
||||
@item thread_ops
|
||||
threading operations
|
||||
@end table
|
||||
@item -vismv[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
visualize motion vectors (MVs)
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item pf
|
||||
forward predicted MVs of P-frames
|
||||
@item bf
|
||||
forward predicted MVs of B-frames
|
||||
@item bb
|
||||
backward predicted MVs of B-frames
|
||||
@end table
|
||||
@item -cmp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
full-pel ME compare function
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
@item -subcmp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
sub-pel ME compare function
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
@item -mbcmp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
macroblock compare function
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
@item -ildctcmp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
interlaced DCT compare function
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
@item -dia_size[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
diamond type & size for motion estimation
|
||||
@item -last_pred[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
amount of motion predictors from the previous frame
|
||||
@item -preme[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
pre motion estimation
|
||||
@item -precmp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
pre motion estimation compare function
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
@item -pre_dia_size[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
diamond type & size for motion estimation pre-pass
|
||||
@item -subq[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
sub-pel motion estimation quality
|
||||
@item -me_range[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
limit motion vectors range (1023 for DivX player)
|
||||
@item -ibias[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
intra quant bias
|
||||
@item -pbias[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
inter quant bias
|
||||
@item -global_quality[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
@item -coder[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item vlc
|
||||
variable length coder / Huffman coder
|
||||
@item ac
|
||||
arithmetic coder
|
||||
@item raw
|
||||
raw (no encoding)
|
||||
@item rle
|
||||
run-length coder
|
||||
@item deflate
|
||||
deflate-based coder
|
||||
@end table
|
||||
@item -context[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
context model
|
||||
@item -mbd[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
macroblock decision algorithm (high quality mode)
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item simple
|
||||
use mbcmp (default)
|
||||
@item bits
|
||||
use fewest bits
|
||||
@item rd
|
||||
use best rate distortion
|
||||
@end table
|
||||
@item -sc_threshold[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
scene change threshold
|
||||
@item -lmin[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
minimum Lagrange factor (VBR)
|
||||
@item -lmax[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
maximum Lagrange factor (VBR)
|
||||
@item -nr[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
noise reduction
|
||||
@item -rc_init_occupancy[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
number of bits which should be loaded into the rc buffer before decoding starts
|
||||
@item -flags2[:stream_specifier] @var{flags} (@emph{input/output,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item fast
|
||||
allow non-spec-compliant speedup tricks
|
||||
@item noout
|
||||
skip bitstream encoding
|
||||
@item ignorecrop
|
||||
ignore cropping information from sps
|
||||
@item local_header
|
||||
place global headers at every keyframe instead of in extradata
|
||||
@item chunks
|
||||
Frame data might be split into multiple chunks
|
||||
@item showall
|
||||
Show all frames before the first keyframe
|
||||
@end table
|
||||
@item -error[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
@item -threads[:stream_specifier] @var{integer} (@emph{input/output,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item auto
|
||||
autodetect a suitable number of threads to use
|
||||
@end table
|
||||
@item -me_threshold[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
motion estimation threshold
|
||||
@item -mb_threshold[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
macroblock threshold
|
||||
@item -dc[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
intra_dc_precision
|
||||
@item -nssew[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
nsse weight
|
||||
@item -skip_top[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
number of macroblock rows at the top which are skipped
|
||||
@item -skip_bottom[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
number of macroblock rows at the bottom which are skipped
|
||||
@item -profile[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@item aac_main
|
||||
|
||||
@item aac_low
|
||||
|
||||
@item aac_ssr
|
||||
|
||||
@item aac_ltp
|
||||
|
||||
@item aac_he
|
||||
|
||||
@item aac_he_v2
|
||||
|
||||
@item aac_ld
|
||||
|
||||
@item aac_eld
|
||||
|
||||
@item mpeg2_aac_low
|
||||
|
||||
@item mpeg2_aac_he
|
||||
|
||||
@item dts
|
||||
|
||||
@item dts_es
|
||||
|
||||
@item dts_96_24
|
||||
|
||||
@item dts_hd_hra
|
||||
|
||||
@item dts_hd_ma
|
||||
|
||||
@end table
|
||||
@item -level[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item unknown
|
||||
|
||||
@end table
|
||||
@item -lowres[:stream_specifier] @var{integer} (@emph{input,audio,video})
|
||||
decode at 1= 1/2, 2=1/4, 3=1/8 resolutions
|
||||
@item -skip_threshold[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
frame skip threshold
|
||||
@item -skip_factor[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
frame skip factor
|
||||
@item -skip_exp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
frame skip exponent
|
||||
@item -skipcmp[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
frame skip compare function
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item sad
|
||||
sum of absolute differences, fast (default)
|
||||
@item sse
|
||||
sum of squared errors
|
||||
@item satd
|
||||
sum of absolute Hadamard transformed differences
|
||||
@item dct
|
||||
sum of absolute DCT transformed differences
|
||||
@item psnr
|
||||
sum of squared quantization errors (avoid, low quality)
|
||||
@item bit
|
||||
number of bits needed for the block
|
||||
@item rd
|
||||
rate distortion optimal, slow
|
||||
@item zero
|
||||
0
|
||||
@item vsad
|
||||
sum of absolute vertical differences
|
||||
@item vsse
|
||||
sum of squared vertical differences
|
||||
@item nsse
|
||||
noise preserving sum of squared differences
|
||||
@item dctmax
|
||||
|
||||
@item chroma
|
||||
|
||||
@end table
|
||||
@item -border_mask[:stream_specifier] @var{float} (@emph{output,video})
|
||||
increase the quantizer for macroblocks close to borders
|
||||
@item -mblmin[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
minimum macroblock Lagrange factor (VBR)
|
||||
@item -mblmax[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
maximum macroblock Lagrange factor (VBR)
|
||||
@item -mepc[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
motion estimation bitrate penalty compensation (1.0 = 256)
|
||||
@item -skip_loop_filter[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
skip loop filtering process for the selected frames
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none
|
||||
discard no frame
|
||||
@item default
|
||||
discard useless frames
|
||||
@item noref
|
||||
discard all non-reference frames
|
||||
@item bidir
|
||||
discard all bidirectional frames
|
||||
@item nokey
|
||||
discard all frames except keyframes
|
||||
@item all
|
||||
discard all frames
|
||||
@end table
|
||||
@item -skip_idct[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
skip IDCT/dequantization for the selected frames
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none
|
||||
discard no frame
|
||||
@item default
|
||||
discard useless frames
|
||||
@item noref
|
||||
discard all non-reference frames
|
||||
@item bidir
|
||||
discard all bidirectional frames
|
||||
@item nokey
|
||||
discard all frames except keyframes
|
||||
@item all
|
||||
discard all frames
|
||||
@end table
|
||||
@item -skip_frame[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
skip decoding for the selected frames
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item none
|
||||
discard no frame
|
||||
@item default
|
||||
discard useless frames
|
||||
@item noref
|
||||
discard all non-reference frames
|
||||
@item bidir
|
||||
discard all bidirectional frames
|
||||
@item nokey
|
||||
discard all frames except keyframes
|
||||
@item all
|
||||
discard all frames
|
||||
@end table
|
||||
@item -bidir_refine[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
refine the two motion vectors used in bidirectional macroblocks
|
||||
@item -brd_scale[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
downscale frames for dynamic B-frame decision
|
||||
@item -keyint_min[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
minimum interval between IDR-frames
|
||||
@item -refs[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
reference frames to consider for motion compensation
|
||||
@item -chromaoffset[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
chroma QP offset from luma
|
||||
@item -trellis[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
rate-distortion optimal quantization
|
||||
@item -sc_factor[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
multiplied by qscale for each frame and added to scene_change_score
|
||||
@item -mv0_threshold[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
@item -b_sensitivity[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
adjust sensitivity of b_frame_strategy 1
|
||||
@item -compression_level[:stream_specifier] @var{integer} (@emph{output,audio,video})
|
||||
@item -min_prediction_order[:stream_specifier] @var{integer} (@emph{output,audio})
|
||||
@item -max_prediction_order[:stream_specifier] @var{integer} (@emph{output,audio})
|
||||
@item -timecode_frame_start[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
GOP timecode frame start number, in non-drop-frame format
|
||||
@item -request_channels[:stream_specifier] @var{integer} (@emph{input,audio})
|
||||
set desired number of audio channels
|
||||
@item -channel_layout[:stream_specifier] @var{integer} (@emph{input/output,audio})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@end table
|
||||
@item -request_channel_layout[:stream_specifier] @var{integer} (@emph{input,audio})
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@end table
|
||||
@item -rc_max_vbv_use[:stream_specifier] @var{float} (@emph{output,video})
|
||||
@item -rc_min_vbv_use[:stream_specifier] @var{float} (@emph{output,video})
|
||||
@item -ticks_per_frame[:stream_specifier] @var{integer} (@emph{input/output,audio,video})
|
||||
@item -color_primaries[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
@item -color_trc[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
@item -colorspace[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
@item -color_range[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
@item -chroma_sample_location[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
@item -slices[:stream_specifier] @var{integer} (@emph{output,video})
|
||||
number of slices, used in parallelized encoding
|
||||
@item -thread_type[:stream_specifier] @var{flags} (@emph{input/output,audio,video})
|
||||
select multithreading type
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item slice
|
||||
|
||||
@item frame
|
||||
|
||||
@end table
|
||||
@item -audio_service_type[:stream_specifier] @var{integer} (@emph{output,audio})
|
||||
audio service type
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item ma
|
||||
Main Audio Service
|
||||
@item ef
|
||||
Effects
|
||||
@item vi
|
||||
Visually Impaired
|
||||
@item hi
|
||||
Hearing Impaired
|
||||
@item di
|
||||
Dialogue
|
||||
@item co
|
||||
Commentary
|
||||
@item em
|
||||
Emergency
|
||||
@item vo
|
||||
Voice Over
|
||||
@item ka
|
||||
Karaoke
|
||||
@end table
|
||||
@item -request_sample_fmt[:stream_specifier] @var{value} (@emph{input,audio})
|
||||
sample format audio decoders should prefer
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@end table
|
||||
@item -sub_charenc[:stream_specifier] @var{string} (@emph{input,subtitles})
|
||||
set input text subtitles character encoding
|
||||
@item -sub_charenc_mode[:stream_specifier] @var{flags} (@emph{input,subtitles})
|
||||
set input text subtitles character encoding mode
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item do_nothing
|
||||
|
||||
@item auto
|
||||
|
||||
@item pre_decoder
|
||||
|
||||
@end table
|
||||
@item -refcounted_frames[:stream_specifier] @var{integer} (@emph{input,audio,video})
|
||||
@item -skip_alpha[:stream_specifier] @var{integer} (@emph{input,video})
|
||||
Skip processing alpha
|
||||
@item -field_order[:stream_specifier] @var{integer} (@emph{input/output,video})
|
||||
Field order
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item progressive
|
||||
|
||||
@item tt
|
||||
|
||||
@item bb
|
||||
|
||||
@item tb
|
||||
|
||||
@item bt
|
||||
|
||||
@end table
|
||||
@end table
|
@ -1,122 +0,0 @@
@c DO NOT EDIT THIS FILE!
@c It was generated by print_options.

@section Format AVOptions
@table @option
@item -avioflags @var{flags} (@emph{input/output})

Possible values:
@table @samp
@item direct
reduce buffering
@end table
@item -probesize @var{integer} (@emph{input})
set probing size
@item -packetsize @var{integer} (@emph{output})
set packet size
@item -fflags @var{flags} (@emph{input/output})

Possible values:
@table @samp
@item flush_packets
reduce the latency by flushing out packets immediately
@item ignidx
ignore index
@item genpts
generate pts
@item nofillin
do not fill in missing values that can be exactly calculated
@item noparse
disable AVParsers, this needs nofillin too
@item igndts
ignore dts
@item discardcorrupt
discard corrupted frames
@item sortdts
try to interleave outputted packets by dts
@item keepside
don't merge side data
@item latm
enable RTP MP4A-LATM payload
@item nobuffer
reduce the latency introduced by optional buffering
@end table
@item -seek2any @var{integer} (@emph{input})
allow seeking to non-keyframes on demuxer level when supported
@item -analyzeduration @var{integer} (@emph{input})
specify how many microseconds are analyzed to probe the input
@item -cryptokey @var{hexadecimal string} (@emph{input})
decryption key
@item -indexmem @var{integer} (@emph{input})
max memory used for timestamp index (per stream)
@item -rtbufsize @var{integer} (@emph{input})
max memory used for buffering real-time frames
@item -fdebug @var{flags} (@emph{input/output})
print specific debug info

Possible values:
@table @samp
@item ts

@end table
@item -max_delay @var{integer} (@emph{input/output})
maximum muxing or demuxing delay in microseconds
@item -fpsprobesize @var{integer} (@emph{input})
number of frames used to probe fps
@item -audio_preload @var{integer} (@emph{output})
microseconds by which audio packets should be interleaved earlier
@item -chunk_duration @var{integer} (@emph{output})
microseconds for each chunk
@item -chunk_size @var{integer} (@emph{output})
size in bytes for each chunk
@item -f_err_detect @var{flags} (@emph{input})
set error detection flags (deprecated; use err_detect, save via avconv)

Possible values:
@table @samp
@item crccheck
verify embedded CRCs
@item bitstream
detect bitstream specification deviations
@item buffer
detect improper bitstream length
@item explode
abort decoding on minor error detection
@item careful
consider things that violate the spec, are fast to check and have not been seen in the wild as errors
@item compliant
consider all spec non compliancies as errors
@item aggressive
consider things that a sane encoder shouldn't do as an error
@end table
@item -err_detect @var{flags} (@emph{input})
set error detection flags

Possible values:
@table @samp
@item crccheck
verify embedded CRCs
@item bitstream
detect bitstream specification deviations
@item buffer
detect improper bitstream length
@item explode
abort decoding on minor error detection
@item careful
consider things that violate the spec, are fast to check and have not been seen in the wild as errors
@item compliant
consider all spec non compliancies as errors
@item aggressive
consider things that a sane encoder shouldn't do as an error
@end table
@item -use_wallclock_as_timestamps @var{integer} (@emph{input})
use wallclock as timestamps
@item -avoid_negative_ts @var{integer} (@emph{output})
shift timestamps to make them non-negative. 1 enables, 0 disables, default of -1 enables when required by target format.
@item -skip_initial_bytes @var{integer} (@emph{input})
set number of bytes to skip before reading header and frames
@item -correct_ts_overflow @var{integer} (@emph{input})
correct single timestamp overflows
@item -flush_packets @var{integer} (@emph{output})
enable flushing of the I/O context after each packet
@end table
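The flag-type options above accept the usual cumulative @code{+}/@code{-} prefix syntax. As a hedged illustration (this command is not part of the generated dump), missing presentation timestamps can be regenerated while stream-copying by enabling the @samp{genpts} flag on the input:

@example
ffmpeg -fflags +genpts -i INPUT -c copy OUTPUT.mkv
@end example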
@ -1,36 +0,0 @@
AVUtil
======
libavutil is a small lightweight library of generally useful functions.
It is not a library for code needed by both libavcodec and libavformat.


Overview:
=========
adler32.c               adler32 checksum
aes.c                   AES encryption and decryption
fifo.c                  resizeable first in first out buffer
intfloat_readwrite.c    portable reading and writing of floating point values
log.c                   "printf" with context and level
md5.c                   MD5 Message-Digest Algorithm
rational.c              code to perform exact calculations with rational numbers
tree.c                  generic AVL tree
crc.c                   generic CRC checksumming code
integer.c               128bit integer math
lls.c
mathematics.c           greatest common divisor, integer sqrt, integer log2, ...
mem.c                   memory allocation routines with guaranteed alignment

Headers:
bswap.h                 big/little/native-endian conversion code
x86_cpu.h               a few useful macros for unifying x86-64 and x86-32 code
avutil.h
common.h
intreadwrite.h          reading and writing of unaligned big/little/native-endian integers


Goals:
======
* Modular (few interdependencies and the possibility of disabling individual parts during ./configure)
* Small (source and object)
* Efficient (low CPU and memory usage)
* Useful (avoid useless features almost no one needs)
@ -1,128 +0,0 @@
@chapter Bitstream Filters
@c man begin BITSTREAM FILTERS

When you configure your FFmpeg build, all the supported bitstream
filters are enabled by default. You can list all available ones using
the configure option @code{--list-bsfs}.

You can disable all the bitstream filters using the configure option
@code{--disable-bsfs}, and selectively enable any bitstream filter using
the option @code{--enable-bsf=BSF}, or you can disable a particular
bitstream filter using the option @code{--disable-bsf=BSF}.

The option @code{-bsfs} of the ff* tools will display the list of
all the supported bitstream filters included in your build.

Below is a description of the currently available bitstream filters.

@section aac_adtstoasc

Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
bitstream filter.

This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
ADTS header and removes the ADTS header.

This is required for example when copying an AAC stream from a raw
ADTS AAC container to a FLV or a MOV/MP4 file.

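As a sketch of the typical invocation (the file names below are placeholders, not taken from the original text), a raw ADTS stream can be stream-copied into an MP4-family container with the filter applied to the audio stream:

@example
ffmpeg -i INPUT.aac -c:a copy -bsf:a aac_adtstoasc OUTPUT.m4a
@end example
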
@section chomp

Remove zero padding at the end of a packet.

@section dump_extra

Add extradata to the beginning of the filtered packets.

The additional argument specifies which packets should be filtered.
It accepts the values:
@table @samp
@item a
add extradata to all key packets, but only if @var{local_header} is
set in the @option{flags2} codec context field

@item k
add extradata to all key packets

@item e
add extradata to all packets
@end table

If not specified, @samp{k} is assumed.

For example, the following @command{ffmpeg} command forces a global
header (thus disabling individual packet headers) in the H.264 packets
generated by the @code{libx264} encoder, but corrects them by adding
the header stored in extradata to the key packets:
@example
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
@end example

@section h264_mp4toannexb

Convert an H.264 bitstream from length-prefixed mode to start-code-prefixed
mode (as defined in Annex B of the ITU-T H.264 specification).

This is required by some streaming formats, typically the MPEG-2
transport stream format ("mpegts").

For example, to remux an MP4 file containing an H.264 stream to mpegts
format with @command{ffmpeg}, you can use the command:

@example
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
@end example

@section imx_dump_header

@section mjpeg2jpeg

Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.

MJPEG is a video codec wherein each video frame is essentially a
JPEG image. The individual frames can be extracted without loss,
e.g. by

@example
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
@end example

Unfortunately, these chunks are incomplete JPEG images, because
they lack the DHT segment required for decoding. Quoting from
@url{http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml}:

Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
commented that "MJPEG, or at least the MJPEG in AVIs having the
MJPG fourcc, is restricted JPEG with a fixed -- and *omitted* --
Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
and it must use basic Huffman encoding, not arithmetic or
progressive. . . . You can indeed extract the MJPEG frames and
decode them with a regular JPEG decoder, but you have to prepend
the DHT segment to them, or else the decoder won't have any idea
how to decompress the data. The exact table necessary is given in
the OpenDML spec."

This bitstream filter patches the header of frames extracted from an MJPEG
stream (carrying the AVI1 header ID and lacking a DHT segment) to
produce fully qualified JPEG images.

@example
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
exiftran -i -9 frame*.jpg
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
@end example

@section mjpega_dump_header

@section movsub

@section mp3_header_compress

@section mp3_header_decompress

@section noise

@section remove_extra

@c man end BITSTREAM FILTERS
@ -1,50 +0,0 @@
FFmpeg currently uses a custom build system; this text attempts to document
some of its obscure features and options.

Makefile variables:

V
    Disable the default terse mode; the full command issued by make and its
    output will be shown on the screen.

DESTDIR
    Destination directory for the install targets, useful to prepare packages
    or install FFmpeg in cross-environments.

Makefile targets:

all
    Default target, builds all the libraries and the executables.

fate
    Run the FATE test suite; note that you must have installed it first.

fate-list
    Will list all fate/regression test targets.

install
    Install headers, libraries and programs.

libavformat/output-example
    Build the libavformat basic example.

libavcodec/api-example
    Build the libavcodec basic example.

libswscale/swscale-test
    Build the swscale self-test (useful also as an example).


Useful standard make commands:

make -t <target>
    Touch all files that otherwise would be built; this is useful to reduce
    unneeded rebuilding when changing headers, but note that you must then
    force rebuilds of files that actually need it by hand.

make -j<num>
    Rebuild with multiple jobs at the same time. Faster on multiprocessor
    systems.

make -k
    Continue the build in case of errors; this is sometimes useful for the
    regression tests, but note that it will still not run all of them.

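These switches combine freely. As an illustrative invocation (not part of the original text), a verbose, parallel test run that keeps going past individual failures could look like:

    make V=1 -j8 -k fate
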
File diff suppressed because it is too large
File diff suppressed because it is too large
@ -1,202 +0,0 @@
@chapter Decoders
@c man begin DECODERS

Decoders are configured elements in FFmpeg which allow the decoding of
multimedia streams.

When you configure your FFmpeg build, all the supported native decoders
are enabled by default. Decoders requiring an external library must be enabled
manually via the corresponding @code{--enable-lib} option. You can list all
available decoders using the configure option @code{--list-decoders}.

You can disable all the decoders with the configure option
@code{--disable-decoders} and selectively enable / disable single decoders
with the options @code{--enable-decoder=@var{DECODER}} /
@code{--disable-decoder=@var{DECODER}}.

The option @code{-codecs} of the ff* tools will display the list of
enabled decoders.

@c man end DECODERS

@chapter Video Decoders
@c man begin VIDEO DECODERS

A description of some of the currently available video decoders
follows.

@section rawvideo

Raw video decoder.

This decoder decodes rawvideo streams.

@subsection Options

@table @option
@item top @var{top_field_first}
Specify the assumed field type of the input video.
@table @option
@item -1
the video is assumed to be progressive (default)
@item 0
bottom-field-first is assumed
@item 1
top-field-first is assumed
@end table

@end table

@c man end VIDEO DECODERS

@chapter Audio Decoders
@c man begin AUDIO DECODERS

@section ffwavesynth

Internal wave synthesizer.

This decoder generates wave patterns according to predefined sequences. Its
use is purely internal and the format of the data it accepts is not publicly
documented.

@section libcelt

libcelt decoder wrapper.

libcelt allows libavcodec to decode the Xiph CELT ultra-low delay audio codec.
Requires the presence of the libcelt headers and library during configuration.
You need to explicitly configure the build with @code{--enable-libcelt}.

@section libgsm

libgsm decoder wrapper.

libgsm allows libavcodec to decode the GSM full rate audio codec. Requires
the presence of the libgsm headers and library during configuration. You need
to explicitly configure the build with @code{--enable-libgsm}.

This decoder supports both the ordinary GSM and the Microsoft variant.

@section libilbc

libilbc decoder wrapper.

libilbc allows libavcodec to decode the Internet Low Bitrate Codec (iLBC)
audio codec. Requires the presence of the libilbc headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libilbc}.

@subsection Options

The following option is supported by the libilbc wrapper.

@table @option
@item enhance

Enable the enhancement of the decoded audio when set to 1. The default
value is 0 (disabled).

@end table

@section libopencore-amrnb

libopencore-amrnb decoder wrapper.

libopencore-amrnb allows libavcodec to decode the Adaptive Multi-Rate
Narrowband audio codec. Using it requires the presence of the
libopencore-amrnb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrnb}.

An FFmpeg native decoder for AMR-NB exists, so users can decode AMR-NB
without this library.

@section libopencore-amrwb

libopencore-amrwb decoder wrapper.

libopencore-amrwb allows libavcodec to decode the Adaptive Multi-Rate
Wideband audio codec. Using it requires the presence of the
libopencore-amrwb headers and library during configuration. You need to
explicitly configure the build with @code{--enable-libopencore-amrwb}.

An FFmpeg native decoder for AMR-WB exists, so users can decode AMR-WB
without this library.

@section libopus

libopus decoder wrapper.

libopus allows libavcodec to decode the Opus Interactive Audio Codec.
Requires the presence of the libopus headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libopus}.

@c man end AUDIO DECODERS

@chapter Subtitles Decoders
@c man begin SUBTITLES DECODERS

@section dvdsub

This codec decodes the bitmap subtitles used in DVDs; the same subtitles can
also be found in VobSub file pairs and in some Matroska files.

@subsection Options

@table @option
@item palette
Specify the global palette used by the bitmaps. When stored in VobSub, the
palette is normally specified in the index file; in Matroska, the palette is
stored in the codec extra-data in the same format as in VobSub. In DVDs, the
palette is stored in the IFO file, and therefore not available when reading
from dumped VOB files.

The format for this option is a string containing 16 24-bit hexadecimal
numbers (without 0x prefix) separated by commas, for example @code{0d00ee,
ee450d, 101010, eaeaea, 0ce60b, ec14ed, ebff0b, 0d617a, 7b7b7b, d1d1d1,
7b2a0e, 0d950c, 0f007b, cf0dec, cfa80c, 7c127b}.
@end table

@section libzvbi-teletext

Libzvbi allows libavcodec to decode DVB teletext pages and DVB teletext
subtitles. Requires the presence of the libzvbi headers and library during
configuration. You need to explicitly configure the build with
@code{--enable-libzvbi}.

@subsection Options

@table @option
@item txt_page
List of teletext page numbers to decode. You may use the special * string to
match all pages. Pages that do not match the specified list are dropped.
Default value is *.
@item txt_chop_top
Discards the top teletext line. Default value is 1.
@item txt_format
Specifies the format of the decoded subtitles. The teletext decoder is capable
of decoding the teletext pages to bitmaps or to simple text; you should use
"bitmap" for teletext pages, because certain graphics and colors cannot be
expressed in simple text. You might use "text" for teletext-based subtitles if
your application can handle simple text-based subtitles. Default value is
bitmap.
@item txt_left
X offset of generated bitmaps, default is 0.
@item txt_top
Y offset of generated bitmaps, default is 0.
@item txt_chop_spaces
Chops leading and trailing spaces and removes empty lines from the generated
text. This option is useful for teletext-based subtitles where empty spaces may
be present at the start or at the end of the lines or empty lines may be
present between the subtitle lines because of double-sized teletext characters.
Default value is 1.
@item txt_duration
Sets the display duration of the decoded teletext pages or subtitles in
milliseconds. Default value is 30000, which is 30 seconds.
@item txt_transparent
Force transparent background of the generated teletext bitmaps. Default value
is 0, which means an opaque (black) background.
@end table
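As a rough sketch only (the stream layout, page number and option placement here are assumptions, not part of the original text), a teletext subtitle page could be converted to a text-based format by passing the decoder options before the input:

@example
ffmpeg -txt_format text -txt_page 888 -i INPUT.ts -map 0:s:0 OUTPUT.srt
@end example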

@c man end SUBTITLES DECODERS
@ -1,165 +0,0 @@
a.summary-letter {
    text-decoration: none;
}

a {
    color: #2D6198;
}

a:visited {
    color: #884488;
}

#banner {
    background-color: white;
    position: relative;
    text-align: center;
}

#banner img {
    padding-bottom: 1px;
    padding-top: 5px;
}

#body {
    margin-left: 1em;
    margin-right: 1em;
}

body {
    background-color: #313131;
    margin: 0;
    text-align: justify;
}

.center {
    margin-left: auto;
    margin-right: auto;
    text-align: center;
}

#container {
    background-color: white;
    color: #202020;
    margin-left: 1em;
    margin-right: 1em;
}

#footer {
    text-align: center;
}

h1 a, h2 a, h3 a, h4 a {
    text-decoration: inherit;
    color: inherit;
}

h1, h2, h3, h4 {
    padding-left: 0.4em;
    border-radius: 4px;
    padding-bottom: 0.25em;
    padding-top: 0.25em;
    border: 1px solid #6A996A;
}

h1 {
    background-color: #7BB37B;
    color: #151515;
    font-size: 1.2em;
    padding-bottom: 0.3em;
    padding-top: 0.3em;
}

h2 {
    color: #313131;
    font-size: 1.0em;
    background-color: #ABE3AB;
}

h3 {
    color: #313131;
    font-size: 0.9em;
    margin-bottom: -6px;
    background-color: #BBF3BB;
}

h4 {
    color: #313131;
    font-size: 0.8em;
    margin-bottom: -8px;
    background-color: #D1FDD1;
}

img {
    border: 0;
}

#navbar {
    background-color: #738073;
    border-bottom: 1px solid #5C665C;
    border-top: 1px solid #5C665C;
    margin-top: 12px;
    padding: 0.3em;
    position: relative;
    text-align: center;
}

#navbar a, #navbar_secondary a {
    color: white;
    padding: 0.3em;
    text-decoration: none;
}

#navbar a:hover, #navbar_secondary a:hover {
    background-color: #313131;
    color: white;
    text-decoration: none;
}

#navbar_secondary {
    background-color: #738073;
    border-bottom: 1px solid #5C665C;
    border-left: 1px solid #5C665C;
    border-right: 1px solid #5C665C;
    padding: 0.3em;
    position: relative;
    text-align: center;
}

p {
    margin-left: 1em;
    margin-right: 1em;
}

pre {
    margin-left: 3em;
    margin-right: 3em;
    padding: 0.3em;
    border: 1px solid #bbb;
    background-color: #f7f7f7;
}

dl dt {
    font-weight: bold;
}

#proj_desc {
    font-size: 1.2em;
}

#repos {
    margin-left: 1em;
    margin-right: 1em;
    border-collapse: collapse;
    border: solid 1px #6A996A;
}

#repos th {
    background-color: #7BB37B;
    border: solid 1px #6A996A;
}

#repos td {
    padding: 0.2em;
    border: solid 1px #6A996A;
}
@ -1,375 +0,0 @@
@chapter Demuxers
@c man begin DEMUXERS

Demuxers are configured elements in FFmpeg that can read the
multimedia streams from a particular type of file.

When you configure your FFmpeg build, all the supported demuxers
are enabled by default. You can list all available ones using the
configure option @code{--list-demuxers}.

You can disable all the demuxers using the configure option
@code{--disable-demuxers}, and selectively enable a single demuxer with
the option @code{--enable-demuxer=@var{DEMUXER}}, or disable it
with the option @code{--disable-demuxer=@var{DEMUXER}}.

The option @code{-formats} of the ff* tools will display the list of
enabled demuxers.

The description of some of the currently available demuxers follows.

@section applehttp

Apple HTTP Live Streaming demuxer.

This demuxer presents all AVStreams from all variant streams.
The id field is set to the bitrate variant index number. By setting
the discard flags on AVStreams (by pressing 'a' or 'v' in ffplay),
the caller can decide which variant streams to actually receive.
The total bitrate of the variant that the stream belongs to is
available in a metadata key named "variant_bitrate".

@section asf

Advanced Systems Format demuxer.

This demuxer is used to demux ASF files and MMS network streams.

@table @option
@item -no_resync_search @var{bool}
Do not try to resynchronize by looking for a certain optional start code.
@end table

@anchor{concat}
@section concat

Virtual concatenation script demuxer.

This demuxer reads a list of files and other directives from a text file and
demuxes them one after the other, as if all their packets had been muxed
together.

The timestamps in the files are adjusted so that the first file starts at 0
and each next file starts where the previous one finishes. Note that it is
done globally and may cause gaps if all streams do not have exactly the same
length.

All files must have the same streams (same codecs, same time base, etc.).

The duration of each file is used to adjust the timestamps of the next file:
if the duration is incorrect (because it was computed using the bit-rate or
because the file is truncated, for example), it can cause artifacts. The
@code{duration} directive can be used to override the duration stored in
each file.

@subsection Syntax

The script is a text file in extended-ASCII, with one directive per line.
Empty lines, leading spaces and lines starting with '#' are ignored. The
following directive is recognized:

@table @option

@item @code{file @var{path}}
Path to a file to read; special characters and spaces must be escaped with
backslash or single quotes.

All subsequent directives apply to that file.

@item @code{ffconcat version 1.0}
Identify the script type and version. It also sets the @option{safe} option
to 1 if it was set to its default -1.

To make FFmpeg recognize the format automatically, this directive must
appear exactly as is (no extra space or byte-order-mark) on the very first
line of the script.

@item @code{duration @var{dur}}
Duration of the file. This information can be specified from the file;
specifying it here may be more efficient or help if the information from the
file is not available or accurate.

If the duration is set for all files, then it is possible to seek in the
whole concatenated video.

@end table

@subsection Options

This demuxer accepts the following option:

@table @option

@item safe
If set to 1, reject unsafe file paths. A file path is considered safe if it
does not contain a protocol specification and is relative and all components
only contain characters from the portable character set (letters, digits,
period, underscore and hyphen) and have no period at the beginning of a
component.

If set to 0, any file name is accepted.

The default is -1; it is equivalent to 1 if the format was automatically
probed and 0 otherwise.

@end table

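As a minimal sketch (the file names here are placeholders), a script such as

@example
ffconcat version 1.0
file intro.mkv
file main.mkv
@end example

can then be remuxed as a single stream with:

@example
ffmpeg -f concat -i playlist.ffconcat -c copy joined.mkv
@end example
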
@section flv

Adobe Flash Video Format demuxer.

This demuxer is used to demux FLV files and RTMP network streams.

@table @option
@item -flv_metadata @var{bool}
Allocate the streams according to the onMetaData array content.
@end table

@section libgme

The Game Music Emu library is a collection of video game music file emulators.

See @url{http://code.google.com/p/game-music-emu/} for more information.

Some files have multiple tracks. The demuxer will pick the first track by
default. The @option{track_index} option can be used to select a different
track. Track indexes start at 0. The demuxer exports the number of tracks as
the @var{tracks} metadata entry.

For very large files, the @option{max_size} option may have to be adjusted.

@section libquvi

Play media from Internet services using the quvi project.

The demuxer accepts a @option{format} option to request a specific quality. It
is by default set to @var{best}.

See @url{http://quvi.sourceforge.net/} for more information.

FFmpeg needs to be built with @code{--enable-libquvi} for this demuxer to be
enabled.

@section image2

Image file demuxer.

This demuxer reads from a list of image files specified by a pattern.
The syntax and meaning of the pattern is specified by the
option @var{pattern_type}.

The pattern may contain a suffix which is used to automatically
determine the format of the images contained in the files.

The size, the pixel format, and the format of each image must be the
same for all the files in the sequence.

This demuxer accepts the following options:
@table @option
@item framerate
Set the frame rate for the video stream. It defaults to 25.
@item loop
If set to 1, loop over the input. Default value is 0.
@item pattern_type
Select the pattern type used to interpret the provided filename.

@var{pattern_type} accepts one of the following values.
@table @option
@item sequence
Select a sequence pattern type, used to specify a sequence of files
indexed by sequential numbers.

A sequence pattern may contain the string "%d" or "%0@var{N}d", which
specifies the position of the characters representing a sequential
number in each filename matched by the pattern. If the form
"%0@var{N}d" is used, the string representing the number in each
filename is 0-padded and @var{N} is the total number of 0-padded
digits representing the number. The literal character '%' can be
specified in the pattern with the string "%%".

If the sequence pattern contains "%d" or "%0@var{N}d", the first filename of
the file list specified by the pattern must contain a number
inclusively contained between @var{start_number} and
@var{start_number}+@var{start_number_range}-1, and all the following
numbers must be sequential.

For example the pattern "img-%03d.bmp" will match a sequence of
filenames of the form @file{img-001.bmp}, @file{img-002.bmp}, ...,
@file{img-010.bmp}, etc.; the pattern "i%%m%%g-%d.jpg" will match a
sequence of filenames of the form @file{i%m%g-1.jpg},
@file{i%m%g-2.jpg}, ..., @file{i%m%g-10.jpg}, etc.

Note that the pattern does not necessarily have to contain "%d" or
"%0@var{N}d"; for example, to convert a single image file
@file{img.jpeg} you can employ the command:
@example
ffmpeg -i img.jpeg img.png
@end example

@item glob
Select a glob wildcard pattern type.

The pattern is interpreted like a @code{glob()} pattern. This is only
selectable if libavformat was compiled with globbing support.

@item glob_sequence @emph{(deprecated, will be removed)}
Select a mixed glob wildcard/sequence pattern.

If your version of libavformat was compiled with globbing support, and
the provided pattern contains at least one glob meta character among
@code{%*?[]@{@}} that is preceded by an unescaped "%", the pattern is
interpreted like a @code{glob()} pattern, otherwise it is interpreted
like a sequence pattern.

All glob special characters @code{%*?[]@{@}} must be prefixed
with "%". To escape a literal "%" you shall use "%%".

For example the pattern @code{foo-%*.jpeg} will match all the
filenames prefixed by "foo-" and terminating with ".jpeg", and
@code{foo-%?%?%?.jpeg} will match all the filenames prefixed with
"foo-", followed by a sequence of three characters, and terminating
with ".jpeg".

This pattern type is deprecated in favor of @var{glob} and
@var{sequence}.
@end table

Default value is @var{glob_sequence}.
@item pixel_format
Set the pixel format of the images to read. If not specified the pixel
format is guessed from the first image file in the sequence.
@item start_number
Set the index of the file matched by the image file pattern to start
to read from. Default value is 0.
@item start_number_range
Set the index interval range to check when looking for the first image
file in the sequence, starting from @var{start_number}. Default value
is 5.
@item ts_from_file
If set to 1, will set frame timestamp to modification time of image file. Note
that monotonicity of timestamps is not provided: images go in the same order as
without this option. Default value is 0.
@item video_size
Set the video size of the images to read. If not specified the video
size is guessed from the first image file in the sequence.
@end table

@subsection Examples

@itemize
@item
Use @command{ffmpeg} for creating a video from the images in the file
sequence @file{img-001.jpeg}, @file{img-002.jpeg}, ..., assuming an
input frame rate of 10 frames per second:
@example
ffmpeg -framerate 10 -i 'img-%03d.jpeg' out.mkv
@end example

@item
As above, but start by reading from a file with index 100 in the sequence:
@example
ffmpeg -framerate 10 -start_number 100 -i 'img-%03d.jpeg' out.mkv
@end example

@item
Read images matching the "*.png" glob pattern, that is, all the files
terminating with the ".png" suffix:
@example
ffmpeg -framerate 10 -pattern_type glob -i "*.png" out.mkv
@end example
@end itemize

@section mpegts

MPEG-2 transport stream demuxer.

@table @option

@item fix_teletext_pts
Overrides teletext packet PTS and DTS values with the timestamps calculated
from the PCR of the first program which the teletext stream is part of and is
not discarded. Default value is 1; set this option to 0 if you want your
teletext packet PTS and DTS values untouched.
@end table

@section rawvideo

Raw video demuxer.

This demuxer allows reading raw video data. Since there is no header
specifying the assumed video parameters, the user must specify them
in order to be able to decode the data correctly.

This demuxer accepts the following options:
@table @option

@item framerate
Set input video frame rate. Default value is 25.

@item pixel_format
Set the input video pixel format. Default value is @code{yuv420p}.

@item video_size
Set the input video size. This value must be specified explicitly.
@end table

For example, to read a rawvideo file @file{input.raw} with
@command{ffplay}, assuming a pixel format of @code{rgb24}, a video
size of @code{320x240}, and a frame rate of 10 images per second, use
the command:
@example
ffplay -f rawvideo -pixel_format rgb24 -video_size 320x240 -framerate 10 input.raw
@end example

@section sbg

SBaGen script demuxer.

This demuxer reads the script language used by SBaGen
@url{http://uazu.net/sbagen/} to generate binaural beats sessions. An SBG
script looks like this:
@example
-SE
a: 300-2.5/3 440+4.5/0
b: 300-2.5/0 440+4.5/3
off: -
NOW      == a
+0:07:00 == b
+0:14:00 == a
+0:21:00 == b
+0:30:00    off
@end example

An SBG script can mix absolute and relative timestamps. If the script uses
either only absolute timestamps (including the script start time) or only
relative ones, then its layout is fixed, and the conversion is
straightforward. On the other hand, if the script mixes both kinds of
timestamps, then the @var{NOW} reference for relative timestamps will be
taken from the current time of day at the time the script is read, and the
script layout will be frozen according to that reference. That means that if
the script is directly played, the actual times will match the absolute
timestamps up to the sound controller's clock accuracy, but if the user
somehow pauses the playback or seeks, all times will be shifted accordingly.

@section tedcaptions

JSON captions used for @url{http://www.ted.com/, TED Talks}.

TED does not provide links to the captions, but they can be guessed from the
page. The file @file{tools/bookmarklets.html} from the FFmpeg source tree
contains a bookmarklet to expose them.

This demuxer accepts the following option:
@table @option
@item start_time
Set the start time of the TED talk, in milliseconds. The default is 15000
(15s). It is used to sync the captions with the downloadable videos, because
they include a 15s intro.
@end table

Example: convert the captions to a format most players understand:
@example
ffmpeg -i http://www.ted.com/talks/subtitles/id/1/lang/en talk1-en.srt
@end example

@c man end DEMUXERS
@ -1,797 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Developer Documentation
@titlepage
@center @titlefont{Developer Documentation}
@end titlepage

@top

@contents

@chapter Developers Guide

@section Notes for external developers

This document is mostly useful for internal FFmpeg developers.
External developers who need to use the API in their application should
refer to the API doxygen documentation in the public headers, and
check the examples in @file{doc/examples} and in the source code to
see how the public API is employed.

You can use the FFmpeg libraries in your commercial program, but you
are encouraged to @emph{publish any patch you make}. In this case the
best way to proceed is to send your patches to the ffmpeg-devel
mailing list following the guidelines illustrated in the remainder of
this document.

For more detailed legal information about the use of FFmpeg in
external programs read the @file{LICENSE} file in the source tree and
consult @url{http://ffmpeg.org/legal.html}.

@section Contributing

There are 3 ways by which code gets into FFmpeg:
@itemize @bullet
@item Submitting patches to the main developer mailing list;
see @ref{Submitting patches} for details.
@item Directly committing changes to the main tree.
@item Committing changes to a git clone, for example on github.com or
gitorious.org, and asking us to merge these changes.
@end itemize

Whichever way, changes should be reviewed by the maintainer of the code
before they are committed, and they should follow the @ref{Coding Rules}.
The developer making the commit and the author are responsible for their changes
and should try to fix issues their commit causes.

@anchor{Coding Rules}
@section Coding Rules

@subsection Code formatting conventions

There are the following guidelines regarding the indentation in files:

@itemize @bullet
@item
Indent size is 4.

@item
The TAB character is forbidden outside of Makefiles as is any
form of trailing whitespace. Commits containing either will be
rejected by the git repository.

@item
You should try to limit your code lines to 80 characters; however, do so if
and only if this improves readability.
@end itemize
The presentation is one inspired by 'indent -i4 -kr -nut'.

The main priority in FFmpeg is simplicity and small code size in order to
minimize the bug count.

@subsection Comments
Use the JavaDoc/Doxygen format (see examples below) so that code documentation
can be generated automatically. All nontrivial functions should have a comment
above them explaining what the function does, even if it is just one sentence.
All structures and their member variables should be documented, too.

Avoid Qt-style and similar Doxygen syntax with @code{!} in it, i.e. replace
@code{//!} with @code{///} and similar. Also, the @@ syntax should be employed
for markup commands, i.e. use @code{@@param} and not @code{\param}.

@example
/**
 * @@file
 * MPEG codec.
 * @@author ...
 */

/**
 * Summary sentence.
 * more text ...
 * ...
 */
typedef struct Foobar@{
    int var1; /**< var1 description */
    int var2; ///< var2 description
    /** var3 description */
    int var3;
@} Foobar;

/**
 * Summary sentence.
 * more text ...
 * ...
 * @@param my_parameter description of my_parameter
 * @@return return value description
 */
int myfunc(int my_parameter)
    ...
@end example

@subsection C language features

FFmpeg is programmed in the ISO C90 language with a few additional
features from ISO C99, namely:

@itemize @bullet
@item
the @samp{inline} keyword;

@item
@samp{//} comments;

@item
designated struct initializers (@samp{struct s x = @{ .i = 17 @};})

@item
compound literals (@samp{x = (struct s) @{ 17, 23 @};})
@end itemize

These features are supported by all compilers we care about, so we will not
accept patches to remove their use unless they absolutely do not impair
clarity and performance.

All code must compile with recent versions of GCC and a number of other
currently supported compilers. To ensure compatibility, please do not use
additional C99 features or GCC extensions. Especially watch out for:

@itemize @bullet
@item
mixing statements and declarations;

@item
@samp{long long} (use @samp{int64_t} instead);

@item
@samp{__attribute__} not protected by @samp{#ifdef __GNUC__} or similar;

@item
GCC statement expressions (@samp{(x = (@{ int y = 4; y; @})}).
@end itemize

@subsection Naming conventions
All names should be composed with underscores (_), not CamelCase. For example,
@samp{avfilter_get_video_buffer} is an acceptable function name and
@samp{AVFilterGetVideo} is not. The exception to this is type names, such as
structs and enums; they should always be in CamelCase.

There are the following conventions for naming variables and functions:

@itemize @bullet
@item
For local variables no prefix is required.

@item
For file-scope variables and functions declared as @code{static}, no prefix
is required.

@item
For variables and functions visible outside of file scope, but only used
internally by a library, an @code{ff_} prefix should be used,
e.g. @samp{ff_w64_demuxer}.

@item
For variables and functions visible outside of file scope, used internally
across multiple libraries, use @code{avpriv_} as prefix, for example,
@samp{avpriv_aac_parse_header}.

@item
Each library has its own prefix for public symbols, in addition to the
commonly used @code{av_} (@code{avformat_} for libavformat,
@code{avcodec_} for libavcodec, @code{swr_} for libswresample, etc).
Check the existing code and choose names accordingly.
Note that some symbols without these prefixes are also exported for
backward-compatibility reasons. These exceptions are declared in the
@code{lib<name>/lib<name>.v} files.
@end itemize

Furthermore, name space reserved for the system should not be invaded.
Identifiers ending in @code{_t} are reserved by
@url{http://pubs.opengroup.org/onlinepubs/007904975/functions/xsh_chap02_02.html#tag_02_02_02, POSIX}.
Also avoid names starting with @code{__} or @code{_} followed by an uppercase
letter as they are reserved by the C standard. Names starting with @code{_}
are reserved at the file level and may not be used for externally visible
symbols. If in doubt, just avoid names starting with @code{_} altogether.

@subsection Miscellaneous conventions

@itemize @bullet
@item
fprintf and printf are forbidden in libavformat and libavcodec,
please use av_log() instead.

@item
Casts should be used only when necessary. Unneeded parentheses
should also be avoided if they don't make the code easier to understand.
@end itemize

@subsection Editor configuration
In order to configure Vim to follow FFmpeg formatting conventions, paste
the following snippet into your @file{.vimrc}:
@example
" indentation rules for FFmpeg: 4 spaces, no tabs
set expandtab
set shiftwidth=4
set softtabstop=4
set cindent
set cinoptions=(0
" Allow tabs in Makefiles.
autocmd FileType make,automake set noexpandtab shiftwidth=8 softtabstop=8
" Trailing whitespace and tabs are forbidden, so highlight them.
highlight ForbiddenWhitespace ctermbg=red guibg=red
match ForbiddenWhitespace /\s\+$\|\t/
" Do not highlight spaces at the end of line while typing on that line.
autocmd InsertEnter * match ForbiddenWhitespace /\t\|\s\+\%#\@@<!$/
@end example

For Emacs, add these roughly equivalent lines to your @file{.emacs.d/init.el}:
@example
(c-add-style "ffmpeg"
             '("k&r"
               (c-basic-offset . 4)
               (indent-tabs-mode . nil)
               (show-trailing-whitespace . t)
               (c-offsets-alist
                (statement-cont . (c-lineup-assignments +)))
               )
             )
(setq c-default-style "ffmpeg")
@end example

@section Development Policy

@enumerate
@item
Contributions should be licensed under the
@uref{http://www.gnu.org/licenses/lgpl-2.1.html, LGPL 2.1},
including an "or any later version" clause, or, if you prefer
a gift-style license, the
@uref{http://www.isc.org/software/license/, ISC} or
@uref{http://mit-license.org/, MIT} license.
@uref{http://www.gnu.org/licenses/gpl-2.0.html, GPL 2} including
an "or any later version" clause is also acceptable, but LGPL is
preferred.
If you add a new file, give it a proper license header. Do not copy and
paste it from a random place; use an existing file as a template.

@item
You must not commit code which breaks FFmpeg! (Meaning unfinished but
enabled code which breaks compilation, or compiles but does not work, or
breaks the regression tests.)
You can commit unfinished stuff (for testing etc), but it must be disabled
(#ifdef etc) by default so it does not interfere with other developers'
work.

@item
The commit message should have a short first line in the form of
a @samp{topic: short description} as a header, separated by a newline
from the body consisting of an explanation of why the change is necessary.
If the commit fixes a known bug on the bug tracker, the commit message
should include its bug ID. Referring to the issue on the bug tracker does
not exempt you from writing an excerpt of the bug in the commit message.

@item
You do not have to over-test things. If it works for you, and you think it
should work for others, then commit. If your code has problems
(portability, triggers compiler bugs, unusual environment etc) they will be
reported and eventually fixed.

@item
Do not commit unrelated changes together; split them into self-contained
pieces. Also do not forget that if part B depends on part A, but A does not
depend on B, then A can and should be committed first and separate from B.
Keeping changes well split into self-contained parts makes reviewing and
understanding them on the commit log mailing list easier. This also helps
in case of debugging later on.
Also, if you have doubts about splitting or not splitting, do not hesitate to
ask/discuss it on the developer mailing list.

@item
Do not change behavior of the programs (renaming options etc) or public
API or ABI without first discussing it on the ffmpeg-devel mailing list.
Do not remove functionality from the code. Just improve!

Note: Redundant code can be removed.

@item
Do not commit changes to the build system (Makefiles, configure script)
which change behavior, defaults etc, without asking first. The same
applies to compiler warning fixes, trivial-looking fixes and to code
maintained by other developers. We usually have a reason for doing things
the way we do. Send your changes as patches to the ffmpeg-devel mailing
list, and if the code maintainers say OK, you may commit. This does not
apply to files you wrote and/or maintain.

@item
We refuse source indentation and other cosmetic changes if they are mixed
with functional changes; such commits will be rejected and removed. Every
developer has his own indentation style; you should not change it. Of course
if you (re)write something, you can use your own style, even though we would
prefer if the indentation throughout FFmpeg was consistent (many projects
force a given indentation style - we do not). If you really need to make
indentation changes (try to avoid this), separate them strictly from real
changes.

NOTE: If you had to put if()@{ .. @} over a large (> 5 lines) chunk of code,
then either do NOT change the indentation of the inner part within (do not
move it to the right), or do so in a separate commit.

@item
Always fill out the commit log message. Describe in a few lines what you
changed and why. You can refer to mailing list postings if you fix a
particular bug. Comments such as "fixed!" or "Changed it." are unacceptable.
Recommended format:
area changed: Short 1 line description

details describing what and why and giving references.

@item
Make sure the author of the commit is set correctly (see git commit --author).
If you apply a patch, send an
answer to ffmpeg-devel (or wherever you got the patch from) saying that
you applied the patch.

@item
When applying patches that have been discussed (at length) on the mailing
list, reference the thread in the log message.

@item
Do NOT commit to code actively maintained by others without permission.
Send a patch to ffmpeg-devel instead. If no one answers within a reasonable
timeframe (12h for build failures and security fixes, 3 days for small changes,
1 week for big patches) then commit your patch if you think it is OK.
Also note, the maintainer can simply ask for more time to review!

@item
Subscribe to the ffmpeg-cvslog mailing list. The diffs of all commits
are sent there and reviewed by all the other developers. Bugs and possible
improvements or general questions regarding commits are discussed there. We
expect you to react if problems with your code are uncovered.

@item
Update the documentation if you change behavior or add features. If you are
unsure how best to do this, send a patch to ffmpeg-devel; the documentation
maintainer(s) will review and commit your stuff.

@item
Try to keep important discussions and requests (also) on the public
developer mailing list, so that all developers can benefit from them.

@item
Never write to unallocated memory, never write over the end of arrays,
always check values read from some untrusted source before using them
as array index or other risky things.

@item
Remember to check if you need to bump versions for the specific libav*
parts (libavutil, libavcodec, libavformat) you are changing. You need
to change the version integer.
Incrementing the first component means no backward compatibility to
previous versions (e.g. removal of a function from the public API).
Incrementing the second component means backward compatible change
(e.g. addition of a function to the public API or extension of an
existing data structure).
Incrementing the third component means a noteworthy binary compatible
change (e.g. encoder bug fix that matters for the decoder). The third
component always starts at 100 to distinguish FFmpeg from Libav.

@item
Compiler warnings indicate potential bugs or code with bad style. If a type of
warning always points to correct and clean code, that warning should
be disabled, not the code changed.
Thus the remaining warnings can either be bugs or correct code.
If it is a bug, the bug has to be fixed. If it is not, the code should
be changed to not generate a warning unless that causes a slowdown
or obfuscates the code.

@item
Make sure that no parts of the codebase that you maintain are missing from the
@file{MAINTAINERS} file. If something that you want to maintain is missing, add it with
your name after it.
If at some point you no longer want to maintain some code, then please help
find a new maintainer and also don't forget to update the @file{MAINTAINERS} file.
@end enumerate

We think our rules are not too hard. If you have comments, contact us.

@anchor{Submitting patches}
@section Submitting patches

First, read the @ref{Coding Rules} above if you did not yet, in particular
the rules regarding patch submission.

When you submit your patch, please use @code{git format-patch} or
@code{git send-email}. We cannot read other diffs :-)

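For instance, a single-commit patch is typically prepared and sent like this (the patch file name depends on what @command{git format-patch} produces for your commit; ffmpeg-devel@@ffmpeg.org is the public development list):

@example
git format-patch -1 HEAD
git send-email --to ffmpeg-devel@@ffmpeg.org 0001-*.patch
@end example
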
Also, please do not submit a patch which contains several unrelated changes.
Split it into separate, self-contained pieces. This does not mean splitting
file by file. Instead, make the patch as small as possible while still
keeping it as a logical unit that contains an individual change, even
if it spans multiple files. This makes reviewing your patches much easier
for us and greatly increases your chances of getting your patch applied.

Use the patcheck tool of FFmpeg to check your patch.
The tool is located in the tools directory.

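As a hedged illustration (the patch file name below is purely hypothetical), patcheck is simply run on the generated patch file from the source tree root:

@example
tools/patcheck 0001-lavf-fix-foo.patch
@end example
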
Run the @ref{Regression tests} before submitting a patch in order to verify
|
||||
it does not cause unexpected problems.
|
||||
|
||||
It also helps quite a bit if you tell us what the patch does (for example
|
||||
'replaces lrint by lrintf'), and why (for example '*BSD isn't C99 compliant
|
||||
and has no lrint()')
|
||||
|
||||
Also please if you send several patches, send each patch as a separate mail,
|
||||
do not attach several unrelated patches to the same mail.
|
||||
|
||||
Patches should be posted to the
|
||||
@uref{http://lists.ffmpeg.org/mailman/listinfo/ffmpeg-devel, ffmpeg-devel}
|
||||
mailing list. Use @code{git send-email} when possible since it will properly
|
||||
send patches without requiring extra care. If you cannot, then send patches
|
||||
as base64-encoded attachments, so your patch is not trashed during
|
||||
transmission.
|
||||
|
||||
Your patch will be reviewed on the mailing list. You will likely be asked
|
||||
to make some changes and are expected to send in an improved version that
|
||||
incorporates the requests from the review. This process may go through
|
||||
several iterations. Once your patch is deemed good enough, some developer
|
||||
will pick it up and commit it to the official FFmpeg tree.
|
||||
|
||||
Give us a few days to react. But if some time passes without reaction,
|
||||
send a reminder by email. Your patch should eventually be dealt with.
|
||||
|
||||
|
||||
@section New codecs or formats checklist
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Did you use av_cold for codec initialization and close functions?
|
||||
|
||||
@item
|
||||
Did you add a long_name under NULL_IF_CONFIG_SMALL to the AVCodec or
|
||||
AVInputFormat/AVOutputFormat struct?
|
||||
|
||||
@item
|
||||
Did you bump the minor version number (and reset the micro version
|
||||
number) in @file{libavcodec/version.h} or @file{libavformat/version.h}?
|
||||
|
||||
@item
|
||||
Did you register it in @file{allcodecs.c} or @file{allformats.c}?
|
||||
|
||||
@item
|
||||
Did you add the AVCodecID to @file{avcodec.h}?
|
||||
When adding new codec IDs, also add an entry to the codec descriptor
|
||||
list in @file{libavcodec/codec_desc.c}.
|
||||
|
||||
@item
|
||||
If it has a FourCC, did you add it to @file{libavformat/riff.c},
|
||||
even if it is only a decoder?
|
||||
|
||||
@item
|
||||
Did you add a rule to compile the appropriate files in the Makefile?
|
||||
Remember to do this even if you're just adding a format to a file that is
|
||||
already being compiled by some other rule, like a raw demuxer.
|
||||
|
||||
@item
|
||||
Did you add an entry to the table of supported formats or codecs in
|
||||
@file{doc/general.texi}?
|
||||
|
||||
@item
|
||||
Did you add an entry in the Changelog?
|
||||
|
||||
@item
|
||||
If it depends on a parser or a library, did you add that dependency in
|
||||
configure?
|
||||
|
||||
@item
|
||||
Did you @code{git add} the appropriate files before committing?
|
||||
|
||||
@item
|
||||
Did you make sure it compiles standalone, i.e. with
|
||||
@code{configure --disable-everything --enable-decoder=foo}
|
||||
(or @code{--enable-demuxer} or whatever your component is)?
|
||||
@end enumerate
|
||||
|
||||
|
||||
@section patch submission checklist
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Does @code{make fate} pass with the patch applied?
|
||||
|
||||
@item
|
||||
Was the patch generated with git format-patch or send-email?
|
||||
|
||||
@item
|
||||
Did you sign off your patch? (git commit -s)
|
||||
See @url{http://git.kernel.org/?p=linux/kernel/git/torvalds/linux.git;a=blob_plain;f=Documentation/SubmittingPatches} for the meaning
|
||||
of sign off.
|
||||
|
||||
@item
|
||||
Did you provide a clear git commit log message?
|
||||
|
||||
@item
|
||||
Is the patch against latest FFmpeg git master branch?
|
||||
|
||||
@item
|
||||
Are you subscribed to ffmpeg-devel?
|
||||
(the list is subscribers only due to spam)
|
||||
|
||||
@item
|
||||
Have you checked that the changes are minimal, so that the same cannot be
|
||||
achieved with a smaller patch and/or simpler final code?
|
||||
|
||||
@item
|
||||
If the change is to speed critical code, did you benchmark it?
|
||||
|
||||
@item
|
||||
If you did any benchmarks, did you provide them in the mail?
|
||||
|
||||
@item
|
||||
Have you checked that the patch does not introduce buffer overflows or
|
||||
other security issues?
|
||||
|
||||
@item
|
||||
Did you test your decoder or demuxer against damaged data? If not, see
|
||||
tools/trasher, the noise bitstream filter, and
|
||||
@uref{http://caca.zoy.org/wiki/zzuf, zzuf}. Your decoder or demuxer
|
||||
should not crash, end in a (near) infinite loop, or allocate ridiculous
|
||||
amounts of memory when fed damaged data.
|
||||
|
||||
@item
|
||||
Does the patch not mix functional and cosmetic changes?
|
||||
|
||||
@item
|
||||
Did you add tabs or trailing whitespace to the code? Both are forbidden.
|
||||
|
||||
@item
|
||||
Is the patch attached to the email you send?
|
||||
|
||||
@item
|
||||
Is the mime type of the patch correct? It should be text/x-diff or
|
||||
text/x-patch or at least text/plain and not application/octet-stream.
|
||||
|
||||
@item
|
||||
If the patch fixes a bug, did you provide a verbose analysis of the bug?
|
||||
|
||||
@item
|
||||
If the patch fixes a bug, did you provide enough information, including
|
||||
a sample, so the bug can be reproduced and the fix can be verified?
|
||||
Note: please do not attach samples >100k to mails, but rather provide a
|
||||
URL; you can upload to ftp://upload.ffmpeg.org
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary of what the patch changes?
|
||||
|
||||
@item
|
||||
Did you provide a verbose explanation of why it changes things the way it does?
|
||||
|
||||
@item
|
||||
Did you provide a verbose summary of the user visible advantages and
|
||||
disadvantages if the patch is applied?
|
||||
|
||||
@item
|
||||
Did you provide an example so we can verify the new feature added by the
|
||||
patch easily?
|
||||
|
||||
@item
|
||||
If you added a new file, did you insert a license header? It should be
|
||||
taken from FFmpeg, not randomly copied and pasted from somewhere else.
|
||||
|
||||
@item
|
||||
You should maintain alphabetical order in alphabetically ordered lists as
|
||||
long as doing so does not break API/ABI compatibility.
|
||||
|
||||
@item
|
||||
Lines with similar content should be aligned vertically when doing so
|
||||
improves readability.
|
||||
|
||||
@item
|
||||
Consider adding a regression test for your code.
|
||||
|
||||
@item
|
||||
If you added YASM code, please check that things still work with @code{--disable-yasm}.
|
||||
|
||||
@item
|
||||
Make sure you check the return values of functions and return appropriate
|
||||
error codes. Especially memory allocation functions like @code{av_malloc()}
|
||||
are notoriously left unchecked, which is a serious problem.
|
||||
|
||||
@item
|
||||
Test your code with valgrind and/or Address Sanitizer to ensure it is free
|
||||
of leaks, out-of-array accesses, etc.
|
||||
@end enumerate
|
||||
|
||||
@section Patch review process
|
||||
|
||||
All patches posted to ffmpeg-devel will be reviewed, unless they contain a
|
||||
clear note that the patch is not for the git master branch.
|
||||
Reviews and comments will be posted as replies to the patch on the
|
||||
mailing list. The patch submitter is then expected to address every comment,
|
||||
either by resubmitting a changed patch or through discussion. Resubmitted
|
||||
patches will themselves be reviewed like any other patch. If at some point
|
||||
a patch passes review with no comments, it is approved; for
|
||||
simple and small patches this can happen immediately, while large patches will generally
|
||||
have to be changed and reviewed many times before they are approved.
|
||||
After a patch is approved it will be committed to the repository.
|
||||
|
||||
We will review all submitted patches, but sometimes we are quite busy so
|
||||
especially for large patches this can take several weeks.
|
||||
|
||||
If you feel that the review process is too slow and you are willing to try to
|
||||
take over maintainership of the area of code you change then just clone
|
||||
git master and maintain the area of code there. We will merge each area from
|
||||
where it is best maintained.
|
||||
|
||||
When resubmitting patches, please do not make any significant changes
|
||||
not related to the comments received during review. Such patches will
|
||||
be rejected. Instead, submit significant changes or new features as
|
||||
separate patches.
|
||||
|
||||
@anchor{Regression tests}
|
||||
@section Regression tests
|
||||
|
||||
Before submitting a patch (or committing to the repository), you should at least
|
||||
test that you did not break anything.
|
||||
|
||||
Running @code{make fate} accomplishes this; please see @url{fate.html} for details.
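
A typical local run, assuming the FATE samples are kept in @file{fate-suite/}
(the path is only an example):

@example
make fate-rsync SAMPLES=fate-suite/   # fetch or update the sample files
make fate       SAMPLES=fate-suite/   # run the regression suite
@end example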
|
||||
|
||||
[Of course, some patches may change the results of the regression tests. In
|
||||
this case, the reference results of the regression tests shall be modified
|
||||
accordingly].
|
||||
|
||||
@subsection Adding files to the fate-suite dataset
|
||||
|
||||
When there is no muxer or encoder available to generate test media for a
|
||||
specific test, the media has to be included in the fate-suite.
|
||||
First please make sure that the sample file is as small as possible to test the
|
||||
respective decoder or demuxer sufficiently. Large files increase network
|
||||
bandwidth and disk space requirements.
|
||||
Once you have a working fate test and fate sample, provide in the commit
|
||||
message or introductory message for the patch series that you post to
|
||||
the ffmpeg-devel mailing list, a direct link to download the sample media.
|
||||
|
||||
|
||||
@subsection Visualizing Test Coverage
|
||||
|
||||
The FFmpeg build system allows visualizing the test coverage in an easy
|
||||
manner with the coverage tools @code{gcov}/@code{lcov}. This involves
|
||||
the following steps:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Configure to compile with instrumentation enabled:
|
||||
@code{configure --toolchain=gcov}.
|
||||
|
||||
@item
|
||||
Run your test case, either manually or via FATE. This can be either
|
||||
the full FATE regression suite, or any arbitrary invocation of any
|
||||
front-end tool provided by FFmpeg, in any combination.
|
||||
|
||||
@item
|
||||
Run @code{make lcov} to generate coverage data in HTML format.
|
||||
|
||||
@item
|
||||
View @code{lcov/index.html} in your preferred HTML viewer.
|
||||
@end enumerate
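
Put together, a coverage run might look like the following sketch; any FATE
target or manual tool invocation can be substituted for the second step:

@example
./configure --toolchain=gcov
make fate
make lcov
# then open lcov/index.html in a browser
@end example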
|
||||
|
||||
You can use the command @code{make lcov-reset} to reset the coverage
|
||||
measurements. You will need to rerun @code{make lcov} after running a
|
||||
new test.
|
||||
|
||||
@subsection Using Valgrind
|
||||
|
||||
The configure script provides a shortcut for using valgrind to spot bugs
|
||||
related to memory handling. Just add the option
|
||||
@code{--toolchain=valgrind-memcheck} or @code{--toolchain=valgrind-massif}
|
||||
to your configure line, and reasonable defaults will be set for running
|
||||
FATE under the supervision of either the @strong{memcheck} or the
|
||||
@strong{massif} tool of the valgrind suite.
|
||||
|
||||
In case you need finer control over how valgrind is invoked, use the
|
||||
@code{--target-exec='valgrind <your_custom_valgrind_options>'} option in
|
||||
your configure line instead.
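
For example (the extra valgrind options in the second variant are purely
illustrative):

@example
./configure --toolchain=valgrind-memcheck
make fate

# finer control over the valgrind invocation:
./configure --target-exec='valgrind --leak-check=full --track-origins=yes'
make fate
@end example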
|
||||
|
||||
@anchor{Release process}
|
||||
@section Release process
|
||||
|
||||
FFmpeg maintains a set of @strong{release branches}, which are the
|
||||
recommended deliverable for system integrators and distributors (such as
|
||||
Linux distributions, etc.). At regular times, a @strong{release
|
||||
manager} prepares, tests and publishes tarballs on the
|
||||
@url{http://ffmpeg.org} website.
|
||||
|
||||
There are two kinds of releases:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
@strong{Major releases} always include the latest and greatest
|
||||
features and functionality.
|
||||
|
||||
@item
|
||||
@strong{Point releases} are cut from @strong{release} branches,
|
||||
which are named @code{release/X}, with @code{X} being the release
|
||||
version number.
|
||||
@end enumerate
|
||||
|
||||
Note that we promise to our users that shared libraries from any FFmpeg
|
||||
release never break programs that have been @strong{compiled} against
|
||||
previous versions of @strong{the same release series} in any case!
|
||||
|
||||
However, from time to time, we do make API changes that require adaptations
|
||||
in applications. Such changes are only allowed in (new) major releases and
|
||||
require further steps such as bumping library version numbers and/or
|
||||
adjustments to the symbol versioning file. Please discuss such changes
|
||||
on the @strong{ffmpeg-devel} mailing list in time to allow forward planning.
|
||||
|
||||
@anchor{Criteria for Point Releases}
|
||||
@subsection Criteria for Point Releases
|
||||
|
||||
Changes that match the following criteria are valid candidates for
|
||||
inclusion into a point release:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Fixes a security issue, preferably identified by a @strong{CVE
|
||||
number} issued by @url{http://cve.mitre.org/}.
|
||||
|
||||
@item
|
||||
Fixes a documented bug in @url{https://trac.ffmpeg.org}.
|
||||
|
||||
@item
|
||||
Improves the included documentation.
|
||||
|
||||
@item
|
||||
Retains both source code and binary compatibility with previous
|
||||
point releases of the same release branch.
|
||||
@end enumerate
|
||||
|
||||
The order for checking the rules is (1 OR 2 OR 3) AND 4.
|
||||
|
||||
|
||||
@subsection Release Checklist
|
||||
|
||||
The release process involves the following steps:
|
||||
|
||||
@enumerate
|
||||
@item
|
||||
Ensure that the @file{RELEASE} file contains the version number for
|
||||
the upcoming release.
|
||||
|
||||
@item
|
||||
Add the release at @url{https://trac.ffmpeg.org/admin/ticket/versions}.
|
||||
|
||||
@item
|
||||
Announce the intent to do a release to the mailing list.
|
||||
|
||||
@item
|
||||
Make sure all relevant security fixes have been backported. See
|
||||
@url{https://ffmpeg.org/security.html}.
|
||||
|
||||
@item
|
||||
Ensure that the FATE regression suite still passes in the release
|
||||
branch on at least @strong{i386} and @strong{amd64}
|
||||
(cf. @ref{Regression tests}).
|
||||
|
||||
@item
|
||||
Prepare the release tarballs in @code{bz2} and @code{gz} formats, and
|
||||
supplementary files that contain @code{gpg} signatures (see the sketch after this list).
|
||||
|
||||
@item
|
||||
Publish the tarballs at @url{http://ffmpeg.org/releases}. Create and
|
||||
push an annotated tag in the form @code{nX}, with @code{X}
|
||||
containing the version number.
|
||||
|
||||
@item
|
||||
Propose and send a patch to the @strong{ffmpeg-devel} mailing list
|
||||
with a news entry for the website.
|
||||
|
||||
@item
|
||||
Publish the news entry.
|
||||
|
||||
@item
|
||||
Send announcement to the mailing list.
|
||||
@end enumerate
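
For the tarball and tagging steps above, a session might look like this
sketch; the version number and file names are placeholders, not taken from
an actual release:

@example
# create and push the annotated release tag
git tag -a n2.3.4 -m "FFmpeg 2.3.4"
git push origin n2.3.4

# sign the prepared tarballs
gpg --armor --detach-sign ffmpeg-2.3.4.tar.bz2
gpg --armor --detach-sign ffmpeg-2.3.4.tar.gz
@end example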
|
||||
|
||||
@bye
|
@ -1,21 +0,0 @@
|
||||
@chapter Device Options
|
||||
@c man begin DEVICE OPTIONS
|
||||
|
||||
The libavdevice library provides the same interface as
|
||||
libavformat. Namely, an input device is treated like a demuxer, and
|
||||
an output device like a muxer, and the interface and generic device
|
||||
options are the same as those provided by libavformat (see the ffmpeg-formats
|
||||
manual).
|
||||
|
||||
In addition each input or output device may support so-called private
|
||||
options, which are specific to that component.
|
||||
|
||||
Options may be set by specifying -@var{option} @var{value} in the
|
||||
FFmpeg tools, or by setting the value explicitly in the device
|
||||
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
|
||||
for programmatic use.
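
As an illustration, a private option is passed on the command line just like
a generic one; the device and option below are only an example, consult the
input device documentation for the options each device actually supports:

@example
ffmpeg -f alsa -sample_rate 48000 -i hw:0 output.wav
@end example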
|
||||
|
||||
@c man end DEVICE OPTIONS
|
||||
|
||||
@include indevs.texi
|
||||
@include outdevs.texi
|
@ -1,14 +0,0 @@
|
||||
#!/bin/sh
|
||||
|
||||
SRC_PATH="${1}"
|
||||
DOXYFILE="${2}"
|
||||
|
||||
shift 2
|
||||
|
||||
doxygen - <<EOF
|
||||
@INCLUDE = ${DOXYFILE}
|
||||
INPUT = $@
|
||||
HTML_HEADER = ${SRC_PATH}/doc/doxy/header.html
|
||||
HTML_FOOTER = ${SRC_PATH}/doc/doxy/footer.html
|
||||
HTML_STYLESHEET = ${SRC_PATH}/doc/doxy/doxy_stylesheet.css
|
||||
EOF
|
@ -1,9 +0,0 @@
|
||||
|
||||
<footer class="footer pagination-right">
|
||||
<span class="label label-info">
|
||||
Generated on $datetime for $projectname by <a href="http://www.doxygen.org/index.html">doxygen</a> $doxygenversion
|
||||
</span>
|
||||
</footer>
|
||||
</div>
|
||||
</body>
|
||||
</html>
|
@ -1,16 +0,0 @@
|
||||
<!DOCTYPE html>
|
||||
<html>
|
||||
<head>
|
||||
<meta http-equiv="Content-Type" content="text/html; charset=UTF-8"/>
|
||||
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
|
||||
<!--BEGIN PROJECT_NAME--><title>$projectname: $title</title><!--END PROJECT_NAME-->
|
||||
<!--BEGIN !PROJECT_NAME--><title>$title</title><!--END !PROJECT_NAME-->
|
||||
<link href="$relpath$doxy_stylesheet.css" rel="stylesheet" type="text/css" />
|
||||
<!--Header replace -->
|
||||
|
||||
</head>
|
||||
|
||||
<div class="container">
|
||||
|
||||
<!--Header replace -->
|
||||
<div class="menu">
|
@ -1,174 +0,0 @@
|
||||
The following table lists most error codes found in various operating
|
||||
systems supported by FFmpeg.
|
||||
|
||||
OS
|
||||
Code Std F LBMWwb Text (YMMV)
|
||||
|
||||
E2BIG POSIX ++++++ Argument list too long
|
||||
EACCES POSIX ++++++ Permission denied
|
||||
EADDRINUSE POSIX +++..+ Address in use
|
||||
EADDRNOTAVAIL POSIX +++..+ Cannot assign requested address
|
||||
EADV +..... Advertise error
|
||||
EAFNOSUPPORT POSIX +++..+ Address family not supported
|
||||
EAGAIN POSIX + ++++++ Resource temporarily unavailable
|
||||
EALREADY POSIX +++..+ Operation already in progress
|
||||
EAUTH .++... Authentication error
|
||||
EBADARCH ..+... Bad CPU type in executable
|
||||
EBADE +..... Invalid exchange
|
||||
EBADEXEC ..+... Bad executable
|
||||
EBADF POSIX ++++++ Bad file descriptor
|
||||
EBADFD +..... File descriptor in bad state
|
||||
EBADMACHO ..+... Malformed Macho file
|
||||
EBADMSG POSIX ++4... Bad message
|
||||
EBADR +..... Invalid request descriptor
|
||||
EBADRPC .++... RPC struct is bad
|
||||
EBADRQC +..... Invalid request code
|
||||
EBADSLT +..... Invalid slot
|
||||
EBFONT +..... Bad font file format
|
||||
EBUSY POSIX - ++++++ Device or resource busy
|
||||
ECANCELED POSIX +++... Operation canceled
|
||||
ECHILD POSIX ++++++ No child processes
|
||||
ECHRNG +..... Channel number out of range
|
||||
ECOMM +..... Communication error on send
|
||||
ECONNABORTED POSIX +++..+ Software caused connection abort
|
||||
ECONNREFUSED POSIX - +++ss+ Connection refused
|
||||
ECONNRESET POSIX +++..+ Connection reset
|
||||
EDEADLK POSIX ++++++ Resource deadlock avoided
|
||||
EDEADLOCK +..++. File locking deadlock error
|
||||
EDESTADDRREQ POSIX +++... Destination address required
|
||||
EDEVERR ..+... Device error
|
||||
EDOM C89 - ++++++ Numerical argument out of domain
|
||||
EDOOFUS .F.... Programming error
|
||||
EDOTDOT +..... RFS specific error
|
||||
EDQUOT POSIX +++... Disc quota exceeded
|
||||
EEXIST POSIX ++++++ File exists
|
||||
EFAULT POSIX - ++++++ Bad address
|
||||
EFBIG POSIX - ++++++ File too large
|
||||
EFTYPE .++... Inappropriate file type or format
|
||||
EHOSTDOWN +++... Host is down
|
||||
EHOSTUNREACH POSIX +++..+ No route to host
|
||||
EHWPOISON +..... Memory page has hardware error
|
||||
EIDRM POSIX +++... Identifier removed
|
||||
EILSEQ C99 ++++++ Illegal byte sequence
|
||||
EINPROGRESS POSIX - +++ss+ Operation in progress
|
||||
EINTR POSIX - ++++++ Interrupted system call
|
||||
EINVAL POSIX + ++++++ Invalid argument
|
||||
EIO POSIX + ++++++ I/O error
|
||||
EISCONN POSIX +++..+ Socket is already connected
|
||||
EISDIR POSIX ++++++ Is a directory
|
||||
EISNAM +..... Is a named type file
|
||||
EKEYEXPIRED +..... Key has expired
|
||||
EKEYREJECTED +..... Key was rejected by service
|
||||
EKEYREVOKED +..... Key has been revoked
|
||||
EL2HLT +..... Level 2 halted
|
||||
EL2NSYNC +..... Level 2 not synchronized
|
||||
EL3HLT +..... Level 3 halted
|
||||
EL3RST +..... Level 3 reset
|
||||
ELIBACC +..... Can not access a needed shared library
|
||||
ELIBBAD +..... Accessing a corrupted shared library
|
||||
ELIBEXEC +..... Cannot exec a shared library directly
|
||||
ELIBMAX +..... Too many shared libraries
|
||||
ELIBSCN +..... .lib section in a.out corrupted
|
||||
ELNRNG +..... Link number out of range
|
||||
ELOOP POSIX +++..+ Too many levels of symbolic links
|
||||
EMEDIUMTYPE +..... Wrong medium type
|
||||
EMFILE POSIX ++++++ Too many open files
|
||||
EMLINK POSIX ++++++ Too many links
|
||||
EMSGSIZE POSIX +++..+ Message too long
|
||||
EMULTIHOP POSIX ++4... Multihop attempted
|
||||
ENAMETOOLONG POSIX - ++++++ File name too long
|
||||
ENAVAIL +..... No XENIX semaphores available
|
||||
ENEEDAUTH .++... Need authenticator
|
||||
ENETDOWN POSIX +++..+ Network is down
|
||||
ENETRESET SUSv3 +++..+ Network dropped connection on reset
|
||||
ENETUNREACH POSIX +++..+ Network unreachable
|
||||
ENFILE POSIX ++++++ Too many open files in system
|
||||
ENOANO +..... No anode
|
||||
ENOATTR .++... Attribute not found
|
||||
ENOBUFS POSIX - +++..+ No buffer space available
|
||||
ENOCSI +..... No CSI structure available
|
||||
ENODATA XSR +N4... No message available
|
||||
ENODEV POSIX - ++++++ No such device
|
||||
ENOENT POSIX - ++++++ No such file or directory
|
||||
ENOEXEC POSIX ++++++ Exec format error
|
||||
ENOFILE ...++. No such file or directory
|
||||
ENOKEY +..... Required key not available
|
||||
ENOLCK POSIX ++++++ No locks available
|
||||
ENOLINK POSIX ++4... Link has been severed
|
||||
ENOMEDIUM +..... No medium found
|
||||
ENOMEM POSIX ++++++ Not enough space
|
||||
ENOMSG POSIX +++..+ No message of desired type
|
||||
ENONET +..... Machine is not on the network
|
||||
ENOPKG +..... Package not installed
|
||||
ENOPROTOOPT POSIX +++..+ Protocol not available
|
||||
ENOSPC POSIX ++++++ No space left on device
|
||||
ENOSR XSR +N4... No STREAM resources
|
||||
ENOSTR XSR +N4... Not a STREAM
|
||||
ENOSYS POSIX + ++++++ Function not implemented
|
||||
ENOTBLK +++... Block device required
|
||||
ENOTCONN POSIX +++..+ Socket is not connected
|
||||
ENOTDIR POSIX ++++++ Not a directory
|
||||
ENOTEMPTY POSIX ++++++ Directory not empty
|
||||
ENOTNAM +..... Not a XENIX named type file
|
||||
ENOTRECOVERABLE SUSv4 - +..... State not recoverable
|
||||
ENOTSOCK POSIX +++..+ Socket operation on non-socket
|
||||
ENOTSUP POSIX +++... Operation not supported
|
||||
ENOTTY POSIX ++++++ Inappropriate I/O control operation
|
||||
ENOTUNIQ +..... Name not unique on network
|
||||
ENXIO POSIX ++++++ No such device or address
|
||||
EOPNOTSUPP POSIX +++..+ Operation not supported (on socket)
|
||||
EOVERFLOW POSIX +++..+ Value too large to be stored in data type
|
||||
EOWNERDEAD SUSv4 +..... Owner died
|
||||
EPERM POSIX - ++++++ Operation not permitted
|
||||
EPFNOSUPPORT +++..+ Protocol family not supported
|
||||
EPIPE POSIX - ++++++ Broken pipe
|
||||
EPROCLIM .++... Too many processes
|
||||
EPROCUNAVAIL .++... Bad procedure for program
|
||||
EPROGMISMATCH .++... Program version wrong
|
||||
EPROGUNAVAIL .++... RPC prog. not avail
|
||||
EPROTO POSIX ++4... Protocol error
|
||||
EPROTONOSUPPORT POSIX - +++ss+ Protocol not supported
|
||||
EPROTOTYPE POSIX +++..+ Protocol wrong type for socket
|
||||
EPWROFF ..+... Device power is off
|
||||
ERANGE C89 - ++++++ Result too large
|
||||
EREMCHG +..... Remote address changed
|
||||
EREMOTE +++... Object is remote
|
||||
EREMOTEIO +..... Remote I/O error
|
||||
ERESTART +..... Interrupted system call should be restarted
|
||||
ERFKILL +..... Operation not possible due to RF-kill
|
||||
EROFS POSIX ++++++ Read-only file system
|
||||
ERPCMISMATCH .++... RPC version wrong
|
||||
ESHLIBVERS ..+... Shared library version mismatch
|
||||
ESHUTDOWN +++..+ Cannot send after socket shutdown
|
||||
ESOCKTNOSUPPORT +++... Socket type not supported
|
||||
ESPIPE POSIX ++++++ Illegal seek
|
||||
ESRCH POSIX ++++++ No such process
|
||||
ESRMNT +..... Srmount error
|
||||
ESTALE POSIX +++..+ Stale NFS file handle
|
||||
ESTRPIPE +..... Streams pipe error
|
||||
ETIME XSR +N4... Stream ioctl timeout
|
||||
ETIMEDOUT POSIX - +++ss+ Connection timed out
|
||||
ETOOMANYREFS +++... Too many references: cannot splice
|
||||
ETXTBSY POSIX +++... Text file busy
|
||||
EUCLEAN +..... Structure needs cleaning
|
||||
EUNATCH +..... Protocol driver not attached
|
||||
EUSERS +++... Too many users
|
||||
EWOULDBLOCK POSIX +++..+ Operation would block
|
||||
EXDEV POSIX ++++++ Cross-device link
|
||||
EXFULL +..... Exchange full
|
||||
|
||||
Notations:
|
||||
|
||||
F: used in FFmpeg (-: a few times, +: a lot)
|
||||
|
||||
SUSv3: Single Unix Specification, version 3
|
||||
SUSv4: Single Unix Specification, version 4
|
||||
XSR: XSI STREAMS (obsolete)
|
||||
|
||||
OS: availability on some supported operating systems
|
||||
L: GNU/Linux
|
||||
B: BSD (F: FreeBSD, N: NetBSD)
|
||||
M: MacOS X
|
||||
W: Microsoft Windows (s: emulated with winsock, see libavformat/network.h)
|
||||
w: Mingw32 (3.17) and Mingw64 (2.0.1)
|
||||
b: BeOS
|
@ -1,38 +0,0 @@
|
||||
# use pkg-config for getting CFLAGS and LDLIBS
|
||||
FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
|
||||
CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= decoding_encoding \
|
||||
demuxing \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
metadata \
|
||||
muxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
all: $(OBJS) $(EXAMPLES)
|
||||
|
||||
clean-test:
|
||||
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
|
||||
|
||||
clean: clean-test
|
||||
$(RM) $(EXAMPLES) $(OBJS)
|
@ -1,18 +0,0 @@
|
||||
FFmpeg examples README
|
||||
----------------------
|
||||
|
||||
Both following use cases rely on pkg-config and make, thus make sure
|
||||
that you have them installed and working on your system.
|
||||
|
||||
|
||||
1) Build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and just use "make"; it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
2) Build the examples in-tree
|
||||
|
||||
Assuming you are in the source FFmpeg checkout directory, you need to build
|
||||
FFmpeg (no need to make install in any prefix). Then you can go into
|
||||
doc/examples and run a command such as PKG_CONFIG_PATH=pc-uninstalled make.
|
@ -1,650 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2001 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavcodec API use example.
|
||||
*
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
* @example doc/examples/decoding_encoding.c
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
|
||||
#define INBUF_SIZE 4096
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
FILE *f;
|
||||
uint16_t *samples;
|
||||
float t, tincr;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "Encoder does not support sample format %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not setup audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode a single tone sound */
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for(i=0;i<200;i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
fclose(f);
|
||||
|
||||
av_freep(&samples);
|
||||
avcodec_free_frame(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio decoding.
|
||||
*/
|
||||
static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int len;
|
||||
FILE *f, *outfile;
|
||||
uint8_t inbuf[AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* decode until eof */
|
||||
avpkt.data = inbuf;
|
||||
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
|
||||
|
||||
while (avpkt.size > 0) {
|
||||
int got_frame = 0;
|
||||
|
||||
if (!decoded_frame) {
|
||||
if (!(decoded_frame = avcodec_alloc_frame())) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
} else
|
||||
avcodec_get_frame_defaults(decoded_frame);
|
||||
|
||||
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_frame) {
|
||||
/* if a frame has been decoded, output it */
|
||||
int data_size = av_samples_get_buffer_size(NULL, c->channels,
|
||||
decoded_frame->nb_samples,
|
||||
c->sample_fmt, 1);
|
||||
fwrite(decoded_frame->data[0], 1, data_size, outfile);
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
avpkt.dts =
|
||||
avpkt.pts = AV_NOPTS_VALUE;
|
||||
if (avpkt.size < AUDIO_REFILL_THRESH) {
|
||||
/* Refill the input buffer, to avoid trying to decode
|
||||
* incomplete frames. Instead of this, one could also use
|
||||
* a parser, or use a proper container format through
|
||||
* libavformat. */
|
||||
memmove(inbuf, avpkt.data, avpkt.size);
|
||||
avpkt.data = inbuf;
|
||||
len = fread(avpkt.data + avpkt.size, 1,
|
||||
AUDIO_INBUF_SIZE - avpkt.size, f);
|
||||
if (len > 0)
|
||||
avpkt.size += len;
|
||||
}
|
||||
}
|
||||
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
avcodec_free_frame(&decoded_frame);
|
||||
}
|
||||
|
||||
/*
|
||||
* Video encoding example
|
||||
*/
|
||||
static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, ret, x, y, got_output;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* frames per second */
|
||||
c->time_base= (AVRational){1,25};
|
||||
c->gop_size = 10; /* emit one intra frame every ten frames */
|
||||
c->max_b_frames=1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
if(codec_id == AV_CODEC_ID_H264)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
* just the most convenient way if av_malloc() is to be used */
|
||||
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
|
||||
c->pix_fmt, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw picture buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode 1 second of video */
|
||||
for(i=0;i<25;i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
fflush(stdout);
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for(y=0;y<c->height;y++) {
|
||||
for(x=0;x<c->width;x++) {
|
||||
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for(y=0;y<c->height/2;y++) {
|
||||
for(x=0;x<c->width/2;x++) {
|
||||
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
|
||||
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
|
||||
frame->pts = i;
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* add sequence end code to have a real mpeg file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_freep(&frame->data[0]);
|
||||
avcodec_free_frame(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Video decoding example
|
||||
*/
|
||||
|
||||
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
char *filename)
|
||||
{
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f=fopen(filename,"w");
|
||||
fprintf(f,"P5\n%d %d\n%d\n",xsize,ysize,255);
|
||||
for(i=0;i<ysize;i++)
|
||||
fwrite(buf + i * wrap,1,xsize,f);
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
|
||||
{
|
||||
int len, got_frame;
|
||||
char buf[1024];
|
||||
|
||||
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
|
||||
return len;
|
||||
}
|
||||
if (got_frame) {
|
||||
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
avctx->width, avctx->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
pkt->size -= len;
|
||||
pkt->data += len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void video_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_count;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if(codec->capabilities&CODEC_CAP_TRUNCATED)
|
||||
c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */
|
||||
|
||||
/* For some codecs, such as msmpeg4 and mpeg4, width and height
|
||||
MUST be initialized there because this information is not
|
||||
available in the bitstream. */
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame_count = 0;
|
||||
for(;;) {
|
||||
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (avpkt.size == 0)
|
||||
break;
|
||||
|
||||
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
|
||||
and this is the only method to use them because you cannot
|
||||
know the compressed data size before analysing it.
|
||||
|
||||
BUT some other codecs (msmpeg4, mpeg4) are inherently frame
|
||||
based, so you must call them with all the data for one
|
||||
frame exactly. You must also initialize 'width' and
|
||||
'height' before initializing them. */
|
||||
|
||||
/* NOTE2: some codecs allow the raw parameters (frame size,
|
||||
sample rate) to be changed at any frame. We handle this, so
|
||||
you should also take care of it */
|
||||
|
||||
/* here, we use a stream based decoder (mpeg1video), so we
|
||||
feed decoder and see if it could decode a frame */
|
||||
avpkt.data = inbuf;
|
||||
while (avpkt.size > 0)
|
||||
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* some codecs, such as MPEG, transmit the I and P frame with a
|
||||
latency of one frame. You must do the following to have a
|
||||
chance to get the last frame of the video */
|
||||
avpkt.data = NULL;
|
||||
avpkt.size = 0;
|
||||
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
|
||||
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
avcodec_free_frame(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *output_type;
|
||||
|
||||
/* register all the codecs */
|
||||
avcodec_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_type\n"
|
||||
"API example program to decode/encode a media stream with libavcodec.\n"
|
||||
"This program generates a synthetic stream and encodes it to a file\n"
|
||||
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
|
||||
"The encoded stream is then decoded and written to a raw data output.\n"
|
||||
"output_type must be choosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
argv[0]);
|
||||
return 1;
|
||||
}
|
||||
output_type = argv[1];
|
||||
|
||||
if (!strcmp(output_type, "h264")) {
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.sw", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
} else {
|
||||
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
|
||||
output_type);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,341 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat demuxing API use example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example doc/examples/demuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
video_dec_ctx->pix_fmt, video_dec_ctx->width, video_dec_ctx->height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame\n");
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4) {
|
||||
fprintf(stderr, "usage: %s input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* register all formats and codecs */
|
||||
av_register_all();
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dec_ctx->pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dec_ctx = audio_stream->codec;
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(video_dec_ctx->pix_fmt), video_dec_ctx->width, video_dec_ctx->height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
n_channels = 1;
|
||||
}
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
if (video_dec_ctx)
|
||||
avcodec_close(video_dec_ctx);
|
||||
if (audio_dec_ctx)
|
||||
avcodec_close(audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
av_free(frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
return ret < 0;
|
||||
}
|
@ -1,265 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example doc/examples/filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find an audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret;
|
||||
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
avcodec_register_all();
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
avcodec_get_frame_defaults(frame);
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
|
||||
continue;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if(ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if(ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
if (dec_ctx)
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "Error occurred: %s\n", buf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
@ -1,251 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example doc/examples/filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int video_stream_index = -1;
|
||||
static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the video stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
video_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret;
|
||||
AVFilter *buffersrc = avfilter_get_by_name("buffer");
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
AVBufferSinkParams *buffersink_params;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
|
||||
/* buffer video source: the decoded frames from the decoder will be inserted here. */
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
dec_ctx->time_base.num, dec_ctx->time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
buffersink_params = av_buffersink_params_alloc();
|
||||
buffersink_params->pixel_fmts = pix_fmts;
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, buffersink_params, filter_graph);
|
||||
av_free(buffersink_params);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* Endpoints for the filter graph. */
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
return ret;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
return ret;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
{
|
||||
int x, y;
|
||||
uint8_t *p0, *p;
|
||||
int64_t delay;
|
||||
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
if (last_pts != AV_NOPTS_VALUE) {
|
||||
/* sleep roughly the right amount of time;
|
||||
* usleep is in microseconds, just like AV_TIME_BASE. */
|
||||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
}
|
||||
|
||||
/* Trivial ASCII grayscale display. */
|
||||
p0 = frame->data[0];
|
||||
puts("\033c");
|
||||
for (y = 0; y < frame->height; y++) {
|
||||
p = p0;
|
||||
for (x = 0; x < frame->width; x++)
|
||||
putchar(" .-+#"[*(p++) / 52]);
|
||||
putchar('\n');
|
||||
p0 += frame->linesize[0];
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
avcodec_register_all();
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
avcodec_get_frame_defaults(frame);
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
if (dec_ctx)
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "Error occurred: %s\n", buf);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
@ -1,56 +0,0 @@
/*
 * Copyright (c) 2011 Reinhard Tartler
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * Shows how the metadata API can be used in application programs.
 * @example doc/examples/metadata.c
 */

#include <stdio.h>

#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int main (int argc, char **argv)
{
    AVFormatContext *fmt_ctx = NULL;
    AVDictionaryEntry *tag = NULL;
    int ret;

    if (argc != 2) {
        printf("usage: %s <input_file>\n"
               "example program to demonstrate the use of the libavformat metadata API.\n"
               "\n", argv[0]);
        return 1;
    }

    av_register_all();
    if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
        return ret;

    while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
        printf("%s=%s\n", tag->key, tag->value);

    avformat_close_input(&fmt_ctx);
    return 0;
}
@ -1,564 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat API example.
|
||||
*
|
||||
* Output a media file in any supported libavformat format.
|
||||
* The default codecs are used.
|
||||
* @example doc/examples/muxing.c
|
||||
*/
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
/* 5 seconds stream duration */
|
||||
#define STREAM_DURATION 200.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_NB_FRAMES ((int)(STREAM_DURATION * STREAM_FRAME_RATE))
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
static int sws_flags = SWS_BICUBIC;
|
||||
|
||||
/* Add an output stream. */
|
||||
static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVStream *st;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
st = avformat_new_stream(oc, *codec);
|
||||
if (!st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
st->id = oc->nb_streams-1;
|
||||
c = st->codec;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
c->channels = 2;
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
c->time_base.den = STREAM_FRAME_RATE;
|
||||
c->time_base.num = 1;
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
return st;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static float t, tincr, tincr2;
|
||||
|
||||
static uint8_t **src_samples_data;
|
||||
static int src_samples_linesize;
|
||||
static int src_nb_samples;
|
||||
|
||||
static int max_dst_nb_samples;
|
||||
uint8_t **dst_samples_data;
|
||||
int dst_samples_linesize;
|
||||
int dst_samples_size;
|
||||
|
||||
struct SwrContext *swr_ctx = NULL;
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int ret;
|
||||
|
||||
c = st->codec;
|
||||
|
||||
/* open it */
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
src_nb_samples = c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE ?
|
||||
10000 : c->frame_size;
|
||||
|
||||
ret = av_samples_alloc_array_and_samples(&src_samples_data, &src_samples_linesize, c->channels,
|
||||
src_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
if (c->sample_fmt != AV_SAMPLE_FMT_S16) {
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = src_nb_samples;
|
||||
ret = av_samples_alloc_array_and_samples(&dst_samples_data, &dst_samples_linesize, c->channels,
|
||||
max_dst_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
exit(1);
|
||||
}
|
||||
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, max_dst_nb_samples,
|
||||
c->sample_fmt, 0);
|
||||
}
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static void get_audio_frame(int16_t *samples, int frame_size, int nb_channels)
|
||||
{
|
||||
int j, i, v;
|
||||
int16_t *q;
|
||||
|
||||
q = samples;
|
||||
for (j = 0; j < frame_size; j++) {
|
||||
v = (int)(sin(t) * 10000);
|
||||
for (i = 0; i < nb_channels; i++)
|
||||
*q++ = v;
|
||||
t += tincr;
|
||||
tincr += tincr2;
|
||||
}
|
||||
}
|
||||
|
||||
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame = avcodec_alloc_frame();
|
||||
int got_packet, ret, dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = st->codec;
|
||||
|
||||
get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);
|
||||
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
if (swr_ctx) {
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_free(dst_samples_data[0]);
|
||||
ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
|
||||
dst_nb_samples, c->sample_fmt, 0);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
|
||||
c->sample_fmt, 0);
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx,
|
||||
dst_samples_data, dst_nb_samples,
|
||||
(const uint8_t **)src_samples_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
} else {
|
||||
dst_samples_data[0] = src_samples_data[0];
|
||||
dst_nb_samples = src_nb_samples;
|
||||
}
|
||||
|
||||
frame->nb_samples = dst_nb_samples;
|
||||
avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
dst_samples_data[0], dst_samples_size, 0);
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (!got_packet)
|
||||
return;
|
||||
|
||||
pkt.stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
avcodec_free_frame(&frame);
|
||||
}
|
||||
|
||||
static void close_audio(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
avcodec_close(st->codec);
|
||||
av_free(src_samples_data[0]);
|
||||
av_free(dst_samples_data[0]);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *frame;
|
||||
static AVPicture src_picture, dst_picture;
|
||||
static int frame_count;
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, AVStream *st)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = st->codec;
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
frame = avcodec_alloc_frame();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* Allocate the encoded raw picture. */
|
||||
ret = avpicture_alloc(&dst_picture, c->pix_fmt, c->width, c->height);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate picture: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ret = avpicture_alloc(&src_picture, AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate temporary picture: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
/* copy data and linesize picture pointers to frame */
|
||||
*((AVPicture *)frame) = dst_picture;
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVPicture *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i;
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static void write_video_frame(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
int ret;
|
||||
static struct SwsContext *sws_ctx;
|
||||
AVCodecContext *c = st->codec;
|
||||
|
||||
if (frame_count >= STREAM_NB_FRAMES) {
|
||||
/* No more frames to compress. The codec has a latency of a few
|
||||
* frames if using B-frames, so we get the last frames by
|
||||
* passing the same picture again. */
|
||||
} else {
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!sws_ctx) {
|
||||
sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height, c->pix_fmt,
|
||||
sws_flags, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(&src_picture, frame_count, c->width, c->height);
|
||||
sws_scale(sws_ctx,
|
||||
(const uint8_t * const *)src_picture.data, src_picture.linesize,
|
||||
0, c->height, dst_picture.data, dst_picture.linesize);
|
||||
} else {
|
||||
fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
|
||||
}
|
||||
}
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* Raw video case - directly store the picture in the packet */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = st->index;
|
||||
pkt.data = dst_picture.data[0];
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
int got_packet;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
/* If size is zero, it means the image was buffered. */
|
||||
|
||||
if (!ret && got_packet && pkt.size) {
|
||||
pkt.stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
if (ret != 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
frame_count++;
|
||||
}
|
||||
|
||||
static void close_video(AVFormatContext *oc, AVStream *st)
|
||||
{
|
||||
avcodec_close(st->codec);
|
||||
av_free(src_picture.data[0]);
|
||||
av_free(dst_picture.data[0]);
|
||||
av_free(frame);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVStream *audio_st, *video_st;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
double audio_time, video_time;
|
||||
int ret;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc != 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc) {
|
||||
return 1;
|
||||
}
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
video_st = NULL;
|
||||
audio_st = NULL;
|
||||
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
video_st = add_stream(oc, &video_codec, fmt->video_codec);
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
audio_st = add_stream(oc, &audio_codec, fmt->audio_codec);
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (video_st)
|
||||
open_video(oc, video_codec, video_st);
|
||||
if (audio_st)
|
||||
open_audio(oc, audio_codec, audio_st);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
if (frame)
|
||||
frame->pts = 0;
|
||||
for (;;) {
|
||||
/* Compute current audio and video time. */
|
||||
audio_time = audio_st ? audio_st->pts.val * av_q2d(audio_st->time_base) : 0.0;
|
||||
video_time = video_st ? video_st->pts.val * av_q2d(video_st->time_base) : 0.0;
|
||||
|
||||
if ((!audio_st || audio_time >= STREAM_DURATION) &&
|
||||
(!video_st || video_time >= STREAM_DURATION))
|
||||
break;
|
||||
|
||||
/* write interleaved audio and video frames */
|
||||
if (!video_st || (video_st && audio_st && audio_time < video_time)) {
|
||||
write_audio_frame(oc, audio_st);
|
||||
} else {
|
||||
write_video_frame(oc, video_st);
|
||||
frame->pts += av_rescale_q(1, video_st->codec->time_base, video_st->time_base);
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts open when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (video_st)
|
||||
close_video(oc, video_st);
|
||||
if (audio_st)
|
||||
close_audio(oc, audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_close(oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
|
@ -1,12 +0,0 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavcodec
includedir=${pcfiledir}/../../..

Name: libavcodec
Description: FFmpeg codec library
Version: 55.39.101
Requires: libavutil = 52.48.101
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavcodec
Cflags: -I${includedir}
@ -1,12 +0,0 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavdevice
includedir=${pcfiledir}/../../..

Name: libavdevice
Description: FFmpeg device handling library
Version: 55.5.100
Requires: libavformat = 55.19.104
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavdevice
Cflags: -I${includedir}
@ -1,12 +0,0 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavformat
includedir=${pcfiledir}/../../..

Name: libavformat
Description: FFmpeg container format library
Version: 55.19.104
Requires: libavcodec = 55.39.101
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavformat
Cflags: -I${includedir}
@ -1,12 +0,0 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavutil
includedir=${pcfiledir}/../../..

Name: libavutil
Description: FFmpeg utility library
Version: 52.48.101
Requires:
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavutil
Cflags: -I${includedir}
@ -1,12 +0,0 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libswresample
includedir=${pcfiledir}/../../..

Name: libswresample
Description: FFmpeg audio resampling library
Version: 0.17.104
Requires: libavutil = 52.48.101
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lswresample
Cflags: -I${includedir}
@ -1,12 +0,0 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libswscale
includedir=${pcfiledir}/../../..

Name: libswscale
Description: FFmpeg image rescaling library
Version: 2.5.101
Requires: libavutil = 52.48.101
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lswscale
Cflags: -I${includedir}
@ -1,211 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @example doc/examples/resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"Sample format %s not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill dst buffer with nb_samples, generated starting from t.
|
||||
*/
|
||||
void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
|
||||
{
|
||||
int i, j;
|
||||
double tincr = 1.0 / sample_rate, *dstp = dst;
|
||||
const double c = 2 * M_PI * 440.0;
|
||||
|
||||
/* generate sin tone with 440Hz frequency and duplicated channels */
|
||||
for (i = 0; i < nb_samples; i++) {
|
||||
*dstp = sin(c * *t);
|
||||
for (j = 1; j < nb_channels; j++)
|
||||
dstp[j] = dstp[0];
|
||||
dstp += nb_channels;
|
||||
*t += tincr;
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
int src_linesize, dst_linesize;
|
||||
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
|
||||
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s output_file\n"
|
||||
"API example program to show how to resample an audio stream with libswresample.\n"
|
||||
"This program generates a series of audio frames, resamples them to a specified "
|
||||
"output format and rate and saves them to an output file named output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = dst_nb_samples =
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
t = 0;
|
||||
do {
|
||||
/* generate synthetic audio */
|
||||
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
|
||||
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_free(dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
|
||||
ret, dst_sample_fmt, 1);
|
||||
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
} while (t < 10);
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
if (dst_file)
|
||||
fclose(dst_file);
|
||||
|
||||
if (src_data)
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&src_data);
|
||||
|
||||
if (dst_data)
|
||||
av_freep(&dst_data[0]);
|
||||
av_freep(&dst_data);
|
||||
|
||||
swr_free(&swr_ctx);
|
||||
return ret < 0;
|
||||
}
|
@ -1,141 +0,0 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example doc/examples/scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
if (dst_file)
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
|
@ -1,556 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle FFmpeg FAQ
@titlepage
@center @titlefont{FFmpeg FAQ}
@end titlepage

@top

@contents

@chapter General Questions

@section Why doesn't FFmpeg support feature [xyz]?

Because no one has taken on that task yet. FFmpeg development is
driven by the tasks that are important to the individual developers.
If there is a feature that is important to you, the best way to get
it implemented is to undertake the task yourself or sponsor a developer.

@section FFmpeg does not support codec XXX. Can you include a Windows DLL loader to support it?

No. Windows DLLs are not portable, bloated and often slow.
Moreover FFmpeg strives to support all codecs natively.
A DLL loader is not conducive to that goal.

@section I cannot read this file although this format seems to be supported by ffmpeg.

Even if ffmpeg can read the container format, it may not support all its
codecs. Please consult the supported codec list in the ffmpeg
documentation.

@section Which codecs are supported by Windows?

Windows does not support standard formats like MPEG very well, unless you
install some additional codecs.

The following list of video codecs should work on most Windows systems:
@table @option
@item msmpeg4v2
.avi/.asf
@item msmpeg4
.asf only
@item wmv1
.asf only
@item wmv2
.asf only
@item mpeg4
Only if you have some MPEG-4 codec like ffdshow or Xvid installed.
@item mpeg1video
.mpg only
@end table
Note, ASF files often have .wmv or .wma extensions in Windows. It should also
be mentioned that Microsoft claims a patent on the ASF format, and may sue
or threaten users who create ASF files with non-Microsoft software. It is
strongly advised to avoid ASF where possible.

The following list of audio codecs should work on most Windows systems:
@table @option
@item adpcm_ima_wav
@item adpcm_ms
@item pcm_s16le
always
@item libmp3lame
If some MP3 codec like LAME is installed.
@end table
||||
|
||||
|
||||
@chapter Compilation
|
||||
|
||||
@section @code{error: can't find a register in class 'GENERAL_REGS' while reloading 'asm'}
|
||||
|
||||
This is a bug in gcc. Do not report it to us. Instead, please report it to
|
||||
the gcc developers. Note that we will not add workarounds for gcc bugs.
|
||||
|
||||
Also note that (some of) the gcc developers believe this is not a bug or
|
||||
not a bug they should fix:
|
||||
@url{http://gcc.gnu.org/bugzilla/show_bug.cgi?id=11203}.
|
||||
Then again, some of them do not know the difference between an undecidable
|
||||
problem and an NP-hard problem...
|
||||
|
||||
@section I have installed this library with my distro's package manager. Why does @command{configure} not see it?
|
||||
|
||||
Distributions usually split libraries in several packages. The main package
|
||||
contains the files necessary to run programs using the library. The
|
||||
development package contains the files necessary to build programs using the
|
||||
library. Sometimes, docs and/or data are in a separate package too.
|
||||
|
||||
To build FFmpeg, you need to install the development package. It is usually
|
||||
called @file{libfoo-dev} or @file{libfoo-devel}. You can remove it after the
|
||||
build is finished, but be sure to keep the main package.
|
||||
|
||||
@chapter Usage
|
||||
|
||||
@section ffmpeg does not work; what is wrong?
|
||||
|
||||
Try a @code{make distclean} in the ffmpeg source directory before the build.
|
||||
If this does not help see
|
||||
(@url{http://ffmpeg.org/bugreports.html}).
|
||||
|
||||
@section How do I encode single pictures into movies?
|
||||
|
||||
First, rename your pictures to follow a numerical sequence.
|
||||
For example, img1.jpg, img2.jpg, img3.jpg,...
|
||||
Then you may run:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -i img%d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
|
||||
Notice that @samp{%d} is replaced by the image number.
|
||||
|
||||
@file{img%03d.jpg} means the sequence @file{img001.jpg}, @file{img002.jpg}, etc.
|
||||
|
||||
Use the @option{-start_number} option to declare a starting number for
|
||||
the sequence. This is useful if your sequence does not start with
|
||||
@file{img001.jpg} but is still in a numerical order. The following
|
||||
example will start with @file{img100.jpg}:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -start_number 100 -i img%d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
|
||||
If you have a large number of pictures to rename, you can use the
|
||||
following command to ease the burden. The command, using the bourne
|
||||
shell syntax, symbolically links all files in the current directory
|
||||
that match @code{*jpg} to the @file{/tmp} directory in the sequence of
|
||||
@file{img001.jpg}, @file{img002.jpg} and so on.
|
||||
|
||||
@example
|
||||
x=1; for i in *jpg; do counter=$(printf %03d $x); ln -s "$i" /tmp/img"$counter".jpg; x=$(($x+1)); done
|
||||
@end example
|
||||
|
||||
If you want to sequence them by oldest modified first, substitute
|
||||
@code{$(ls -r -t *jpg)} in place of @code{*jpg}.
|
||||
|
||||
Then run:
|
||||
|
||||
@example
|
||||
ffmpeg -f image2 -i /tmp/img%03d.jpg /tmp/a.mpg
|
||||
@end example
|
||||
|
||||
The same logic is used for any image format that ffmpeg reads.
|
||||
|
||||
You can also use @command{cat} to pipe images to ffmpeg:
|
||||
|
||||
@example
|
||||
cat *.jpg | ffmpeg -f image2pipe -c:v mjpeg -i - output.mpg
|
||||
@end example
|
||||
|
||||
@section How do I encode movie to single pictures?
|
||||
|
||||
Use:
|
||||
|
||||
@example
|
||||
ffmpeg -i movie.mpg movie%d.jpg
|
||||
@end example
|
||||
|
||||
The @file{movie.mpg} used as input will be converted to
|
||||
@file{movie1.jpg}, @file{movie2.jpg}, etc...
|
||||
|
||||
Instead of relying on file format self-recognition, you may also use
|
||||
@table @option
|
||||
@item -c:v ppm
|
||||
@item -c:v png
|
||||
@item -c:v mjpeg
|
||||
@end table
|
||||
to force the encoding.
|
||||
|
||||
Applying that to the previous example:
|
||||
@example
|
||||
ffmpeg -i movie.mpg -f image2 -c:v mjpeg menu%d.jpg
|
||||
@end example
|
||||
|
||||
Beware that there is no "jpeg" codec. Use "mjpeg" instead.
|
||||
|
||||
@section Why do I see a slight quality degradation with multithreaded MPEG* encoding?
|
||||
|
||||
For multithreaded MPEG* encoding, the encoded slices must be independent,
|
||||
otherwise thread n would practically have to wait for n-1 to finish, so it's
|
||||
quite logical that there is a small reduction of quality. This is not a bug.
|
||||
|
||||
@section How can I read from the standard input or write to the standard output?
|
||||
|
||||
Use @file{-} as file name.
|
||||
|
||||
@section -f jpeg doesn't work.
|
||||
|
||||
Try '-f image2 test%d.jpg'.
|
||||
|
||||
@section Why can I not change the frame rate?
|
||||
|
||||
Some codecs, like MPEG-1/2, only allow a small number of fixed frame rates.
|
||||
Choose a different codec with the -c:v command line option.
|
||||
|
||||
@section How do I encode Xvid or DivX video with ffmpeg?
|
||||
|
||||
Both Xvid and DivX (version 4+) are implementations of the ISO MPEG-4
|
||||
standard (note that there are many other coding formats that use this
|
||||
same standard). Thus, use '-c:v mpeg4' to encode in these formats. The
|
||||
default fourcc stored in an MPEG-4-coded file will be 'FMP4'. If you want
|
||||
a different fourcc, use the '-vtag' option. E.g., '-vtag xvid' will
|
||||
force the fourcc 'xvid' to be stored as the video fourcc rather than the
|
||||
default.
|
||||
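For example, a minimal sketch (the file names are only illustrative):

@example
ffmpeg -i input.avi -c:v mpeg4 -vtag xvid output.avi
@end example

The '-vtag xvid' part only changes the stored fourcc; the bitstream itself is
still standard MPEG-4.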
|
||||
@section Which are good parameters for encoding high quality MPEG-4?
|
||||
|
||||
'-mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -pass 1/2',
|
||||
things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
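As a rough single-pass sketch combining the options above (the file names and
the bitrate are assumptions, not part of the recommendation):

@example
ffmpeg -i input.avi -c:v mpeg4 -mbd rd -flags +mv4+aic -trellis 2 -cmp 2 -subcmp 2 -g 300 -bf 2 -b:v 2M output.avi
@end example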
|
||||
@section Which are good parameters for encoding high quality MPEG-1/MPEG-2?
|
||||
|
||||
'-mbd rd -trellis 2 -cmp 2 -subcmp 2 -g 100 -pass 1/2'
|
||||
but beware that '-g 100' might cause problems with some decoders.
|
||||
Things to try: '-bf 2', '-flags qprd', '-flags mv0', '-flags skiprd'.
|
||||
|
||||
@section Interlaced video looks very bad when encoded with ffmpeg, what is wrong?
|
||||
|
||||
You should use '-flags +ilme+ildct' and maybe '-flags +alt' for interlaced
|
||||
material, and try '-top 0/1' if the result looks really messed-up.
|
||||
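A hedged sketch for interlaced MPEG-2 output (bitrate and file names are
illustrative):

@example
ffmpeg -i interlaced.vob -c:v mpeg2video -flags +ilme+ildct -top 1 -b:v 5M output.mpg
@end example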
|
||||
@section How can I read DirectShow files?
|
||||
|
||||
If you have built FFmpeg with @code{./configure --enable-avisynth}
|
||||
(only possible on MinGW/Cygwin platforms),
|
||||
then you may use any file that DirectShow can read as input.
|
||||
|
||||
Just create an "input.avs" text file with this single line ...
|
||||
@example
|
||||
DirectShowSource("C:\path to your file\yourfile.asf")
|
||||
@end example
|
||||
... and then feed that text file to ffmpeg:
|
||||
@example
|
||||
ffmpeg -i input.avs
|
||||
@end example
|
||||
|
||||
For ANY other help on AviSynth, please visit the
|
||||
@uref{http://www.avisynth.org/, AviSynth homepage}.
|
||||
|
||||
@section How can I join video files?
|
||||
|
||||
To "join" video files is quite ambiguous. The following list explains the
|
||||
different kinds of "joining" and points out how those are addressed in
|
||||
FFmpeg. To join video files may mean:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
To put them one after the other: this is called to @emph{concatenate} them
|
||||
(in short: concat) and is addressed
|
||||
@ref{How can I concatenate video files, in this very faq}.
|
||||
|
||||
@item
|
||||
To put them together in the same file, to let the user choose between the
|
||||
different versions (example: different audio languages): this is called to
|
||||
@emph{multiplex} them together (in short: mux), and is done by simply
|
||||
invoking ffmpeg with several @option{-i} options.
|
||||
|
||||
@item
|
||||
For audio, to put all channels together in a single stream (example: two
|
||||
mono streams into one stereo stream): this is sometimes called to
|
||||
@emph{merge} them, and can be done using the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#amerge, @code{amerge}} filter.
|
||||
|
||||
@item
|
||||
For audio, to play one on top of the other: this is called to @emph{mix}
|
||||
them, and can be done by first merging them into a single stream and then
|
||||
using the @url{http://ffmpeg.org/ffmpeg-filters.html#pan, @code{pan}} filter to mix
|
||||
the channels at will.
|
||||
|
||||
@item
|
||||
For video, to display both together, side by side or one on top of a part of
|
||||
the other; it can be done using the
|
||||
@url{http://ffmpeg.org/ffmpeg-filters.html#overlay, @code{overlay}} video filter.
|
||||
|
||||
@end itemize
|
||||
|
||||
@anchor{How can I concatenate video files}
|
||||
@section How can I concatenate video files?
|
||||
|
||||
There are several solutions, depending on the exact circumstances.
|
||||
|
||||
@subsection Concatenating using the concat @emph{filter}
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-filters.html#concat,
|
||||
@code{concat}} filter designed specifically for that, with examples in the
|
||||
documentation. This operation is recommended if you need to re-encode.
|
||||
|
||||
@subsection Concatenating using the concat @emph{demuxer}
|
||||
|
||||
FFmpeg has a @url{http://www.ffmpeg.org/ffmpeg-formats.html#concat,
|
||||
@code{concat}} demuxer which you can use when you want to avoid a re-encode and
|
||||
your format doesn't support file level concatenation.
|
||||
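For instance, assuming two inputs that share the same codec parameters (the
list file name and the inputs are illustrative), a minimal sketch is:

@example
# files.txt
file 'part1.mp4'
file 'part2.mp4'

ffmpeg -f concat -i files.txt -c copy joined.mp4
@end example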
|
||||
@subsection Concatenating using the concat @emph{protocol} (file level)
|
||||
|
||||
FFmpeg has a @url{http://ffmpeg.org/ffmpeg-protocols.html#concat,
|
||||
@code{concat}} protocol designed specifically for that, with examples in the
|
||||
documentation.
|
||||
|
||||
A few multimedia containers (MPEG-1, MPEG-2 PS, DV) allow concatenation of
|
||||
video by merely concatenating the files containing them.
|
||||
|
||||
Hence you may concatenate your multimedia files by first transcoding them to
|
||||
these privileged formats, then using the humble @code{cat} command (or the
|
||||
equally humble @code{copy} under Windows), and finally transcoding back to your
|
||||
format of choice.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
cat intermediate1.mpg intermediate2.mpg > intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
@end example
|
||||
|
||||
Additionally, you can use the @code{concat} protocol instead of @code{cat} or
|
||||
@code{copy} which will avoid creation of a potentially huge intermediate file.
|
||||
|
||||
@example
|
||||
ffmpeg -i input1.avi -qscale:v 1 intermediate1.mpg
|
||||
ffmpeg -i input2.avi -qscale:v 1 intermediate2.mpg
|
||||
ffmpeg -i concat:"intermediate1.mpg|intermediate2.mpg" -c copy intermediate_all.mpg
|
||||
ffmpeg -i intermediate_all.mpg -qscale:v 2 output.avi
|
||||
@end example
|
||||
|
||||
Note that you may need to escape the character "|" which is special for many
|
||||
shells.
|
||||
|
||||
Another option is to use named pipes, should your platform support them:
|
||||
|
||||
@example
|
||||
mkfifo intermediate1.mpg
|
||||
mkfifo intermediate2.mpg
|
||||
ffmpeg -i input1.avi -qscale:v 1 -y intermediate1.mpg < /dev/null &
|
||||
ffmpeg -i input2.avi -qscale:v 1 -y intermediate2.mpg < /dev/null &
|
||||
cat intermediate1.mpg intermediate2.mpg |\
|
||||
ffmpeg -f mpeg -i - -c:v mpeg4 -acodec libmp3lame output.avi
|
||||
@end example
|
||||
|
||||
@subsection Concatenating using raw audio and video
|
||||
|
||||
Similarly, the yuv4mpegpipe format, and the raw video, raw audio codecs also
|
||||
allow concatenation, and the transcoding step is almost lossless.
|
||||
When using multiple yuv4mpegpipe(s), the first line needs to be discarded
|
||||
from all but the first stream. This can be accomplished by piping through
|
||||
@code{tail} as seen below. Note that when piping through @code{tail} you
|
||||
must use command grouping, @code{@{ ;@}}, to background properly.
|
||||
|
||||
For example, let's say we want to concatenate two FLV files into an
|
||||
output.flv file:
|
||||
|
||||
@example
|
||||
mkfifo temp1.a
|
||||
mkfifo temp1.v
|
||||
mkfifo temp2.a
|
||||
mkfifo temp2.v
|
||||
mkfifo all.a
|
||||
mkfifo all.v
|
||||
ffmpeg -i input1.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp1.a < /dev/null &
|
||||
ffmpeg -i input2.flv -vn -f u16le -acodec pcm_s16le -ac 2 -ar 44100 - > temp2.a < /dev/null &
|
||||
ffmpeg -i input1.flv -an -f yuv4mpegpipe - > temp1.v < /dev/null &
|
||||
@{ ffmpeg -i input2.flv -an -f yuv4mpegpipe - < /dev/null | tail -n +2 > temp2.v ; @} &
|
||||
cat temp1.a temp2.a > all.a &
|
||||
cat temp1.v temp2.v > all.v &
|
||||
ffmpeg -f u16le -acodec pcm_s16le -ac 2 -ar 44100 -i all.a \
|
||||
-f yuv4mpegpipe -i all.v \
|
||||
-y output.flv
|
||||
rm temp[12].[av] all.[av]
|
||||
@end example
|
||||
|
||||
@section -profile option fails when encoding H.264 video with AAC audio
|
||||
|
||||
@command{ffmpeg} prints an error like
|
||||
|
||||
@example
|
||||
Undefined constant or missing '(' in 'baseline'
|
||||
Unable to parse option value "baseline"
|
||||
Error setting option profile to value baseline.
|
||||
@end example
|
||||
|
||||
Short answer: write @option{-profile:v} instead of @option{-profile}.
|
||||
|
||||
Long answer: this happens because the @option{-profile} option can apply to both
|
||||
video and audio. Specifically the AAC encoder also defines some profiles, none
|
||||
of which are named @var{baseline}.
|
||||
|
||||
The solution is to apply the @option{-profile} option to the video stream only
|
||||
by using @url{http://ffmpeg.org/ffmpeg.html#Stream-specifiers-1, Stream specifiers}.
|
||||
Appending @code{:v} to it will do exactly that.
|
||||
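A hedged example (the codec choices and file names are assumptions, not part
of the error above):

@example
ffmpeg -i input.mov -c:v libx264 -profile:v baseline -c:a copy output.mp4
@end example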
|
||||
@section Using @option{-f lavfi}, audio becomes mono for no apparent reason.
|
||||
|
||||
Use @option{-dumpgraph -} to find out exactly where the channel layout is
|
||||
lost.
|
||||
|
||||
Most likely, it is through @code{auto-inserted aresample}. Try to understand
|
||||
why the converting filter was needed at that place.
|
||||
|
||||
Just before the output is a likely place, as @option{-f lavfi} currently
|
||||
only supports packed S16.
|
||||
|
||||
Then insert the correct @code{aformat} explicitly in the filtergraph,
|
||||
specifying the exact format.
|
||||
|
||||
@example
|
||||
aformat=sample_fmts=s16:channel_layouts=stereo
|
||||
@end example
|
||||
|
||||
@section Why does FFmpeg not see the subtitles in my VOB file?
|
||||
|
||||
VOB and a few other formats do not have a global header that describes
|
||||
everything present in the file. Instead, applications are supposed to scan
|
||||
the file to see what it contains. Since VOB files are frequently large, only
|
||||
the beginning is scanned. If the subtitles happen only later in the file,
|
||||
they will not be initially detected.
|
||||
|
||||
Some applications, including the @code{ffmpeg} command-line tool, can only
|
||||
work with streams that were detected during the initial scan; streams that
|
||||
are detected later are ignored.
|
||||
|
||||
The size of the initial scan is controlled by two options: @code{probesize}
|
||||
(default ~5 MB) and @code{analyzeduration} (default 5,000,000 µs = 5 s). For
|
||||
the subtitle stream to be detected, both values must be large enough.
|
||||
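As a sketch with purely illustrative values (both raised well above the
defaults):

@example
ffmpeg -probesize 50000000 -analyzeduration 50000000 -i input.vob -map 0 -c copy output.mkv
@end example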
|
||||
@section Why was the @command{ffmpeg} @option{-sameq} option removed? What to use instead?
|
||||
|
||||
The @option{-sameq} option meant "same quantizer", and made sense only in a
|
||||
very limited set of cases. Unfortunately, a lot of people mistook it for
|
||||
"same quality" and used it in places where it did not make sense: it had
|
||||
roughly the expected visible effect, but achieved it in a very inefficient
|
||||
way.
|
||||
|
||||
Each encoder has its own set of options to set the quality-vs-size balance,
|
||||
use the options for the encoder you are using to set the quality level to a
|
||||
point acceptable for your tastes. The most common options to do that are
|
||||
@option{-qscale} and @option{-qmax}, but you should peruse the documentation
|
||||
of the encoder you chose.
|
||||
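For example, with the MPEG-4 encoder one might write (the value 3 is only an
illustration; lower values mean higher quality):

@example
ffmpeg -i input.avi -c:v mpeg4 -qscale:v 3 -c:a copy output.avi
@end example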
|
||||
@chapter Development
|
||||
|
||||
@section Are there examples illustrating how to use the FFmpeg libraries, particularly libavcodec and libavformat?
|
||||
|
||||
Yes. Check the @file{doc/examples} directory in the source
|
||||
repository, also available online at:
|
||||
@url{https://github.com/FFmpeg/FFmpeg/tree/master/doc/examples}.
|
||||
|
||||
Examples are also installed by default, usually in
|
||||
@code{$PREFIX/share/ffmpeg/examples}.
|
||||
|
||||
Also you may read the Developers Guide of the FFmpeg documentation. Alternatively,
|
||||
examine the source code for one of the many open source projects that
|
||||
already incorporate FFmpeg at (@url{projects.html}).
|
||||
|
||||
@section Can you support my C compiler XXX?
|
||||
|
||||
It depends. If your compiler is C99-compliant, then patches to support
|
||||
it are likely to be welcome if they do not pollute the source code
|
||||
with @code{#ifdef}s related to the compiler.
|
||||
|
||||
@section Is Microsoft Visual C++ supported?
|
||||
|
||||
Yes. Please see the @uref{platform.html, Microsoft Visual C++}
|
||||
section in the FFmpeg documentation.
|
||||
|
||||
@section Can you add automake, libtool or autoconf support?
|
||||
|
||||
No. These tools are too bloated and they complicate the build.
|
||||
|
||||
@section Why not rewrite FFmpeg in object-oriented C++?
|
||||
|
||||
FFmpeg is already organized in a highly modular manner and does not need to
|
||||
be rewritten in a formal object language. Further, many of the developers
|
||||
favor straight C; it works for them. For more arguments on this matter,
|
||||
read @uref{http://www.tux.org/lkml/#s15, "Programming Religion"}.
|
||||
|
||||
@section Why are the ffmpeg programs devoid of debugging symbols?
|
||||
|
||||
The build process creates @command{ffmpeg_g}, @command{ffplay_g}, etc. which
|
||||
contain full debug information. Those binaries are stripped to create
|
||||
@command{ffmpeg}, @command{ffplay}, etc. If you need the debug information, use
|
||||
the *_g versions.
|
||||
|
||||
@section I do not like the LGPL, can I contribute code under the GPL instead?
|
||||
|
||||
Yes, as long as the code is optional and can easily and cleanly be placed
|
||||
under #if CONFIG_GPL without breaking anything. So, for example, a new codec
|
||||
or filter would be OK under GPL while a bug fix to LGPL code would not.
|
||||
|
||||
@section I'm using FFmpeg from within my C application but the linker complains about missing symbols from the libraries themselves.
|
||||
|
||||
FFmpeg builds static libraries by default. In static libraries, dependencies
|
||||
are not handled. That has two consequences. First, you must specify the
|
||||
libraries in dependency order: @code{-lavdevice} must come before
|
||||
@code{-lavformat}, @code{-lavutil} must come after everything else, etc.
|
||||
Second, external libraries that are used in FFmpeg have to be specified too.
|
||||
|
||||
An easy way to get the full list of required libraries in dependency order
|
||||
is to use @code{pkg-config}.
|
||||
|
||||
@example
|
||||
c99 -o program program.c $(pkg-config --cflags --libs libavformat libavcodec)
|
||||
@end example
|
||||
|
||||
See @file{doc/examples/Makefile} and @file{doc/examples/pc-uninstalled} for
|
||||
more details.
|
||||
|
||||
@section I'm using FFmpeg from within my C++ application but the linker complains about missing symbols which seem to be available.
|
||||
|
||||
FFmpeg is a pure C project, so to use the libraries within your C++ application
|
||||
you need to explicitly state that you are using a C library. You can do this by
|
||||
encompassing your FFmpeg includes using @code{extern "C"}.
|
||||
|
||||
See @url{http://www.parashift.com/c++-faq-lite/mixing-c-and-cpp.html#faq-32.3}
|
||||
|
||||
@section I'm using libavutil from within my C++ application but the compiler complains about 'UINT64_C' was not declared in this scope
|
||||
|
||||
FFmpeg is a pure C project using C99 math features. In order to enable C++
|
||||
to use them, you have to append -D__STDC_CONSTANT_MACROS to your CXXFLAGS.
|
||||
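For example, with g++ this could look like (the source file name is
hypothetical):

@example
g++ -D__STDC_CONSTANT_MACROS -c player.cpp -o player.o
@end example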
|
||||
@section I have a file in memory / an API different from *open/*read/libc; how do I use it with libavformat?
|
||||
|
||||
You have to create a custom AVIOContext using @code{avio_alloc_context},
|
||||
see @file{libavformat/aviobuf.c} in FFmpeg and @file{libmpdemux/demux_lavf.c} in MPlayer or MPlayer2 sources.
|
||||
|
||||
@section Where is the documentation about ffv1, msmpeg4, asv1, 4xm?
|
||||
|
||||
See @url{http://www.ffmpeg.org/~michael/}.
|
||||
|
||||
@section How do I feed H.263-RTP (and other codecs in RTP) to libavcodec?
|
||||
|
||||
Even though it is peculiar in being network oriented, RTP is a container like any
|
||||
other. You have to @emph{demux} RTP before feeding the payload to libavcodec.
|
||||
In this specific case please look at RFC 4629 to see how it should be done.
|
||||
|
||||
@section AVStream.r_frame_rate is wrong, it is much larger than the frame rate.
|
||||
|
||||
@code{r_frame_rate} is NOT the average frame rate; it is the smallest frame rate
|
||||
that can accurately represent all timestamps. So no, it is not
|
||||
wrong if it is larger than the average!
|
||||
For example, if you have mixed 25 and 30 fps content, then @code{r_frame_rate}
|
||||
will be 150 (it is the least common multiple).
|
||||
If you are looking for the average frame rate, see @code{AVStream.avg_frame_rate}.
|
||||
|
||||
@section Why is @code{make fate} not running all tests?
|
||||
|
||||
Make sure you have the fate-suite samples and the @code{SAMPLES} Make variable
|
||||
or @code{FATE_SAMPLES} environment variable or the @code{--samples}
|
||||
@command{configure} option is set to the right path.
|
||||
|
||||
@section Why is @code{make fate} not finding the samples?
|
||||
|
||||
Do you happen to have a @code{~} character in the samples path to indicate a
|
||||
home directory? The value is used in ways where the shell cannot expand it,
|
||||
causing FATE to not find files. Just replace @code{~} by the full path.
|
||||
|
||||
@bye
|
@ -1,205 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Automated Testing Environment
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Automated Testing Environment}
|
||||
@end titlepage
|
||||
|
||||
@node Top
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Introduction
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means
|
||||
for results aggregation and presentation on the server-side.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
|
||||
In any case, you can have a look at the publicly viewable FATE results
|
||||
by visiting this website:
|
||||
|
||||
@url{http://fate.ffmpeg.org/}
|
||||
|
||||
This is especially recommended for all people contributing source
|
||||
code to FFmpeg, as it shows whether some test on some platform broke
|
||||
with their recent contribution. This usually happens on the platforms
|
||||
the developers could not test on.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE makefile
|
||||
targets and variables.
|
||||
|
||||
|
||||
@chapter Using FATE from your FFmpeg source directory
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples
|
||||
in place. You can get the samples via the build target fate-rsync.
|
||||
Use this command from the top-level source directory:
|
||||
|
||||
@example
|
||||
make fate-rsync SAMPLES=fate-suite/
|
||||
make fate SAMPLES=fate-suite/
|
||||
@end example
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can
|
||||
invoke the makefile targets without setting the SAMPLES makefile
|
||||
variable. This is illustrated by the following commands:
|
||||
|
||||
@example
|
||||
./configure --samples=fate-suite/
|
||||
make fate-rsync
|
||||
make fate
|
||||
@end example
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved
|
||||
by e.g. putting that variable in your shell profile or by setting
|
||||
it in your interactive session.
|
||||
|
||||
@example
|
||||
FATE_SAMPLES=fate-suite/ make fate
|
||||
@end example
|
||||
|
||||
@float NOTE
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
@end float
|
||||
|
||||
To use a custom wrapper to run the test, pass @option{--target-exec} to
|
||||
@command{configure} or set the @var{TARGET_EXEC} Make variable.
|
||||
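As a sketch, wrapping every test in @command{valgrind} (the exact options are
one possible choice, not a requirement):

@example
make fate TARGET_EXEC="valgrind --error-exitcode=1" SAMPLES=fate-suite/
@end example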
|
||||
|
||||
@chapter Submitting the results to the FFmpeg result aggregation server
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script @file{tests/fate.sh} from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
@example
|
||||
tests/fate.sh /path/to/fate_config
|
||||
@end example
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at @file{doc/fate_config.sh.template}.
|
||||
|
||||
@ifhtml
|
||||
The mentioned configuration template is also available here:
|
||||
@verbatiminclude fate_config.sh.template
|
||||
@end ifhtml
|
||||
|
||||
Create a configuration that suits your needs, based on the configuration
|
||||
template. The `slot' configuration variable can be any string that is not
|
||||
yet used, but it is suggested that you name it adhering to the following
|
||||
pattern <arch>-<os>-<compiler>-<compiler version>. The configuration file
|
||||
itself will be sourced in a shell script, therefore all shell features may
|
||||
be used. This enables you to set up the environment as you need it for your
|
||||
build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will omit
|
||||
the submission of the results to the server. The following files should be
|
||||
present in $workdir as specified in the configuration file:
|
||||
|
||||
@itemize
|
||||
@item configure.log
|
||||
@item compile.log
|
||||
@item test.log
|
||||
@item report
|
||||
@item version
|
||||
@end itemize
|
||||
|
||||
When you have everything working properly you can create an SSH key pair
|
||||
and send the public key to the FATE server administrator who can be contacted
|
||||
at the email address @email{fate-admin@@ffmpeg.org}.
|
||||
|
||||
Configure your SSH client to use public key authentication with that key
|
||||
when connecting to the FATE server. Also do not forget to check the identity
|
||||
of the server and to accept its host key. This can usually be achieved by
|
||||
running your SSH client manually and killing it after you accepted the key.
|
||||
The FATE server's fingerprint is:
|
||||
|
||||
@table @option
|
||||
@item RSA
|
||||
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
|
||||
@item ECDSA
|
||||
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
|
||||
@end table
|
||||
|
||||
If you have problems connecting to the FATE server, it may help to try out
|
||||
the @command{ssh} command with one or more @option{-v} options. You should
|
||||
get detailed output concerning your SSH configuration and the authentication
|
||||
process.
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh script and
|
||||
the synchronisation of the samples directory.
|
||||
|
||||
|
||||
@chapter FATE makefile targets and variables
|
||||
|
||||
@section Makefile targets
|
||||
|
||||
@table @option
|
||||
@item fate-rsync
|
||||
Download/synchronize sample files to the configured samples directory.
|
||||
|
||||
@item fate-list
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
@item fate
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
@end table
|
||||
|
||||
@section Makefile variables
|
||||
|
||||
@table @option
|
||||
@item V
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
@itemize
|
||||
@item 0: show just the test arguments
|
||||
@item 1: show just the command used in the test
|
||||
@item 2: show everything
|
||||
@end itemize
|
||||
|
||||
@item SAMPLES
|
||||
Specify or override the path to the FATE samples at make time, it has a
|
||||
meaning only while running the regression tests.
|
||||
|
||||
@item THREADS
|
||||
Specify how many threads to use while running regression tests, it is
|
||||
quite useful to detect thread-related regressions.
|
||||
|
||||
@item THREAD_TYPE
|
||||
Specify which threading strategy to test, either @var{slice} or @var{frame},
|
||||
by default @var{slice+frame}
|
||||
|
||||
@item CPUFLAGS
|
||||
Specify CPU flags.
|
||||
|
||||
@item TARGET_EXEC
|
||||
Specify or override the wrapper used to run the tests.
|
||||
The @var{TARGET_EXEC} option provides a way to run FATE wrapped in
|
||||
@command{valgrind}, @command{qemu-user} or @command{wine} or on remote targets
|
||||
through @command{ssh}.
|
||||
|
||||
@item GEN
|
||||
Set to @var{1} to generate the missing or mismatched references.
|
||||
@end table
|
||||
|
||||
@section Examples
|
||||
|
||||
@example
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
|
||||
@end example
|
@ -1,197 +0,0 @@
|
||||
FFmpeg Automated Testing Environment
|
||||
************************************
|
||||
|
||||
Table of Contents
|
||||
*****************
|
||||
|
||||
FFmpeg Automated Testing Environment
|
||||
1 Introduction
|
||||
2 Using FATE from your FFmpeg source directory
|
||||
3 Submitting the results to the FFmpeg result aggregation server
|
||||
4 FATE makefile targets and variables
|
||||
4.1 Makefile targets
|
||||
4.2 Makefile variables
|
||||
4.3 Examples
|
||||
|
||||
|
||||
1 Introduction
|
||||
**************
|
||||
|
||||
FATE is an extended regression suite on the client-side and a means for
|
||||
results aggregation and presentation on the server-side.
|
||||
|
||||
The first part of this document explains how you can use FATE from
|
||||
your FFmpeg source directory to test your ffmpeg binary. The second
|
||||
part describes how you can run FATE to submit the results to FFmpeg's
|
||||
FATE server.
|
||||
|
||||
In any case, you can have a look at the publicly viewable FATE results
|
||||
by visiting this website:
|
||||
|
||||
`http://fate.ffmpeg.org/'
|
||||
|
||||
This is especially recommended for all people contributing source
|
||||
code to FFmpeg, as it shows whether some test on some platform broke
|
||||
with their recent contribution. This usually happens on the platforms
|
||||
the developers could not test on.
|
||||
|
||||
The second part of this document describes how you can run FATE to
|
||||
submit your results to FFmpeg's FATE server. If you want to submit your
|
||||
results be sure to check that your combination of CPU, OS and compiler
|
||||
is not already listed on the above mentioned website.
|
||||
|
||||
In the third part you can find a comprehensive listing of FATE
|
||||
makefile targets and variables.
|
||||
|
||||
2 Using FATE from your FFmpeg source directory
|
||||
**********************************************
|
||||
|
||||
If you want to run FATE on your machine you need to have the samples in
|
||||
place. You can get the samples via the build target fate-rsync. Use
|
||||
this command from the top-level source directory:
|
||||
|
||||
make fate-rsync SAMPLES=fate-suite/
|
||||
make fate SAMPLES=fate-suite/
|
||||
|
||||
The above commands set the samples location by passing a makefile
|
||||
variable via command line. It is also possible to set the samples
|
||||
location at source configuration time by invoking configure with
|
||||
`--samples=<path to the samples directory>'. Afterwards you can invoke
|
||||
the makefile targets without setting the SAMPLES makefile variable.
|
||||
This is illustrated by the following commands:
|
||||
|
||||
./configure --samples=fate-suite/
|
||||
make fate-rsync
|
||||
make fate
|
||||
|
||||
Yet another way to tell FATE about the location of the sample
|
||||
directory is by making sure the environment variable FATE_SAMPLES
|
||||
contains the path to your samples directory. This can be achieved by
|
||||
e.g. putting that variable in your shell profile or by setting it in
|
||||
your interactive session.
|
||||
|
||||
FATE_SAMPLES=fate-suite/ make fate
|
||||
|
||||
Do not put a '~' character in the samples path to indicate a home
|
||||
directory. Because of shell nuances, this will cause FATE to fail.
|
||||
|
||||
NOTE
|
||||
|
||||
To use a custom wrapper to run the test, pass `--target-exec' to
|
||||
`configure' or set the TARGET_EXEC Make variable.
|
||||
|
||||
3 Submitting the results to the FFmpeg result aggregation server
|
||||
****************************************************************
|
||||
|
||||
To submit your results to the server you should run fate through the
|
||||
shell script `tests/fate.sh' from the FFmpeg sources. This script needs
|
||||
to be invoked with a configuration file as its first argument.
|
||||
|
||||
tests/fate.sh /path/to/fate_config
|
||||
|
||||
A configuration file template with comments describing the individual
|
||||
configuration variables can be found at `doc/fate_config.sh.template'.
|
||||
|
||||
Create a configuration that suits your needs, based on the
|
||||
configuration template. The `slot' configuration variable can be any
|
||||
string that is not yet used, but it is suggested that you name it
|
||||
adhering to the following pattern <arch>-<os>-<compiler>-<compiler
|
||||
version>. The configuration file itself will be sourced in a shell
|
||||
script, therefore all shell features may be used. This enables you to
|
||||
set up the environment as you need it for your build.
|
||||
|
||||
For your first test runs the `fate_recv' variable should be empty or
|
||||
commented out. This will run everything as normal except that it will
|
||||
omit the submission of the results to the server. The following files
|
||||
should be present in $workdir as specified in the configuration file:
|
||||
|
||||
* configure.log
|
||||
|
||||
* compile.log
|
||||
|
||||
* test.log
|
||||
|
||||
* report
|
||||
|
||||
* version
|
||||
|
||||
When you have everything working properly you can create an SSH key
|
||||
pair and send the public key to the FATE server administrator who can
|
||||
be contacted at the email address <fate-admin@ffmpeg.org>.
|
||||
|
||||
Configure your SSH client to use public key authentication with that
|
||||
key when connecting to the FATE server. Also do not forget to check the
|
||||
identity of the server and to accept its host key. This can usually be
|
||||
achieved by running your SSH client manually and killing it after you
|
||||
accepted the key. The FATE server's fingerprint is:
|
||||
|
||||
`RSA'
|
||||
d3:f1:83:97:a4:75:2b:a6:fb:d6:e8:aa:81:93:97:51
|
||||
|
||||
`ECDSA'
|
||||
76:9f:68:32:04:1e:d5:d4:ec:47:3f:dc:fc:18:17:86
|
||||
|
||||
If you have problems connecting to the FATE server, it may help to
|
||||
try out the `ssh' command with one or more `-v' options. You should get
|
||||
detailed output concerning your SSH configuration and the authentication
|
||||
process.
|
||||
|
||||
The only thing left is to automate the execution of the fate.sh
|
||||
script and the synchronisation of the samples directory.
|
||||
|
||||
4 FATE makefile targets and variables
|
||||
*************************************
|
||||
|
||||
4.1 Makefile targets
|
||||
====================
|
||||
|
||||
`fate-rsync'
|
||||
Download/synchronize sample files to the configured samples
|
||||
directory.
|
||||
|
||||
`fate-list'
|
||||
Will list all fate/regression test targets.
|
||||
|
||||
`fate'
|
||||
Run the FATE test suite (requires the fate-suite dataset).
|
||||
|
||||
4.2 Makefile variables
|
||||
======================
|
||||
|
||||
`V'
|
||||
Verbosity level, can be set to 0, 1 or 2.
|
||||
* 0: show just the test arguments
|
||||
|
||||
* 1: show just the command used in the test
|
||||
|
||||
* 2: show everything
|
||||
|
||||
`SAMPLES'
|
||||
Specify or override the path to the FATE samples at make time, it
|
||||
has a meaning only while running the regression tests.
|
||||
|
||||
`THREADS'
|
||||
Specify how many threads to use while running regression tests, it
|
||||
is quite useful to detect thread-related regressions.
|
||||
|
||||
`THREAD_TYPE'
|
||||
Specify which threading strategy to test, either SLICE or FRAME, by
|
||||
default SLICE+FRAME
|
||||
|
||||
`CPUFLAGS'
|
||||
Specify CPU flags.
|
||||
|
||||
`TARGET_EXEC'
|
||||
Specify or override the wrapper used to run the tests. The
|
||||
TARGET_EXEC option provides a way to run FATE wrapped in
|
||||
`valgrind', `qemu-user' or `wine' or on remote targets through
|
||||
`ssh'.
|
||||
|
||||
`GEN'
|
||||
Set to 1 to generate the missing or mismatched references.
|
||||
|
||||
4.3 Examples
|
||||
============
|
||||
|
||||
make V=1 SAMPLES=/var/fate/samples THREADS=2 CPUFLAGS=mmx fate
|
||||
|
@ -1,29 +0,0 @@
|
||||
slot= # some unique identifier
|
||||
repo=git://source.ffmpeg.org/ffmpeg.git # the source repository
|
||||
samples= # path to samples directory
|
||||
workdir= # directory in which to do all the work
|
||||
#fate_recv="ssh -T fate@fate.ffmpeg.org" # command to submit report
|
||||
comment= # optional description
|
||||
build_only= # set to "yes" for a compile-only instance that skips tests
|
||||
|
||||
# the following are optional and map to configure options
|
||||
arch=
|
||||
cpu=
|
||||
cross_prefix=
|
||||
as=
|
||||
cc=
|
||||
ld=
|
||||
target_os=
|
||||
sysroot=
|
||||
target_exec=
|
||||
target_path=
|
||||
target_samples=
|
||||
extra_cflags=
|
||||
extra_ldflags=
|
||||
extra_libs=
|
||||
extra_conf= # extra configure options not covered above
|
||||
|
||||
#make= # name of GNU make if not 'make'
|
||||
makeopts= # extra options passed to 'make'
|
||||
#tar= # command to create a tar archive from its arguments on stdout,
|
||||
# defaults to 'tar c'
|
@ -1,200 +0,0 @@
|
||||
=head1 NAME
|
||||
|
||||
ffmpeg-bitstream-filters - FFmpeg bitstream filters
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
|
||||
This document describes the bitstream filters provided by the
|
||||
libavcodec library.
|
||||
|
||||
A bitstream filter operates on the encoded stream data, and performs
|
||||
bitstream level modifications without performing decoding.
|
||||
|
||||
|
||||
|
||||
=head1 BITSTREAM FILTERS
|
||||
|
||||
|
||||
When you configure your FFmpeg build, all the supported bitstream
|
||||
filters are enabled by default. You can list all available ones using
|
||||
the configure option C<--list-bsfs>.
|
||||
|
||||
You can disable all the bitstream filters using the configure option
|
||||
C<--disable-bsfs>, and selectively enable any bitstream filter using
|
||||
the option C<--enable-bsf=BSF>, or you can disable a particular
|
||||
bitstream filter using the option C<--disable-bsf=BSF>.
|
||||
|
||||
The option C<-bsfs> of the ff* tools will display the list of
|
||||
all the supported bitstream filters included in your build.
|
||||
|
||||
Below is a description of the currently available bitstream filters.
|
||||
|
||||
|
||||
=head2 aac_adtstoasc
|
||||
|
||||
|
||||
Convert MPEG-2/4 AAC ADTS to MPEG-4 Audio Specific Configuration
|
||||
bitstream filter.
|
||||
|
||||
This filter creates an MPEG-4 AudioSpecificConfig from an MPEG-2/4
|
||||
ADTS header and removes the ADTS header.
|
||||
|
||||
This is required for example when copying an AAC stream from a raw
|
||||
ADTS AAC container to a FLV or a MOV/MP4 file.
|
||||
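For example, a hedged sketch of such a remux (the file names are
illustrative):

  ffmpeg -i input.aac -c:a copy -bsf:a aac_adtstoasc output.m4a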
|
||||
|
||||
=head2 chomp
|
||||
|
||||
|
||||
Remove zero padding at the end of a packet.
|
||||
|
||||
|
||||
=head2 dump_extra
|
||||
|
||||
|
||||
Add extradata to the beginning of the filtered packets.
|
||||
|
||||
The additional argument specifies which packets should be filtered.
|
||||
It accepts the values:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<a>
|
||||
|
||||
add extradata to all key packets, but only if I<local_header> is
|
||||
set in the B<flags2> codec context field
|
||||
|
||||
|
||||
=item B<k>
|
||||
|
||||
add extradata to all key packets
|
||||
|
||||
|
||||
=item B<e>
|
||||
|
||||
add extradata to all packets
|
||||
|
||||
=back
|
||||
|
||||
|
||||
If not specified it is assumed B<k>.
|
||||
|
||||
For example the following B<ffmpeg> command forces a global
|
||||
header (thus disabling individual packet headers) in the H.264 packets
|
||||
generated by the C<libx264> encoder, but corrects them by adding
|
||||
the header stored in extradata to the key packets:
|
||||
|
||||
ffmpeg -i INPUT -map 0 -flags:v +global_header -c:v libx264 -bsf:v dump_extra out.ts
|
||||
|
||||
|
||||
|
||||
=head2 h264_mp4toannexb
|
||||
|
||||
|
||||
Convert an H.264 bitstream from length prefixed mode to start code
|
||||
prefixed mode (as defined in the Annex B of the ITU-T H.264
|
||||
specification).
|
||||
|
||||
This is required by some streaming formats, typically the MPEG-2
|
||||
transport stream format ("mpegts").
|
||||
|
||||
For example to remux an MP4 file containing an H.264 stream to mpegts
|
||||
format with B<ffmpeg>, you can use the command:
|
||||
|
||||
|
||||
ffmpeg -i INPUT.mp4 -codec copy -bsf:v h264_mp4toannexb OUTPUT.ts
|
||||
|
||||
|
||||
|
||||
=head2 imx_dump_header
|
||||
|
||||
|
||||
|
||||
=head2 mjpeg2jpeg
|
||||
|
||||
|
||||
Convert MJPEG/AVI1 packets to full JPEG/JFIF packets.
|
||||
|
||||
MJPEG is a video codec wherein each video frame is essentially a
|
||||
JPEG image. The individual frames can be extracted without loss,
|
||||
e.g. by
|
||||
|
||||
|
||||
ffmpeg -i ../some_mjpeg.avi -c:v copy frames_%d.jpg
|
||||
|
||||
|
||||
Unfortunately, these chunks are incomplete JPEG images, because
|
||||
they lack the DHT segment required for decoding. Quoting from
|
||||
E<lt>B<http://www.digitalpreservation.gov/formats/fdd/fdd000063.shtml>E<gt>:
|
||||
|
||||
Avery Lee, writing in the rec.video.desktop newsgroup in 2001,
|
||||
commented that "MJPEG, or at least the MJPEG in AVIs having the
|
||||
MJPG fourcc, is restricted JPEG with a fixed -- and *omitted* --
|
||||
Huffman table. The JPEG must be YCbCr colorspace, it must be 4:2:2,
|
||||
and it must use basic Huffman encoding, not arithmetic or
|
||||
progressive. . . . You can indeed extract the MJPEG frames and
|
||||
decode them with a regular JPEG decoder, but you have to prepend
|
||||
the DHT segment to them, or else the decoder won't have any idea
|
||||
how to decompress the data. The exact table necessary is given in
|
||||
the OpenDML spec."
|
||||
|
||||
This bitstream filter patches the header of frames extracted from an MJPEG
|
||||
stream (carrying the AVI1 header ID and lacking a DHT segment) to
|
||||
produce fully qualified JPEG images.
|
||||
|
||||
|
||||
ffmpeg -i mjpeg-movie.avi -c:v copy -bsf:v mjpeg2jpeg frame_%d.jpg
|
||||
exiftran -i -9 frame*.jpg
|
||||
ffmpeg -i frame_%d.jpg -c:v copy rotated.avi
|
||||
|
||||
|
||||
|
||||
=head2 mjpega_dump_header
|
||||
|
||||
|
||||
|
||||
=head2 movsub
|
||||
|
||||
|
||||
|
||||
=head2 mp3_header_compress
|
||||
|
||||
|
||||
|
||||
=head2 mp3_header_decompress
|
||||
|
||||
|
||||
|
||||
=head2 noise
|
||||
|
||||
|
||||
|
||||
=head2 remove_extra
|
||||
|
||||
|
||||
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
|
||||
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
B<git log> in the FFmpeg source directory, or browsing the
|
||||
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
F<MAINTAINERS> in the source code tree.
|
||||
|
||||
|
||||
|
@ -1,45 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Bitstream Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Bitstream Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the bitstream filters provided by the
|
||||
libavcodec library.
|
||||
|
||||
A bitstream filter operates on the encoded stream data, and performs
|
||||
bitstream level modifications without performing decoding.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include bitstream_filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-bitstream-filters
|
||||
@settitle FFmpeg bitstream filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Codecs Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Codecs Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the codecs (decoders and encoders) provided by
|
||||
the libavcodec library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include codecs.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavcodec.html,libavcodec}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavcodec(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-codecs
|
||||
@settitle FFmpeg codecs
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Devices Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Devices Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output devices provided by the
|
||||
libavdevice library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include devices.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavdevice.html,libavdevice}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavdevice(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-devices
|
||||
@settitle FFmpeg devices
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Filters Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Filters Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes filters, sources, and sinks provided by the
|
||||
libavfilter library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include filters.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavfilter.html,libavfilter}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavfilter(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-filters
|
||||
@settitle FFmpeg filters
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Formats Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Formats Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the supported formats (muxers and demuxers)
|
||||
provided by the libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include formats.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-formats
|
||||
@settitle FFmpeg formats
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Protocols Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Protocols Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes the input and output protocols provided by the
|
||||
libavformat library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include protocols.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavformat.html,libavformat}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavformat(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-protocols
|
||||
@settitle FFmpeg protocols
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@ -1,401 +0,0 @@
|
||||
=head1 NAME
|
||||
|
||||
ffmpeg-resampler - FFmpeg Resampler
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
|
||||
The FFmpeg resampler provides a high-level interface to the
|
||||
libswresample library audio resampling utilities. In particular it
|
||||
allows one to perform audio resampling, audio channel layout rematrixing,
|
||||
and audio format and packing layout conversion.
|
||||
|
||||
|
||||
|
||||
=head1 RESAMPLER OPTIONS
|
||||
|
||||
|
||||
The audio resampler supports the following named options.
|
||||
|
||||
Options may be set by specifying -I<option> I<value> in the
|
||||
FFmpeg tools, I<option>=I<value> for the aresample filter,
|
||||
by setting the value explicitly in the
|
||||
C<SwrContext> options or using the F<libavutil/opt.h> API for
|
||||
programmatic use.
|
||||
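For instance, a non-authoritative sketch setting two of these options through
the C<aresample> filter (file names and values are illustrative):

  ffmpeg -i input.wav -af aresample=out_sample_rate=48000:dither_method=shibata output.wav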
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
|
||||
=item B<ich, in_channel_count>
|
||||
|
||||
Set the number of input channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
B<in_channel_layout> is set.
|
||||
|
||||
|
||||
=item B<och, out_channel_count>
|
||||
|
||||
Set the number of output channels. Default value is 0. Setting this
|
||||
value is not mandatory if the corresponding channel layout
|
||||
B<out_channel_layout> is set.
|
||||
|
||||
|
||||
=item B<uch, used_channel_count>
|
||||
|
||||
Set the number of used input channels. Default value is 0. This option is
|
||||
only used for special remapping.
|
||||
|
||||
|
||||
=item B<isr, in_sample_rate>
|
||||
|
||||
Set the input sample rate. Default value is 0.
|
||||
|
||||
|
||||
=item B<osr, out_sample_rate>
|
||||
|
||||
Set the output sample rate. Default value is 0.
|
||||
|
||||
|
||||
=item B<isf, in_sample_fmt>
|
||||
|
||||
Specify the input sample format. It is set by default to C<none>.
|
||||
|
||||
|
||||
=item B<osf, out_sample_fmt>
|
||||
|
||||
Specify the output sample format. It is set by default to C<none>.
|
||||
|
||||
|
||||
=item B<tsf, internal_sample_fmt>
|
||||
|
||||
Set the internal sample format. Default value is C<none>.
|
||||
This will automatically be chosen when it is not explicitly set.
|
||||
|
||||
|
||||
=item B<icl, in_channel_layout>
|
||||
|
||||
|
||||
=item B<ocl, out_channel_layout>
|
||||
|
||||
Set the input/output channel layout.
|
||||
|
||||
See the Channel Layout section in the ffmpeg-utils(1) manual
|
||||
for the required syntax.
|
||||
|
||||
|
||||
=item B<clev, center_mix_level>
|
||||
|
||||
Set the center mix level. It is a value expressed in deciBel, and must be
|
||||
in the interval [-32,32].
|
||||
|
||||
|
||||
=item B<slev, surround_mix_level>
|
||||
|
||||
Set the surround mix level. It is a value expressed in deciBel, and must
|
||||
be in the interval [-32,32].
|
||||
|
||||
|
||||
=item B<lfe_mix_level>
|
||||
|
||||
Set LFE mix into non LFE level. It is used when there is a LFE input but no
|
||||
LFE output. It is a value expressed in deciBel, and must
|
||||
be in the interval [-32,32].
|
||||
|
||||
|
||||
=item B<rmvol, rematrix_volume>
|
||||
|
||||
Set rematrix volume. Default value is 1.0.
|
||||
|
||||
|
||||
=item B<rematrix_maxval>
|
||||
|
||||
Set maximum output value for rematrixing.
|
||||
This can be used to trade off between preventing clipping and preventing volume reduction.
|
||||
A value of 1.0 prevents clipping.
|
||||
|
||||
|
||||
=item B<flags, swr_flags>
|
||||
|
||||
Set flags used by the converter. Default value is 0.
|
||||
|
||||
It supports the following individual flags:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<res>
|
||||
|
||||
Force resampling: this flag forces resampling to be used even when the
|
||||
input and output sample rates match.
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
=item B<dither_scale>
|
||||
|
||||
Set the dither scale. Default value is 1.
|
||||
|
||||
|
||||
=item B<dither_method>
|
||||
|
||||
Set dither method. Default value is 0.
|
||||
|
||||
Supported values:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<rectangular>
|
||||
|
||||
select rectangular dither
|
||||
|
||||
=item B<triangular>
|
||||
|
||||
select triangular dither
|
||||
|
||||
=item B<triangular_hp>
|
||||
|
||||
select triangular dither with high pass
|
||||
|
||||
=item B<lipshitz>
|
||||
|
||||
select lipshitz noise shaping dither
|
||||
|
||||
=item B<shibata>
|
||||
|
||||
select shibata noise shaping dither
|
||||
|
||||
=item B<low_shibata>
|
||||
|
||||
select low shibata noise shaping dither
|
||||
|
||||
=item B<high_shibata>
|
||||
|
||||
select high shibata noise shaping dither
|
||||
|
||||
=item B<f_weighted>
|
||||
|
||||
select f-weighted noise shaping dither
|
||||
|
||||
=item B<modified_e_weighted>
|
||||
|
||||
select modified-e-weighted noise shaping dither
|
||||
|
||||
=item B<improved_e_weighted>
|
||||
|
||||
select improved-e-weighted noise shaping dither
|
||||
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
=item B<resampler>
|
||||
|
||||
Set resampling engine. Default value is swr.
|
||||
|
||||
Supported values:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<swr>
|
||||
|
||||
select the native SW Resampler; filter options precision and cheby are not
|
||||
applicable in this case.
|
||||
|
||||
=item B<soxr>
|
||||
|
||||
select the SoX Resampler (where available); compensation, and the filter options
filter_size, phase_shift, filter_type and kaiser_beta, are not applicable in this
case.
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
=item B<filter_size>
|
||||
|
||||
For swr only, set resampling filter size, default value is 32.
|
||||
|
||||
|
||||
=item B<phase_shift>
|
||||
|
||||
For swr only, set resampling phase shift, default value is 10, and must be in
|
||||
the interval [0,30].
|
||||
|
||||
|
||||
=item B<linear_interp>
|
||||
|
||||
Use Linear Interpolation if set to 1, default value is 0.
|
||||
|
||||
|
||||
=item B<cutoff>
|
||||
|
||||
Set cutoff frequency (swr: 6dB point; soxr: 0dB point) ratio; must be a float
|
||||
value between 0 and 1. Default value is 0.97 with swr, and 0.91 with soxr
|
||||
(which, with a sample-rate of 44100, preserves the entire audio band to 20kHz).
|
||||
|
||||
|
||||
=item B<precision>
|
||||
|
||||
For soxr only, the precision in bits to which the resampled signal will be
|
||||
calculated. The default value of 20 (which, with suitable dithering, is
|
||||
appropriate for a destination bit-depth of 16) gives SoX's 'High Quality'; a
|
||||
value of 28 gives SoX's 'Very High Quality'.
|
||||
|
||||
|
||||
=item B<cheby>
|
||||
|
||||
For soxr only, selects passband rolloff none (Chebyshev) & higher-precision
|
||||
approximation for 'irrational' ratios. Default value is 0.
|
||||
|
||||
|
||||
=item B<async>
|
||||
|
||||
For swr only, simple one-parameter audio sync to timestamps using stretching,
squeezing, filling and trimming. Setting this to 1 will enable filling and
trimming; larger values represent the maximum amount in samples that the data
may be stretched or squeezed for each second.
Default value is 0, thus no compensation is applied to make the samples match
the audio timestamps. Compensation can also be requested directly from code;
see the sketch after this option list.
|
||||
|
||||
|
||||
=item B<first_pts>
|
||||
|
||||
For swr only, assume the first pts should be this value. The time unit is 1 / sample rate.
|
||||
This allows for padding/trimming at the start of stream. By default, no
|
||||
assumption is made about the first frame's expected pts, so no padding or
|
||||
trimming is done. For example, this could be set to 0 to pad the beginning with
|
||||
silence if an audio stream starts after the video stream or to trim any samples
|
||||
with a negative pts due to encoder delay.
|
||||
|
||||
|
||||
=item B<min_comp>
|
||||
|
||||
For swr only, set the minimum difference between timestamps and audio data (in
|
||||
seconds) to trigger stretching/squeezing/filling or trimming of the
|
||||
data to make it match the timestamps. The default is that
|
||||
stretching/squeezing/filling and trimming is disabled
|
||||
(B<min_comp> = C<FLT_MAX>).
|
||||
|
||||
|
||||
=item B<min_hard_comp>
|
||||
|
||||
For swr only, set the minimum difference between timestamps and audio data (in
|
||||
seconds) to trigger adding/dropping samples to make it match the
|
||||
timestamps. This option effectively is a threshold to select between
|
||||
hard (trim/fill) and soft (squeeze/stretch) compensation. Note that
|
||||
all compensation is by default disabled through B<min_comp>.
|
||||
The default is 0.1.
|
||||
|
||||
|
||||
=item B<comp_duration>
|
||||
|
||||
For swr only, set duration (in seconds) over which data is stretched/squeezed
|
||||
to make it match the timestamps. Must be a non-negative double float value,
|
||||
default value is 1.0.
|
||||
|
||||
|
||||
=item B<max_soft_comp>
|
||||
|
||||
For swr only, set maximum factor by which data is stretched/squeezed to make it
|
||||
match the timestamps. Must be a non-negative double float value, default value
|
||||
is 0.
|
||||
|
||||
|
||||
=item B<matrix_encoding>
|
||||
|
||||
Select matrixed stereo encoding.
|
||||
|
||||
It accepts the following values:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<none>
|
||||
|
||||
select none
|
||||
|
||||
=item B<dolby>
|
||||
|
||||
select Dolby
|
||||
|
||||
=item B<dplii>
|
||||
|
||||
select Dolby Pro Logic II
|
||||
|
||||
=back
|
||||
|
||||
|
||||
Default value is C<none>.
|
||||
|
||||
|
||||
=item B<filter_type>
|
||||
|
||||
For swr only, select resampling filter type. This only affects resampling
|
||||
operations.
|
||||
|
||||
It accepts the following values:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<cubic>
|
||||
|
||||
select cubic
|
||||
|
||||
=item B<blackman_nuttall>
|
||||
|
||||
select Blackman Nuttall Windowed Sinc
|
||||
|
||||
=item B<kaiser>
|
||||
|
||||
select Kaiser Windowed Sinc
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
=item B<kaiser_beta>
|
||||
|
||||
For swr only, set Kaiser Window Beta value. Must be an integer in the
|
||||
interval [2,16], default value is 9.
|
||||
|
||||
|
||||
=item B<output_sample_bits>
|
||||
|
||||
For swr only, set number of used output sample bits for dithering. Must be an integer in the
|
||||
interval [0,64], default value is 0, which means it's not used.
|
||||
|
||||
|
||||
=back
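Beyond the B<async> and B<min_hard_comp> options above, soft compensation can
also be requested directly from application code. The following is only a
hedged sketch (the delta and distance values are illustrative assumptions),
using the public C<swr_set_compensation()> call on an initialized context:

    /* Sketch: distribute `sample_delta` extra output samples over the next
       `distance` output samples to correct a drift measured by the caller. */
    #include <libswresample/swresample.h>

    int sample_delta = 480;     /* assumed measured drift, in samples       */
    int distance     = 48000;   /* spread the correction over ~1 s @ 48 kHz */
    if (swr_set_compensation(swr, sample_delta, distance) < 0) {
        /* compensation unsupported or context not initialized */
    }
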
|
||||
|
||||
|
||||
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
|
||||
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
|
||||
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
B<git log> in the FFmpeg source directory, or browsing the
|
||||
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
F<MAINTAINERS> in the source code tree.
|
||||
|
||||
|
||||
|
@ -1,44 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Resampler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Resampler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg resampler provides a high-level interface to the
|
||||
libswresample library audio resampling utilities. In particular it
|
||||
allows one to perform audio resampling, audio channel layout rematrixing,
and audio format and packing layout conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include resampler.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswresample.html,libswresample}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswresample(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-resampler
|
||||
@settitle FFmpeg Resampler
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@ -1,229 +0,0 @@
|
||||
=head1 NAME
|
||||
|
||||
ffmpeg-scaler - FFmpeg video scaling and pixel format converter
|
||||
|
||||
=head1 DESCRIPTION
|
||||
|
||||
|
||||
The FFmpeg rescaler provides a high-level interface to the libswscale
|
||||
library image conversion utilities. In particular it allows one to perform
|
||||
image rescaling and pixel format conversion.
|
||||
|
||||
|
||||
|
||||
|
||||
=head1 SCALER OPTIONS
|
||||
|
||||
|
||||
The video scaler supports the following named options.

Options may be set by specifying -I<option> I<value> in the
FFmpeg tools. For programmatic use, they can be set explicitly in the
C<SwsContext> options or through the F<libavutil/opt.h> API, as in the
sketch below.
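A minimal sketch of that programmatic path follows; it is not taken from the
FFmpeg sources, assumes a 1920x1080 YUV420P frame scaled to 1280x720 RGB24,
and uses only public libswscale calls (C<sws_getContext()>, C<sws_scale()>,
C<sws_freeContext()>). The scaling algorithm passed as flags corresponds to
the values listed under B<sws_flags> below.

    /* Sketch: create and use a scaling context (buffers and error handling
       are abbreviated; src_data/dst_data and linesizes are assumed set up). */
    #include <libswscale/swscale.h>

    struct SwsContext *sws = sws_getContext(
        1920, 1080, AV_PIX_FMT_YUV420P,   /* srcw, srch, src_format   */
        1280,  720, AV_PIX_FMT_RGB24,     /* dstw, dsth, dst_format   */
        SWS_BILINEAR,                     /* sws_flags                */
        NULL, NULL, NULL);                /* filters and param0/param1 */
    if (sws) {
        sws_scale(sws, (const uint8_t * const *)src_data, src_linesize,
                  0, 1080, dst_data, dst_linesize);
        sws_freeContext(sws);
    }
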
=over 4
|
||||
|
||||
|
||||
|
||||
|
||||
=item B<sws_flags>
|
||||
|
||||
Set the scaler flags. This is also used to set the scaling
|
||||
algorithm. Only a single algorithm should be selected.
|
||||
|
||||
It accepts the following values:
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<fast_bilinear>
|
||||
|
||||
Select fast bilinear scaling algorithm.
|
||||
|
||||
|
||||
=item B<bilinear>
|
||||
|
||||
Select bilinear scaling algorithm.
|
||||
|
||||
|
||||
=item B<bicubic>
|
||||
|
||||
Select bicubic scaling algorithm.
|
||||
|
||||
|
||||
=item B<experimental>
|
||||
|
||||
Select experimental scaling algorithm.
|
||||
|
||||
|
||||
=item B<neighbor>
|
||||
|
||||
Select nearest neighbor rescaling algorithm.
|
||||
|
||||
|
||||
=item B<area>
|
||||
|
||||
Select averaging area rescaling algorithm.
|
||||
|
||||
|
||||
=item B<bicubiclin>
|
||||
|
||||
Select bicubic scaling algorithm for the luma component, bilinear for
|
||||
chroma components.
|
||||
|
||||
|
||||
=item B<gauss>
|
||||
|
||||
Select Gaussian rescaling algorithm.
|
||||
|
||||
|
||||
=item B<sinc>
|
||||
|
||||
Select sinc rescaling algorithm.
|
||||
|
||||
|
||||
=item B<lanczos>
|
||||
|
||||
Select lanczos rescaling algorithm.
|
||||
|
||||
|
||||
=item B<spline>
|
||||
|
||||
Select natural bicubic spline rescaling algorithm.
|
||||
|
||||
|
||||
=item B<print_info>
|
||||
|
||||
Enable printing/debug logging.
|
||||
|
||||
|
||||
=item B<accurate_rnd>
|
||||
|
||||
Enable accurate rounding.
|
||||
|
||||
|
||||
=item B<full_chroma_int>
|
||||
|
||||
Enable full chroma interpolation.
|
||||
|
||||
|
||||
=item B<full_chroma_inp>
|
||||
|
||||
Select full chroma input.
|
||||
|
||||
|
||||
=item B<bitexact>
|
||||
|
||||
Enable bitexact output.
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
=item B<srcw>
|
||||
|
||||
Set source width.
|
||||
|
||||
|
||||
=item B<srch>
|
||||
|
||||
Set source height.
|
||||
|
||||
|
||||
=item B<dstw>
|
||||
|
||||
Set destination width.
|
||||
|
||||
|
||||
=item B<dsth>
|
||||
|
||||
Set destination height.
|
||||
|
||||
|
||||
=item B<src_format>
|
||||
|
||||
Set source pixel format (must be expressed as an integer).
|
||||
|
||||
|
||||
=item B<dst_format>
|
||||
|
||||
Set destination pixel format (must be expressed as an integer).
|
||||
|
||||
|
||||
=item B<src_range>
|
||||
|
||||
Select source range.
|
||||
|
||||
|
||||
=item B<dst_range>
|
||||
|
||||
Select destination range.
|
||||
|
||||
|
||||
=item B<param0, param1>
|
||||
|
||||
Set scaling algorithm parameters. The specified values are specific to
some scaling algorithms and ignored by others. The specified values
are floating point numbers.
|
||||
|
||||
|
||||
=item B<sws_dither>
|
||||
|
||||
Set the dithering algorithm. Accepts one of the following
|
||||
values. Default value is B<auto>.
|
||||
|
||||
|
||||
=over 4
|
||||
|
||||
|
||||
=item B<auto>
|
||||
|
||||
automatic choice
|
||||
|
||||
|
||||
=item B<none>
|
||||
|
||||
no dithering
|
||||
|
||||
|
||||
=item B<bayer>
|
||||
|
||||
bayer dither
|
||||
|
||||
|
||||
=item B<ed>
|
||||
|
||||
error diffusion dither
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
=back
|
||||
|
||||
|
||||
|
||||
|
||||
=head1 SEE ALSO
|
||||
|
||||
|
||||
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
|
||||
|
||||
|
||||
=head1 AUTHORS
|
||||
|
||||
|
||||
The FFmpeg developers.
|
||||
|
||||
For details about the authorship, see the Git history of the project
|
||||
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
|
||||
B<git log> in the FFmpeg source directory, or browsing the
|
||||
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.
|
||||
|
||||
Maintainers for the specific components are listed in the file
|
||||
F<MAINTAINERS> in the source code tree.
|
||||
|
||||
|
||||
|
@ -1,43 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Scaler Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Scaler Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
The FFmpeg rescaler provides a high-level interface to the libswscale
|
||||
library image conversion utilities. In particular it allows one to perform
|
||||
image rescaling and pixel format conversion.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include scaler.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libswscale.html,libswscale}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libswscale(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-scaler
|
||||
@settitle FFmpeg video scaling and pixel format converter
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@ -1,42 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle FFmpeg Utilities Documentation
|
||||
@titlepage
|
||||
@center @titlefont{FFmpeg Utilities Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
This document describes some generic features and utilities provided
|
||||
by the libavutil library.
|
||||
|
||||
@c man end DESCRIPTION
|
||||
|
||||
@include utils.texi
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{libavutil.html,libavutil}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1), libavutil(3)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffmpeg-utils
|
||||
@settitle FFmpeg utilities
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
File diff suppressed because it is too large
@ -1,47 +0,0 @@
|
||||
:
|
||||
ffmpeg.c : libav*
|
||||
======== : ======
|
||||
:
|
||||
:
|
||||
--------------------------------:---> AVStream...
|
||||
InputStream input_streams[] / :
|
||||
/ :
|
||||
InputFile input_files[] +==========================+ / ^ :
|
||||
------> 0 | : st ---:-----------:--/ : :
|
||||
^ +------+-----------+-----+ / +--------------------------+ : :
|
||||
: | :ist_index--:-----:---------/ 1 | : st : | : :
|
||||
: +------+-----------+-----+ +==========================+ : :
|
||||
nb_input_files : | :ist_index--:-----:------------------> 2 | : st : | : :
|
||||
: +------+-----------+-----+ +--------------------------+ : nb_input_streams :
|
||||
: | :ist_index : | 3 | ... | : :
|
||||
v +------+-----------+-----+ +--------------------------+ : :
|
||||
--> 4 | | : :
|
||||
| +--------------------------+ : :
|
||||
| 5 | | : :
|
||||
| +==========================+ v :
|
||||
| :
|
||||
| :
|
||||
| :
|
||||
| :
|
||||
--------- --------------------------------:---> AVStream...
|
||||
\ / :
|
||||
OutputStream output_streams[] / :
|
||||
\ / :
|
||||
+======\======================/======+ ^ :
|
||||
------> 0 | : source_index : st-:--- | : :
|
||||
OutputFile output_files[] / +------------------------------------+ : :
|
||||
/ 1 | : : : | : :
|
||||
^ +------+------------+-----+ / +------------------------------------+ : :
|
||||
: | : ost_index -:-----:------/ 2 | : : : | : :
|
||||
nb_output_files : +------+------------+-----+ +====================================+ : :
|
||||
: | : ost_index -:-----|-----------------> 3 | : : : | : :
|
||||
: +------+------------+-----+ +------------------------------------+ : nb_output_streams :
|
||||
: | : : | 4 | | : :
|
||||
: +------+------------+-----+ +------------------------------------+ : :
|
||||
: | : : | 5 | | : :
|
||||
v +------+------------+-----+ +------------------------------------+ : :
|
||||
6 | | : :
|
||||
+------------------------------------+ : :
|
||||
7 | | : :
|
||||
+====================================+ v :
|
||||
:
|
@ -1,277 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle ffplay Documentation
|
||||
@titlepage
|
||||
@center @titlefont{ffplay Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffplay [@var{options}] [@file{input_file}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
FFplay is a very simple and portable media player using the FFmpeg
|
||||
libraries and the SDL library. It is mostly used as a testbed for the
|
||||
various FFmpeg APIs.
|
||||
@c man end
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@include fftools-common-opts.texi
|
||||
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
@item -x @var{width}
|
||||
Force displayed width.
|
||||
@item -y @var{height}
|
||||
Force displayed height.
|
||||
@item -s @var{size}
|
||||
Set frame size (WxH or abbreviation), needed for videos which do
|
||||
not contain a header with the frame size like raw YUV. This option
|
||||
has been deprecated in favor of private options, try -video_size.
|
||||
@item -an
|
||||
Disable audio.
|
||||
@item -vn
|
||||
Disable video.
|
||||
@item -ss @var{pos}
|
||||
Seek to a given position in seconds.
|
||||
@item -t @var{duration}
|
||||
Play @var{duration} seconds of audio/video.
|
||||
@item -bytes
|
||||
Seek by bytes.
|
||||
@item -nodisp
|
||||
Disable graphical display.
|
||||
@item -f @var{fmt}
|
||||
Force format.
|
||||
@item -window_title @var{title}
|
||||
Set window title (default is the input filename).
|
||||
@item -loop @var{number}
|
||||
Loop movie playback @var{number} times. 0 means forever.
|
||||
@item -showmode @var{mode}
|
||||
Set the show mode to use.
|
||||
Available values for @var{mode} are:
|
||||
@table @samp
|
||||
@item 0, video
|
||||
show video
|
||||
@item 1, waves
|
||||
show audio waves
|
||||
@item 2, rdft
|
||||
show audio frequency band using RDFT ((Inverse) Real Discrete Fourier Transform)
|
||||
@end table
|
||||
|
||||
Default value is "video", if video is not present or cannot be played
|
||||
"rdft" is automatically selected.
|
||||
|
||||
You can interactively cycle through the available show modes by
|
||||
pressing the key @key{w}.
|
||||
|
||||
@item -vf @var{filtergraph}
|
||||
Create the filtergraph specified by @var{filtergraph} and use it to
|
||||
filter the video stream.
|
||||
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the stream, and must have a single video input and a single video
|
||||
output. In the filtergraph, the input is associated to the label
|
||||
@code{in}, and the output to the label @code{out}. See the
|
||||
ffmpeg-filters manual for more information about the filtergraph
|
||||
syntax.
|
||||
|
||||
@item -af @var{filtergraph}
|
||||
@var{filtergraph} is a description of the filtergraph to apply to
|
||||
the input audio.
|
||||
Use the option "-filters" to show all the available filters (including
|
||||
sources and sinks).
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
@end table
|
||||
|
||||
@section Advanced options
|
||||
@table @option
|
||||
@item -pix_fmt @var{format}
|
||||
Set pixel format.
|
||||
This option has been deprecated in favor of private options, try -pixel_format.
|
||||
|
||||
@item -stats
|
||||
Print several playback statistics, in particular show the stream
|
||||
duration, the codec parameters, the current position in the stream and
|
||||
the audio/video synchronisation drift. It is on by default; to
explicitly disable it you need to specify @code{-nostats}.
|
||||
|
||||
@item -bug
|
||||
Work around bugs.
|
||||
@item -fast
|
||||
Non-spec-compliant optimizations.
|
||||
@item -genpts
|
||||
Generate pts.
|
||||
@item -rtp_tcp
|
||||
Force RTP/TCP protocol usage instead of RTP/UDP. It is only meaningful
|
||||
if you are streaming with the RTSP protocol.
|
||||
@item -sync @var{type}
|
||||
Set the master clock to audio (@code{type=audio}), video
|
||||
(@code{type=video}) or external (@code{type=ext}). Default is audio. The
|
||||
master clock is used to control audio-video synchronization. Most media
|
||||
players use audio as master clock, but in some cases (streaming or high
|
||||
quality broadcast) it is necessary to change that. This option is mainly
|
||||
used for debugging purposes.
|
||||
@item -threads @var{count}
|
||||
Set the thread count.
|
||||
@item -ast @var{audio_stream_number}
|
||||
Select the desired audio stream number, counting from 0. The number
|
||||
refers to the list of all the input audio streams. If it is greater
|
||||
than the number of audio streams minus one, then the last one is
|
||||
selected; if it is negative the audio playback is disabled.
|
||||
@item -vst @var{video_stream_number}
|
||||
Select the desired video stream number, counting from 0. The number
|
||||
refers to the list of all the input video streams. If it is greater
|
||||
than the number of video streams minus one, then the last one is
|
||||
selected; if it is negative the video playback is disabled.
|
||||
@item -sst @var{subtitle_stream_number}
|
||||
Select the desired subtitle stream number, counting from 0. The number
|
||||
refers to the list of all the input subtitle streams. If it is greater
|
||||
than the number of subtitle streams minus one, then the last one is
|
||||
selected; if it is negative the subtitle rendering is disabled.
|
||||
@item -autoexit
|
||||
Exit when video is done playing.
|
||||
@item -exitonkeydown
|
||||
Exit if any key is pressed.
|
||||
@item -exitonmousedown
|
||||
Exit if any mouse button is pressed.
|
||||
|
||||
@item -codec:@var{media_specifier} @var{codec_name}
|
||||
Force a specific decoder implementation for the stream identified by
|
||||
@var{media_specifier}, which can assume the values @code{a} (audio),
|
||||
@code{v} (video), and @code{s} (subtitle).
|
||||
|
||||
@item -acodec @var{codec_name}
|
||||
Force a specific audio decoder.
|
||||
|
||||
@item -vcodec @var{codec_name}
|
||||
Force a specific video decoder.
|
||||
|
||||
@item -scodec @var{codec_name}
|
||||
Force a specific subtitle decoder.
|
||||
@end table
|
||||
|
||||
@section While playing
|
||||
|
||||
@table @key
|
||||
@item q, ESC
|
||||
Quit.
|
||||
|
||||
@item f
|
||||
Toggle full screen.
|
||||
|
||||
@item p, SPC
|
||||
Pause.
|
||||
|
||||
@item a
|
||||
Cycle audio channel in the current program.
|
||||
|
||||
@item v
|
||||
Cycle video channel.
|
||||
|
||||
@item t
|
||||
Cycle subtitle channel in the current program.
|
||||
|
||||
@item c
|
||||
Cycle program.
|
||||
|
||||
@item w
|
||||
Show audio waves.
|
||||
|
||||
@item left/right
|
||||
Seek backward/forward 10 seconds.
|
||||
|
||||
@item down/up
|
||||
Seek backward/forward 1 minute.
|
||||
|
||||
@item page down/page up
|
||||
Seek backward/forward 10 minutes.
|
||||
|
||||
@item mouse click
|
||||
Seek to percentage in file corresponding to fraction of width.
|
||||
|
||||
@end table
|
||||
|
||||
@c man end
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffplay.html,ffplay},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffplay-all.html,ffmpeg-all},
|
||||
@end ifset
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffplay(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffplay-all(1),
|
||||
@end ifset
|
||||
ffmpeg(1), ffprobe(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffplay
|
||||
@settitle FFplay media player
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@ -1,637 +0,0 @@
|
||||
\input texinfo @c -*- texinfo -*-
|
||||
|
||||
@settitle ffprobe Documentation
|
||||
@titlepage
|
||||
@center @titlefont{ffprobe Documentation}
|
||||
@end titlepage
|
||||
|
||||
@top
|
||||
|
||||
@contents
|
||||
|
||||
@chapter Synopsis
|
||||
|
||||
ffprobe [@var{options}] [@file{input_file}]
|
||||
|
||||
@chapter Description
|
||||
@c man begin DESCRIPTION
|
||||
|
||||
ffprobe gathers information from multimedia streams and prints it in
|
||||
human- and machine-readable fashion.
|
||||
|
||||
For example it can be used to check the format of the container used
|
||||
by a multimedia stream and the format and type of each media stream
|
||||
contained in it.
|
||||
|
||||
If a filename is specified in input, ffprobe will try to open and
|
||||
probe the file content. If the file cannot be opened or recognized as
|
||||
a multimedia file, a positive exit code is returned.
|
||||
|
||||
ffprobe may be employed either as a standalone application or in
|
||||
combination with a textual filter, which may perform more
|
||||
sophisticated processing, e.g. statistical processing or plotting.
|
||||
|
||||
Options are used to list some of the formats supported by ffprobe,
to specify which information to display, and to set how
ffprobe will show it.
|
||||
|
||||
ffprobe output is designed to be easily parsable by a textual filter,
|
||||
and consists of one or more sections of a form defined by the selected
|
||||
writer, which is specified by the @option{print_format} option.
|
||||
|
||||
Sections may contain other nested sections, and are identified by a
|
||||
name (which may be shared by other sections), and an unique
|
||||
name. See the output of @option{sections}.
|
||||
|
||||
Metadata tags stored in the container or in the streams are recognized
|
||||
and printed in the corresponding "FORMAT", "STREAM" or "PROGRAM_STREAM"
|
||||
section.
|
||||
|
||||
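As a rough illustration only (this is not @command{ffprobe}'s actual
implementation), the core of this probing can be reproduced with the public
libavformat API; the file name below is a placeholder.

@example
/* Sketch: open a file and dump a summary of its container and streams,
 * similar in spirit to -show_format / -show_streams. */
#include <libavformat/avformat.h>

AVFormatContext *fmt = NULL;
av_register_all();                          /* required in this FFmpeg era */
if (avformat_open_input(&fmt, "input.mkv", NULL, NULL) == 0) @{
    avformat_find_stream_info(fmt, NULL);
    av_dump_format(fmt, 0, "input.mkv", 0); /* human-readable summary */
    avformat_close_input(&fmt);
@}
@end example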
@c man end
|
||||
|
||||
@chapter Options
|
||||
@c man begin OPTIONS
|
||||
|
||||
@include fftools-common-opts.texi
|
||||
|
||||
@section Main options
|
||||
|
||||
@table @option
|
||||
|
||||
@item -f @var{format}
|
||||
Force format to use.
|
||||
|
||||
@item -unit
|
||||
Show the unit of the displayed values.
|
||||
|
||||
@item -prefix
|
||||
Use SI prefixes for the displayed values.
|
||||
Unless the "-byte_binary_prefix" option is used all the prefixes
|
||||
are decimal.
|
||||
|
||||
@item -byte_binary_prefix
|
||||
Force the use of binary prefixes for byte values.
|
||||
|
||||
@item -sexagesimal
|
||||
Use sexagesimal format HH:MM:SS.MICROSECONDS for time values.
|
||||
|
||||
@item -pretty
|
||||
Prettify the format of the displayed values; it corresponds to the
|
||||
options "-unit -prefix -byte_binary_prefix -sexagesimal".
|
||||
|
||||
@item -of, -print_format @var{writer_name}[=@var{writer_options}]
|
||||
Set the output printing format.
|
||||
|
||||
@var{writer_name} specifies the name of the writer, and
|
||||
@var{writer_options} specifies the options to be passed to the writer.
|
||||
|
||||
For example for printing the output in JSON format, specify:
|
||||
@example
|
||||
-print_format json
|
||||
@end example
|
||||
|
||||
For more details on the available output printing formats, see the
|
||||
Writers section below.
|
||||
|
||||
@item -sections
|
||||
Print sections structure and section information, and exit. The output
|
||||
is not meant to be parsed by a machine.
|
||||
|
||||
@item -select_streams @var{stream_specifier}
|
||||
Select only the streams specified by @var{stream_specifier}. This
|
||||
option affects only the options related to streams
|
||||
(e.g. @code{show_streams}, @code{show_packets}, etc.).
|
||||
|
||||
For example to show only audio streams, you can use the command:
|
||||
@example
|
||||
ffprobe -show_streams -select_streams a INPUT
|
||||
@end example
|
||||
|
||||
To show only video packets belonging to the video stream with index 1:
|
||||
@example
|
||||
ffprobe -show_packets -select_streams v:1 INPUT
|
||||
@end example
|
||||
|
||||
@item -show_data
|
||||
Show payload data, as a hexadecimal and ASCII dump. Coupled with
|
||||
@option{-show_packets}, it will dump the packets' data. Coupled with
|
||||
@option{-show_streams}, it will dump the codec extradata.
|
||||
|
||||
The dump is printed as the "data" field. It may contain newlines.
|
||||
|
||||
@item -show_error
|
||||
Show information about the error found when trying to probe the input.
|
||||
|
||||
The error information is printed within a section with name "ERROR".
|
||||
|
||||
@item -show_format
|
||||
Show information about the container format of the input multimedia
|
||||
stream.
|
||||
|
||||
All the container format information is printed within a section with
|
||||
name "FORMAT".
|
||||
|
||||
@item -show_format_entry @var{name}
|
||||
Like @option{-show_format}, but only prints the specified entry of the
|
||||
container format information, rather than all. This option may be given more
|
||||
than once; in that case all specified entries will be shown.
|
||||
|
||||
This option is deprecated, use @code{show_entries} instead.
|
||||
|
||||
@item -show_entries @var{section_entries}
|
||||
Set list of entries to show.
|
||||
|
||||
Entries are specified according to the following
|
||||
syntax. @var{section_entries} contains a list of section entries
|
||||
separated by @code{:}. Each section entry is composed of a section
|
||||
name (or unique name), optionally followed by a list of entries local
|
||||
to that section, separated by @code{,}.
|
||||
|
||||
If section name is specified but is followed by no @code{=}, all
|
||||
entries are printed to output, together with all the contained
|
||||
sections. Otherwise only the entries specified in the local section
|
||||
entries list are printed. In particular, if @code{=} is specified but
|
||||
the list of local entries is empty, then no entries will be shown for
|
||||
that section.
|
||||
|
||||
Note that the order of specification of the local section entries is
|
||||
not honored in the output, and the usual display order will be
|
||||
retained.
|
||||
|
||||
The formal syntax is given by:
|
||||
@example
|
||||
@var{LOCAL_SECTION_ENTRIES} ::= @var{SECTION_ENTRY_NAME}[,@var{LOCAL_SECTION_ENTRIES}]
|
||||
@var{SECTION_ENTRY} ::= @var{SECTION_NAME}[=[@var{LOCAL_SECTION_ENTRIES}]]
|
||||
@var{SECTION_ENTRIES} ::= @var{SECTION_ENTRY}[:@var{SECTION_ENTRIES}]
|
||||
@end example
|
||||
|
||||
For example, to show only the index and type of each stream, and the PTS
|
||||
time, duration time, and stream index of the packets, you can specify
|
||||
the argument:
|
||||
@example
|
||||
packet=pts_time,duration_time,stream_index : stream=index,codec_type
|
||||
@end example
|
||||
|
||||
To show all the entries in the section "format", but only the codec
|
||||
type in the section "stream", specify the argument:
|
||||
@example
|
||||
format : stream=codec_type
|
||||
@end example
|
||||
|
||||
To show all the tags in the stream and format sections:
|
||||
@example
|
||||
format_tags : format_tags
|
||||
@end example
|
||||
|
||||
To show only the @code{title} tag (if available) in the stream
|
||||
sections:
|
||||
@example
|
||||
stream_tags=title
|
||||
@end example
|
||||
|
||||
@item -show_packets
|
||||
Show information about each packet contained in the input multimedia
|
||||
stream.
|
||||
|
||||
The information for each single packet is printed within a dedicated
|
||||
section with name "PACKET".
|
||||
|
||||
@item -show_frames
|
||||
Show information about each frame contained in the input multimedia
|
||||
stream.
|
||||
|
||||
The information for each single frame is printed within a dedicated
|
||||
section with name "FRAME".
|
||||
|
||||
@item -show_streams
|
||||
Show information about each media stream contained in the input
|
||||
multimedia stream.
|
||||
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "STREAM".
|
||||
|
||||
@item -show_programs
|
||||
Show information about programs and their streams contained in the input
|
||||
multimedia stream.
|
||||
|
||||
Each media stream information is printed within a dedicated section
|
||||
with name "PROGRAM_STREAM".
|
||||
|
||||
@item -show_chapters
|
||||
Show information about chapters stored in the format.
|
||||
|
||||
Each chapter is printed within a dedicated section with name "CHAPTER".
|
||||
|
||||
@item -count_frames
|
||||
Count the number of frames per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -count_packets
|
||||
Count the number of packets per stream and report it in the
|
||||
corresponding stream section.
|
||||
|
||||
@item -read_intervals @var{read_intervals}
|
||||
|
||||
Read only the specified intervals. @var{read_intervals} must be a
|
||||
sequence of interval specifications separated by ",".
|
||||
@command{ffprobe} will seek to the interval starting point, and will
|
||||
continue reading from that.
|
||||
|
||||
Each interval is specified by two optional parts, separated by "%".
|
||||
|
||||
The first part specifies the interval start position. It is
|
||||
interpreted as an absolute position, or as a relative offset from the
|
||||
current position if it is preceded by the "+" character. If this first
|
||||
part is not specified, no seeking will be performed when reading this
|
||||
interval.
|
||||
|
||||
The second part specifies the interval end position. It is interpreted
|
||||
as an absolute position, or as a relative offset from the current
|
||||
position if it is preceded by the "+" character. If the offset
|
||||
specification starts with "#", it is interpreted as the number of
|
||||
packets to read (not including the flushing packets) from the interval
|
||||
start. If no second part is specified, the program will read until the
|
||||
end of the input.
|
||||
|
||||
Note that seeking is not accurate; thus the actual interval start
|
||||
point may be different from the specified position. Also, when an
|
||||
interval duration is specified, the absolute end time will be computed
|
||||
by adding the duration to the interval start point found by seeking
|
||||
the file, rather than to the specified start value.
|
||||
|
||||
The formal syntax is given by:
|
||||
@example
|
||||
@var{INTERVAL} ::= [@var{START}|+@var{START_OFFSET}][%[@var{END}|+@var{END_OFFSET}]]
|
||||
@var{INTERVALS} ::= @var{INTERVAL}[,@var{INTERVALS}]
|
||||
@end example
|
||||
|
||||
A few examples follow.
|
||||
@itemize
|
||||
@item
|
||||
Seek to time 10, read packets until 20 seconds after the found seek
|
||||
point, then seek to position @code{01:30} (1 minute and thirty
|
||||
seconds) and read packets until position @code{01:45}.
|
||||
@example
|
||||
10%+20,01:30%01:45
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read only 42 packets after seeking to position @code{01:23}:
|
||||
@example
|
||||
01:23%+#42
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read only the first 20 seconds from the start:
|
||||
@example
|
||||
%+20
|
||||
@end example
|
||||
|
||||
@item
|
||||
Read from the start until position @code{02:30}:
|
||||
@example
|
||||
%02:30
|
||||
@end example
|
||||
@end itemize
|
||||
|
||||
@item -show_private_data, -private
|
||||
Show private data, that is data depending on the format of the
|
||||
particular shown element.
|
||||
This option is enabled by default, but you may need to disable it
|
||||
for specific uses, for example when creating XSD-compliant XML output.
|
||||
|
||||
@item -show_program_version
|
||||
Show information related to program version.
|
||||
|
||||
Version information is printed within a section with name
|
||||
"PROGRAM_VERSION".
|
||||
|
||||
@item -show_library_versions
|
||||
Show information related to library versions.
|
||||
|
||||
Version information for each library is printed within a section with
|
||||
name "LIBRARY_VERSION".
|
||||
|
||||
@item -show_versions
|
||||
Show information related to program and library versions. This is the
|
||||
equivalent of setting both @option{-show_program_version} and
|
||||
@option{-show_library_versions} options.
|
||||
|
||||
@item -bitexact
|
||||
Force bitexact output, useful to produce output which is not dependent
|
||||
on the specific build.
|
||||
|
||||
@item -i @var{input_file}
|
||||
Read @var{input_file}.
|
||||
|
||||
@end table
|
||||
@c man end
|
||||
|
||||
@chapter Writers
|
||||
@c man begin WRITERS
|
||||
|
||||
A writer defines the output format adopted by @command{ffprobe}, and will be
|
||||
used for printing all the parts of the output.
|
||||
|
||||
A writer may accept one or more arguments, which specify the options
|
||||
to adopt. The options are specified as a list of @var{key}=@var{value}
|
||||
pairs, separated by ":".
|
||||
|
||||
A description of the currently available writers follows.
|
||||
|
||||
@section default
|
||||
Default format.
|
||||
|
||||
Print each section in the form:
|
||||
@example
|
||||
[SECTION]
|
||||
key1=val1
|
||||
...
|
||||
keyN=valN
|
||||
[/SECTION]
|
||||
@end example
|
||||
|
||||
Metadata tags are printed as a line in the corresponding FORMAT, STREAM or
|
||||
PROGRAM_STREAM section, and are prefixed by the string "TAG:".
|
||||
|
||||
A description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1 specify not to print the key of each field. Default value
|
||||
is 0.
|
||||
|
||||
@item noprint_wrappers, nw
|
||||
If set to 1 specify not to print the section header and footer.
|
||||
Default value is 0.
|
||||
@end table
|
||||
|
||||
@section compact, csv
|
||||
Compact and CSV format.
|
||||
|
||||
The @code{csv} writer is equivalent to @code{compact}, but supports
|
||||
different defaults.
|
||||
|
||||
Each section is printed on a single line.
|
||||
If no option is specified, the output has the form:
|
||||
@example
|
||||
section|key1=val1| ... |keyN=valN
|
||||
@end example
|
||||
|
||||
Metadata tags are printed in the corresponding "format" or "stream"
|
||||
section. A metadata tag key, if printed, is prefixed by the string
|
||||
"tag:".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item item_sep, s
|
||||
Specify the character to use for separating fields in the output line.
|
||||
It must be a single printable character; it is "|" by default ("," for
|
||||
the @code{csv} writer).
|
||||
|
||||
@item nokey, nk
|
||||
If set to 1 specify not to print the key of each field. Its default
|
||||
value is 0 (1 for the @code{csv} writer).
|
||||
|
||||
@item escape, e
|
||||
Set the escape mode to use, default to "c" ("csv" for the @code{csv}
|
||||
writer).
|
||||
|
||||
It can assume one of the following values:
|
||||
@table @option
|
||||
@item c
|
||||
Perform C-like escaping. Strings containing a newline ('\n'), carriage
|
||||
return ('\r'), a tab ('\t'), a form feed ('\f'), the escaping
|
||||
character ('\') or the item separator character @var{SEP} are escaped in C-like
fashion, so that a newline is converted to the sequence "\n", a
|
||||
carriage return to "\r", '\' to "\\" and the separator @var{SEP} is
|
||||
converted to "\@var{SEP}".
|
||||
|
||||
@item csv
|
||||
Perform CSV-like escaping, as described in RFC4180. Strings
|
||||
containing a newline ('\n'), a carriage return ('\r'), a double quote
|
||||
('"'), or @var{SEP} are enclosed in double-quotes.
|
||||
|
||||
@item none
|
||||
Perform no escaping.
|
||||
@end table
|
||||
|
||||
@item print_section, p
|
||||
Print the section name at the beginning of each line if the value is
|
||||
@code{1}, disable it with value set to @code{0}. Default value is
|
||||
@code{1}.
|
||||
|
||||
@end table
|
||||
|
||||
@section flat
|
||||
Flat format.
|
||||
|
||||
A free-form output where each line contains an explicit key=value, such as
|
||||
"streams.stream.3.tags.foo=bar". The output is shell escaped, so it can be
|
||||
directly embedded in sh scripts as long as the separator character is an
|
||||
alphanumeric character or an underscore (see @var{sep_char} option).
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item sep_char, s
|
||||
Separator character used to separate the chapter, the section name, IDs and
|
||||
potential tags in the printed field key.
|
||||
|
||||
Default value is '.'.
|
||||
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
|
||||
|
||||
@section ini
|
||||
INI format output.
|
||||
|
||||
Print output in an INI based format.
|
||||
|
||||
The following conventions are adopted:
|
||||
|
||||
@itemize
|
||||
@item
|
||||
all key and values are UTF-8
|
||||
@item
|
||||
'.' is the subgroup separator
|
||||
@item
|
||||
newline, '\t', '\f', '\b' and the following characters are escaped
|
||||
@item
|
||||
'\' is the escape character
|
||||
@item
|
||||
'#' is the comment indicator
|
||||
@item
|
||||
'=' is the key/value separator
|
||||
@item
|
||||
':' is not used but usually parsed as key/value separator
|
||||
@end itemize
|
||||
|
||||
This writer accepts options as a list of @var{key}=@var{value} pairs,
|
||||
separated by ":".
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
@item hierarchical, h
|
||||
Specify if the section name specification should be hierarchical. If
|
||||
set to 1, and if there is more than one section in the current
|
||||
chapter, the section name will be prefixed by the name of the
|
||||
chapter. A value of 0 will disable this behavior.
|
||||
|
||||
Default value is 1.
|
||||
@end table
|
||||
|
||||
@section json
|
||||
JSON based format.
|
||||
|
||||
Each section is printed using JSON notation.
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item compact, c
|
||||
If set to 1 enable compact output, that is each section will be
|
||||
printed on a single line. Default value is 0.
|
||||
@end table
|
||||
|
||||
For more information about JSON, see @url{http://www.json.org/}.
|
||||
|
||||
@section xml
|
||||
XML based format.
|
||||
|
||||
The XML output is described in the XML schema description file
|
||||
@file{ffprobe.xsd} installed in the FFmpeg datadir.
|
||||
|
||||
An updated version of the schema can be retrieved at the url
|
||||
@url{http://www.ffmpeg.org/schema/ffprobe.xsd}, which redirects to the
|
||||
latest schema committed into the FFmpeg development source code tree.
|
||||
|
||||
Note that the output issued will be compliant to the
|
||||
@file{ffprobe.xsd} schema only when no special global output options
|
||||
(@option{unit}, @option{prefix}, @option{byte_binary_prefix},
|
||||
@option{sexagesimal} etc.) are specified.
|
||||
|
||||
The description of the accepted options follows.
|
||||
|
||||
@table @option
|
||||
|
||||
@item fully_qualified, q
|
||||
If set to 1 specify if the output should be fully qualified. Default
|
||||
value is 0.
|
||||
This is required for generating an XML file which can be validated
|
||||
through an XSD file.
|
||||
|
||||
@item xsd_compliant, x
|
||||
If set to 1 perform more checks for ensuring that the output is XSD
|
||||
compliant. Default value is 0.
|
||||
This option automatically sets @option{fully_qualified} to 1.
|
||||
@end table
|
||||
|
||||
For more information about the XML format, see
|
||||
@url{http://www.w3.org/XML/}.
|
||||
@c man end WRITERS
|
||||
|
||||
@chapter Timecode
|
||||
@c man begin TIMECODE
|
||||
|
||||
@command{ffprobe} supports Timecode extraction:
|
||||
|
||||
@itemize
|
||||
|
||||
@item
|
||||
MPEG1/2 timecode is extracted from the GOP, and is available in the video
|
||||
stream details (@option{-show_streams}, see @var{timecode}).
|
||||
|
||||
@item
|
||||
MOV timecode is extracted from tmcd track, so is available in the tmcd
|
||||
stream metadata (@option{-show_streams}, see @var{TAG:timecode}).
|
||||
|
||||
@item
|
||||
DV, GXF and AVI timecodes are available in format metadata
|
||||
(@option{-show_format}, see @var{TAG:timecode}).
|
||||
|
||||
@end itemize
|
||||
@c man end TIMECODE
|
||||
|
||||
@include config.texi
|
||||
@ifset config-all
|
||||
@ifset config-avutil
|
||||
@include utils.texi
|
||||
@end ifset
|
||||
@ifset config-avcodec
|
||||
@include codecs.texi
|
||||
@include bitstream_filters.texi
|
||||
@end ifset
|
||||
@ifset config-avformat
|
||||
@include formats.texi
|
||||
@include protocols.texi
|
||||
@end ifset
|
||||
@ifset config-avdevice
|
||||
@include devices.texi
|
||||
@end ifset
|
||||
@ifset config-swresample
|
||||
@include resampler.texi
|
||||
@end ifset
|
||||
@ifset config-swscale
|
||||
@include scaler.texi
|
||||
@end ifset
|
||||
@ifset config-avfilter
|
||||
@include filters.texi
|
||||
@end ifset
|
||||
@end ifset
|
||||
|
||||
@chapter See Also
|
||||
|
||||
@ifhtml
|
||||
@ifset config-all
|
||||
@url{ffprobe.html,ffprobe},
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
@url{ffprobe-all.html,ffprobe-all},
|
||||
@end ifset
|
||||
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffserver.html,ffserver},
|
||||
@url{ffmpeg-utils.html,ffmpeg-utils},
|
||||
@url{ffmpeg-scaler.html,ffmpeg-scaler},
|
||||
@url{ffmpeg-resampler.html,ffmpeg-resampler},
|
||||
@url{ffmpeg-codecs.html,ffmpeg-codecs},
|
||||
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
|
||||
@url{ffmpeg-formats.html,ffmpeg-formats},
|
||||
@url{ffmpeg-devices.html,ffmpeg-devices},
|
||||
@url{ffmpeg-protocols.html,ffmpeg-protocols},
|
||||
@url{ffmpeg-filters.html,ffmpeg-filters}
|
||||
@end ifhtml
|
||||
|
||||
@ifnothtml
|
||||
@ifset config-all
|
||||
ffprobe(1),
|
||||
@end ifset
|
||||
@ifset config-not-all
|
||||
ffprobe-all(1),
|
||||
@end ifset
|
||||
ffmpeg(1), ffplay(1), ffserver(1),
|
||||
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
|
||||
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
|
||||
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
|
||||
@end ifnothtml
|
||||
|
||||
@include authors.texi
|
||||
|
||||
@ignore
|
||||
|
||||
@setfilename ffprobe
|
||||
@settitle ffprobe media prober
|
||||
|
||||
@end ignore
|
||||
|
||||
@bye
|
@ -1,245 +0,0 @@
|
||||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
|
||||
<xsd:schema xmlns:xsd="http://www.w3.org/2001/XMLSchema"
|
||||
targetNamespace="http://www.ffmpeg.org/schema/ffprobe"
|
||||
xmlns:ffprobe="http://www.ffmpeg.org/schema/ffprobe">
|
||||
|
||||
<xsd:element name="ffprobe" type="ffprobe:ffprobeType"/>
|
||||
|
||||
<xsd:complexType name="ffprobeType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packets" type="ffprobe:packetsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="frames" type="ffprobe:framesType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="programs" type="ffprobe:programsType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="chapters" type="ffprobe:chaptersType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="format" type="ffprobe:formatType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="error" type="ffprobe:errorType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="program_version" type="ffprobe:programVersionType" minOccurs="0" maxOccurs="1" />
|
||||
<xsd:element name="library_versions" type="ffprobe:libraryVersionsType" minOccurs="0" maxOccurs="1" />
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="packet" type="ffprobe:packetType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="framesType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="frame" type="ffprobe:frameType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="packetType">
|
||||
<xsd:attribute name="codec_type" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="stream_index" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float" />
|
||||
<xsd:attribute name="dts" type="xsd:long" />
|
||||
<xsd:attribute name="dts_time" type="xsd:float" />
|
||||
<xsd:attribute name="duration" type="xsd:long" />
|
||||
<xsd:attribute name="duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="convergence_duration" type="xsd:long" />
|
||||
<xsd:attribute name="convergence_duration_time" type="xsd:float" />
|
||||
<xsd:attribute name="size" type="xsd:long" use="required" />
|
||||
<xsd:attribute name="pos" type="xsd:long" />
|
||||
<xsd:attribute name="flags" type="xsd:string" use="required" />
|
||||
<xsd:attribute name="data" type="xsd:string" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="frameType">
|
||||
<xsd:attribute name="media_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="key_frame" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pts" type="xsd:long" />
|
||||
<xsd:attribute name="pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_pts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_dts" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_dts_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_duration" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_duration_time" type="xsd:float"/>
|
||||
<xsd:attribute name="pkt_pos" type="xsd:long" />
|
||||
<xsd:attribute name="pkt_size" type="xsd:int" />
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="nb_samples" type="xsd:long" />
|
||||
<xsd:attribute name="channels" type="xsd:int" />
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:long" />
|
||||
<xsd:attribute name="height" type="xsd:long" />
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pict_type" type="xsd:string"/>
|
||||
<xsd:attribute name="coded_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="display_picture_number" type="xsd:long" />
|
||||
<xsd:attribute name="interlaced_frame" type="xsd:int" />
|
||||
<xsd:attribute name="top_field_first" type="xsd:int" />
|
||||
<xsd:attribute name="repeat_pict" type="xsd:int" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="stream" type="ffprobe:streamType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="program" type="ffprobe:programType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamDispositionType">
|
||||
<xsd:attribute name="default" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="dub" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="original" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="comment" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="lyrics" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="karaoke" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="forced" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="hearing_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="visual_impaired" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="clean_effects" type="xsd:int" use="required" />
|
||||
<xsd:attribute name="attached_pic" type="xsd:int" use="required" />
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="streamType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="disposition" type="ffprobe:streamDispositionType" minOccurs="0" maxOccurs="1"/>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="index" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="codec_name" type="xsd:string" />
|
||||
<xsd:attribute name="codec_long_name" type="xsd:string" />
|
||||
<xsd:attribute name="profile" type="xsd:string" />
|
||||
<xsd:attribute name="codec_type" type="xsd:string" />
|
||||
<xsd:attribute name="codec_time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="codec_tag_string" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="extradata" type="xsd:string" />
|
||||
|
||||
<!-- video attributes -->
|
||||
<xsd:attribute name="width" type="xsd:int"/>
|
||||
<xsd:attribute name="height" type="xsd:int"/>
|
||||
<xsd:attribute name="has_b_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="sample_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="display_aspect_ratio" type="xsd:string"/>
|
||||
<xsd:attribute name="pix_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="level" type="xsd:int"/>
|
||||
<xsd:attribute name="timecode" type="xsd:string"/>
|
||||
|
||||
<!-- audio attributes -->
|
||||
<xsd:attribute name="sample_fmt" type="xsd:string"/>
|
||||
<xsd:attribute name="sample_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="channels" type="xsd:int"/>
|
||||
<xsd:attribute name="channel_layout" type="xsd:string"/>
|
||||
<xsd:attribute name="bits_per_sample" type="xsd:int"/>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:string"/>
|
||||
<xsd:attribute name="r_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="avg_frame_rate" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration_ts" type="xsd:long"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_frames" type="xsd:int"/>
|
||||
<xsd:attribute name="nb_read_packets" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
<xsd:element name="streams" type="ffprobe:streamsType" minOccurs="0" maxOccurs="1"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="program_id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="program_num" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="start_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end_pts" type="xsd:long"/>
|
||||
<xsd:attribute name="pmt_pid" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="pcr_pid" type="xsd:int" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="formatType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="filename" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="nb_streams" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="nb_programs" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="format_name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="format_long_name" type="xsd:string"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="duration" type="xsd:float"/>
|
||||
<xsd:attribute name="size" type="xsd:long"/>
|
||||
<xsd:attribute name="bit_rate" type="xsd:long"/>
|
||||
<xsd:attribute name="probe_score" type="xsd:int"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="tagType">
|
||||
<xsd:attribute name="key" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="value" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="errorType">
|
||||
<xsd:attribute name="code" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="string" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="programVersionType">
|
||||
<xsd:attribute name="version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="copyright" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_date" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="build_time" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_type" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="compiler_version" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="configuration" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chaptersType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="chapter" type="ffprobe:chapterType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="chapterType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="tag" type="ffprobe:tagType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
|
||||
<xsd:attribute name="id" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="time_base" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="start" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="start_time" type="xsd:float"/>
|
||||
<xsd:attribute name="end" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="end_time" type="xsd:float" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionType">
|
||||
<xsd:attribute name="name" type="xsd:string" use="required"/>
|
||||
<xsd:attribute name="major" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="minor" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="micro" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="version" type="xsd:int" use="required"/>
|
||||
<xsd:attribute name="ident" type="xsd:string" use="required"/>
|
||||
</xsd:complexType>
|
||||
|
||||
<xsd:complexType name="libraryVersionsType">
|
||||
<xsd:sequence>
|
||||
<xsd:element name="library_version" type="ffprobe:libraryVersionType" minOccurs="0" maxOccurs="unbounded"/>
|
||||
</xsd:sequence>
|
||||
</xsd:complexType>
|
||||
</xsd:schema>
|
@ -1,371 +0,0 @@
# Port on which the server is listening. You must select a different
# port from your standard HTTP web server if it is running on the same
# computer.
Port 8090

# Address on which the server is bound. Only useful if you have
# several network interfaces.
BindAddress 0.0.0.0

# Number of simultaneous HTTP connections that can be handled. It has
# to be defined *before* the MaxClients parameter, since it defines the
# MaxClients maximum limit.
MaxHTTPConnections 2000

# Number of simultaneous requests that can be handled. Since FFServer
# is very fast, it is more likely that you will want to leave this high
# and use MaxBandwidth, below.
MaxClients 1000

# This is the maximum amount of kbit/sec that you are prepared to
# consume when streaming to clients.
MaxBandwidth 1000

# Access log file (uses standard Apache log file format)
# '-' is the standard output.
CustomLog -

##################################################################
# Definition of the live feeds. Each live feed contains one video
# and/or audio sequence coming from an ffmpeg encoder or another
# ffserver. This sequence may be encoded simultaneously with several
# codecs at several resolutions.

<Feed feed1.ffm>

# You must use 'ffmpeg' to send a live feed to ffserver. In this
# example, you can type:
#
# ffmpeg http://localhost:8090/feed1.ffm

# ffserver can also do time shifting. It means that it can stream any
# previously recorded live stream. The request should contain:
# "http://xxxx?date=[YYYY-MM-DDT][[HH:]MM:]SS[.m...]". You must specify
# a path where the feed is stored on disk. You also specify the
# maximum size of the feed, where zero means unlimited. Default:
# File=/tmp/feed_name.ffm FileMaxSize=5M
File /tmp/feed1.ffm
FileMaxSize 200K

# You could specify
# ReadOnlyFile /saved/specialvideo.ffm
# This marks the file as readonly and it will not be deleted or updated.

# Specify launch in order to start ffmpeg automatically.
# First ffmpeg must be defined with an appropriate path if needed,
# after that options can follow, but avoid adding the http:// field
#Launch ffmpeg

# Only allow connections from localhost to the feed.
ACL allow 127.0.0.1

</Feed>


##################################################################
# Now you can define each stream which will be generated from the
# original audio and video stream. Each format has a filename (here
# 'test1.mpg'). FFServer will send this stream when answering a
# request containing this filename.

<Stream test1.mpg>

# coming from live feed 'feed1'
Feed feed1.ffm

# Format of the stream: you can choose among:
# mpeg : MPEG-1 multiplexed video and audio
# mpegvideo : only MPEG-1 video
# mp2 : MPEG-2 audio (use AudioCodec to select layer 2 and 3 codec)
# ogg : Ogg format (Vorbis audio codec)
# rm : RealNetworks-compatible stream. Multiplexed audio and video.
# ra : RealNetworks-compatible stream. Audio only.
# mpjpeg : Multipart JPEG (works with Netscape without any plugin)
# jpeg : Generate a single JPEG image.
# asf : ASF compatible streaming (Windows Media Player format).
# swf : Macromedia Flash compatible stream
# avi : AVI format (MPEG-4 video, MPEG audio sound)
Format mpeg

# Bitrate for the audio stream. Codecs usually support only a few
# different bitrates.
AudioBitRate 32

# Number of audio channels: 1 = mono, 2 = stereo
AudioChannels 1

# Sampling frequency for audio. When using low bitrates, you should
# lower this frequency to 22050 or 11025. The supported frequencies
# depend on the selected audio codec.
AudioSampleRate 44100

# Bitrate for the video stream
VideoBitRate 64

# Ratecontrol buffer size
VideoBufferSize 40

# Number of frames per second
VideoFrameRate 3

# Size of the video frame: WxH (default: 160x128)
# The following abbreviations are defined: sqcif, qcif, cif, 4cif, qqvga,
# qvga, vga, svga, xga, uxga, qxga, sxga, qsxga, hsxga, wvga, wxga, wsxga,
# wuxga, woxga, wqsxga, wquxga, whsxga, whuxga, cga, ega, hd480, hd720,
# hd1080
VideoSize 160x128

# Transmit only intra frames (useful for low bitrates, but kills frame rate).
#VideoIntraOnly

# If non-intra only, an intra frame is transmitted every VideoGopSize
# frames. Video synchronization can only begin at an intra frame.
VideoGopSize 12

# More MPEG-4 parameters
# VideoHighQuality
# Video4MotionVector

# Choose your codecs:
#AudioCodec mp2
#VideoCodec mpeg1video

# Suppress audio
#NoAudio

# Suppress video
#NoVideo

#VideoQMin 3
#VideoQMax 31

# Set this to the number of seconds backwards in time to start. Note that
# most players will buffer 5-10 seconds of video, and also you need to allow
# for a keyframe to appear in the data stream.
#Preroll 15

# ACL:

# You can allow ranges of addresses (or single addresses)
#ACL ALLOW <first address> <last address>

# You can deny ranges of addresses (or single addresses)
#ACL DENY <first address> <last address>

# You can repeat the ACL allow/deny as often as you like. It is on a per
# stream basis. The first match defines the action. If there are no matches,
# then the default is the inverse of the last ACL statement.
#
# Thus 'ACL allow localhost' only allows access from localhost.
# 'ACL deny 1.0.0.0 1.255.255.255' would deny the whole of network 1 and
# allow everybody else.

</Stream>


##################################################################
# Example streams


# Multipart JPEG

#<Stream test.mjpg>
#Feed feed1.ffm
#Format mpjpeg
#VideoFrameRate 2
#VideoIntraOnly
#NoAudio
#Strict -1
#</Stream>


# Single JPEG

#<Stream test.jpg>
#Feed feed1.ffm
#Format jpeg
#VideoFrameRate 2
#VideoIntraOnly
##VideoSize 352x240
#NoAudio
#Strict -1
#</Stream>


# Flash

#<Stream test.swf>
#Feed feed1.ffm
#Format swf
#VideoFrameRate 2
#VideoIntraOnly
#NoAudio
#</Stream>


# ASF compatible

<Stream test.asf>
Feed feed1.ffm
Format asf
VideoFrameRate 15
VideoSize 352x240
VideoBitRate 256
VideoBufferSize 40
VideoGopSize 30
AudioBitRate 64
StartSendOnKey
</Stream>


# MP3 audio

#<Stream test.mp3>
#Feed feed1.ffm
#Format mp2
#AudioCodec mp3
#AudioBitRate 64
#AudioChannels 1
#AudioSampleRate 44100
#NoVideo
#</Stream>


# Ogg Vorbis audio

#<Stream test.ogg>
#Feed feed1.ffm
#Title "Stream title"
#AudioBitRate 64
#AudioChannels 2
#AudioSampleRate 44100
#NoVideo
#</Stream>


# Real with audio only at 32 kbits

#<Stream test.ra>
#Feed feed1.ffm
#Format rm
#AudioBitRate 32
#NoVideo
#NoAudio
#</Stream>


# Real with audio and video at 64 kbits

#<Stream test.rm>
#Feed feed1.ffm
#Format rm
#AudioBitRate 32
#VideoBitRate 128
#VideoFrameRate 25
#VideoGopSize 25
#NoAudio
#</Stream>


##################################################################
# A stream coming from a file: you only need to set the input
# filename and optionally a new format. Supported conversions:
# AVI -> ASF

#<Stream file.rm>
#File "/usr/local/httpd/htdocs/tlive.rm"
#NoAudio
#</Stream>

#<Stream file.asf>
#File "/usr/local/httpd/htdocs/test.asf"
#NoAudio
#Author "Me"
#Copyright "Super MegaCorp"
#Title "Test stream from disk"
#Comment "Test comment"
#</Stream>


##################################################################
# RTSP examples
#
# You can access this stream with the RTSP URL:
# rtsp://localhost:5454/test1-rtsp.mpg
#
# A non-standard RTSP redirector is also created. Its URL is:
# http://localhost:8090/test1-rtsp.rtsp

#<Stream test1-rtsp.mpg>
#Format rtp
#File "/usr/local/httpd/htdocs/test1.mpg"
#</Stream>


# Transcode an incoming live feed to another live feed,
# using libx264 and video presets

#<Stream live.h264>
#Format rtp
#Feed feed1.ffm
#VideoCodec libx264
#VideoFrameRate 24
#VideoBitRate 100
#VideoSize 480x272
#AVPresetVideo default
#AVPresetVideo baseline
#AVOptionVideo flags +global_header
#
#AudioCodec libfaac
#AudioBitRate 32
#AudioChannels 2
#AudioSampleRate 22050
#AVOptionAudio flags +global_header
#</Stream>

##################################################################
# SDP/multicast examples
#
# If you want to send your stream in multicast, you must set the
# multicast address with MulticastAddress. The port and the TTL can
# also be set.
#
# An SDP file is automatically generated by ffserver by adding the
# 'sdp' extension to the stream name (here
# http://localhost:8090/test1-sdp.sdp). You should usually give this
# file to your player to play the stream.
#
# The 'NoLoop' option can be used to avoid looping when the stream is
# terminated.

#<Stream test1-sdp.mpg>
#Format rtp
#File "/usr/local/httpd/htdocs/test1.mpg"
#MulticastAddress 224.124.0.1
#MulticastPort 5000
#MulticastTTL 16
#NoLoop
#</Stream>


##################################################################
# Special streams

# Server status

<Stream stat.html>
Format status

# Only allow local people to get the status
ACL allow localhost
ACL allow 192.168.0.0 192.168.255.255

#FaviconURL http://pond1.gladstonefamily.net:8080/favicon.ico
</Stream>


# Redirect index.html to the appropriate site

<Redirect index.html>
URL http://www.ffmpeg.org/
</Redirect>
@ -1,320 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle ffserver Documentation
@titlepage
@center @titlefont{ffserver Documentation}
@end titlepage

@top

@contents

@chapter Synopsis

ffserver [@var{options}]

@chapter Description
@c man begin DESCRIPTION

@command{ffserver} is a streaming server for both audio and video. It
supports several live feeds, streaming from files and time shifting on
live feeds (you can seek to positions in the past on each live feed,
provided you specify a big enough feed storage in
@file{ffserver.conf}).

@command{ffserver} receives prerecorded files or FFM streams from some
@command{ffmpeg} instance as input, then streams them over
RTP/RTSP/HTTP.

An @command{ffserver} instance will listen on some port as specified
in the configuration file. You can launch one or more instances of
@command{ffmpeg} and send one or more FFM streams to the port where
ffserver is expecting to receive them. Alternately, you can make
@command{ffserver} launch such @command{ffmpeg} instances at startup.

Input streams are called feeds, and each one is specified by a
@code{<Feed>} section in the configuration file.

For each feed you can have different output streams in various
formats, each one specified by a @code{<Stream>} section in the
configuration file.

@section Status stream

ffserver supports an HTTP interface which exposes the current status
of the server.

Simply point your browser to the address of the special status stream
specified in the configuration file.

For example if you have:
@example
<Stream status.html>
Format status

# Only allow local people to get the status
ACL allow localhost
ACL allow 192.168.0.0 192.168.255.255
</Stream>
@end example

then the server will post a page with the status information when
the special stream @file{status.html} is requested.

@section What can this do?

When properly configured and running, you can capture video and audio in real
time from a suitable capture card, and stream it out over the Internet to
either Windows Media Player or RealAudio player (with some restrictions).

It can also stream from files, though that is currently broken. Very often, a
web server can be used to serve up the files just as well.

It can stream prerecorded video from .ffm files, though it is somewhat tricky
to make it work correctly.

@section How do I make it work?

First, build the kit. It *really* helps to have installed LAME first. Then when
you run the ffserver ./configure, make sure that you have the
@code{--enable-libmp3lame} flag turned on.

LAME is important as it allows for streaming audio to Windows Media Player.
Don't ask why the other audio types do not work.

As a simple test, just run the following two command lines where INPUTFILE
is some file which you can decode with ffmpeg:

@example
ffserver -f doc/ffserver.conf &
ffmpeg -i INPUTFILE http://localhost:8090/feed1.ffm
@end example

At this point you should be able to go to your Windows machine and fire up
Windows Media Player (WMP). Go to Open URL and enter

@example
http://<linuxbox>:8090/test.asf
@end example

You should (after a short delay) see video and hear audio.

WARNING: trying to stream test1.mpg doesn't work with WMP as it tries to
transfer the entire file before starting to play.
The same is true of AVI files.

@section What happens next?

You should edit the ffserver.conf file to suit your needs (in terms of
frame rates etc). Then install ffserver and ffmpeg, write a script to start
them up, and off you go.

@section Troubleshooting

@subsection I don't hear any audio, but video is fine.

Maybe you didn't install LAME, or got your ./configure statement wrong. Check
the ffmpeg output to see if a line referring to MP3 is present. If not, then
your configuration was incorrect. If it is, then maybe your wiring is not
set up correctly. Maybe the sound card is not getting data from the right
input source. Maybe you have a really awful audio interface (like I do)
that only captures in stereo and also requires that one channel be flipped.
If you are one of these people, then export 'AUDIO_FLIP_LEFT=1' before
starting ffmpeg.

@subsection The audio and video lose sync after a while.

Yes, they do.

@subsection After a long while, the video update rate goes way down in WMP.

Yes, it does. Who knows why?

@subsection WMP 6.4 behaves differently to WMP 7.

Yes, it does. Any thoughts on this would be gratefully received. These
differences extend to embedding WMP into a web page. [There are two
object IDs that you can use: The old one, which does not play well, and
the new one, which does (both tested on the same system). However,
I suspect that the new one is not available unless you have installed WMP 7].

@section What else can it do?

You can replay video from .ffm files that was recorded earlier.
However, there are a number of caveats, including the fact that the
ffserver parameters must match the original parameters used to record the
file. If they do not, then ffserver deletes the file before recording into it.
(Now that I write this, it seems broken).

You can fiddle with many of the codec choices and encoding parameters, and
there are a bunch more parameters that you cannot control. Post a message
to the mailing list if there are some 'must have' parameters. Look in
ffserver.conf for a list of the currently available controls.

It will automatically generate the ASX or RAM files that are often used
in browsers. These files are actually redirections to the underlying ASF
or RM file. The reason for this is that the browser often fetches the
entire file before starting up the external viewer. The redirection files
are very small and can be transferred quickly. [The stream itself is
often 'infinite' and thus the browser tries to download it and never
finishes.]

@section Tips

* When you connect to a live stream, most players (WMP, RA, etc) want to
buffer a certain number of seconds of material so that they can display the
signal continuously. However, ffserver (by default) starts sending data
in realtime. This means that there is a pause of a few seconds while the
buffering is being done by the player. The good news is that this can be
cured by adding a '?buffer=5' to the end of the URL. This means that the
stream should start 5 seconds in the past -- and so the first 5 seconds
of the stream are sent as fast as the network will allow. It will then
slow down to real time. This noticeably improves the startup experience.

You can also add a 'Preroll 15' statement into the ffserver.conf that will
add the 15 second prebuffering on all requests that do not otherwise
specify a time. In addition, ffserver will skip frames until a key_frame
is found. This further reduces the startup delay by not transferring data
that will be discarded.

* You may want to adjust the MaxBandwidth in the ffserver.conf to limit
the amount of bandwidth consumed by live streams.

@section Why does the ?buffer / Preroll stop working after a time?

It turns out that (on my machine at least) the number of frames successfully
grabbed is marginally less than the number that ought to be grabbed. This
means that the timestamp in the encoded data stream gets behind realtime.
This means that if you say 'Preroll 10', then when the stream gets 10
or more seconds behind, there is no Preroll left.

Fixing this requires a change in the internals of how timestamps are
handled.

@section Does the @code{?date=} stuff work?

Yes (subject to the limitation outlined above). Also note that whenever you
start ffserver, it deletes the ffm file (if any parameters have changed),
thus wiping out what you had recorded before.

The format of the @code{?date=xxxxxx} is fairly flexible. You should use one
of the following formats (the 'T' is literal):

@example
* YYYY-MM-DDTHH:MM:SS (localtime)
* YYYY-MM-DDTHH:MM:SSZ (UTC)
@end example

You can omit the YYYY-MM-DD, and then it refers to the current day. However
note that @samp{?date=16:00:00} refers to 16:00 on the current day -- this
may be in the future and so is unlikely to be useful.

You use this by adding the ?date= to the end of the URL for the stream.
For example: @samp{http://localhost:8080/test.asf?date=2002-07-26T23:05:00}.
@c man end

@section What is FFM, FFM2

FFM and FFM2 are formats used by ffserver. They allow storing a wide variety of
video and audio streams and encoding options, and can store a moving time segment
of an infinite movie or a whole movie.

FFM is version specific, and there is limited compatibility of FFM files
generated by one version of ffmpeg/ffserver and another version of
ffmpeg/ffserver. It may work but it is not guaranteed to work.

FFM2 is extensible while maintaining compatibility and should work between
differing versions of tools. FFM2 is the default.

@chapter Options
@c man begin OPTIONS

@include fftools-common-opts.texi

@section Main options

@table @option
@item -f @var{configfile}
Use @file{configfile} instead of @file{/etc/ffserver.conf}.
@item -n
Enable no-launch mode. This option disables all the Launch directives
within the various <Stream> sections. Since ffserver will not launch
any ffmpeg instances, you will have to launch them manually.
@item -d
Enable debug mode. This option increases log verbosity and directs log
messages to stdout.
@end table
@c man end

@include config.texi
@ifset config-all
@ifset config-avutil
@include utils.texi
@end ifset
@ifset config-avcodec
@include codecs.texi
@include bitstream_filters.texi
@end ifset
@ifset config-avformat
@include formats.texi
@include protocols.texi
@end ifset
@ifset config-avdevice
@include devices.texi
@end ifset
@ifset config-swresample
@include resampler.texi
@end ifset
@ifset config-swscale
@include scaler.texi
@end ifset
@ifset config-avfilter
@include filters.texi
@end ifset
@end ifset

@chapter See Also

@ifhtml
@ifset config-all
@url{ffserver.html,ffserver},
@end ifset
@ifset config-not-all
@url{ffserver-all.html,ffserver-all},
@end ifset
the @file{doc/ffserver.conf} example,
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe},
@url{ffmpeg-utils.html,ffmpeg-utils},
@url{ffmpeg-scaler.html,ffmpeg-scaler},
@url{ffmpeg-resampler.html,ffmpeg-resampler},
@url{ffmpeg-codecs.html,ffmpeg-codecs},
@url{ffmpeg-bitstream-filters.html,ffmpeg-bitstream-filters},
@url{ffmpeg-formats.html,ffmpeg-formats},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{ffmpeg-filters.html,ffmpeg-filters}
@end ifhtml

@ifnothtml
@ifset config-all
ffserver(1),
@end ifset
@ifset config-not-all
ffserver-all(1),
@end ifset
the @file{doc/ffserver.conf} example, ffmpeg(1), ffplay(1), ffprobe(1),
ffmpeg-utils(1), ffmpeg-scaler(1), ffmpeg-resampler(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1), ffmpeg-formats(1),
ffmpeg-devices(1), ffmpeg-protocols(1), ffmpeg-filters(1)
@end ifnothtml

@include authors.texi

@ignore

@setfilename ffserver
@settitle ffserver video server

@end ignore

@bye
@ -1,292 +0,0 @@
All the numerical options, if not specified otherwise, accept a string
representing a number as input, which may be followed by one of the SI
unit prefixes, for example: 'K', 'M', or 'G'.

If 'i' is appended to the SI unit prefix, the complete prefix will be
interpreted as a unit prefix for binary multiples, which are based on
powers of 1024 instead of powers of 1000. Appending 'B' to the SI unit
prefix multiplies the value by 8. This allows using, for example:
'KB', 'MiB', 'G' and 'B' as number suffixes.

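The same suffix convention is implemented by @code{av_strtod()} from
@file{libavutil/eval.h}. The following is only an illustrative sketch, not
part of the original option documentation; it assumes nothing beyond that
one public libavutil function:

@example
#include <stdio.h>
#include <libavutil/eval.h>

int main(void)
@{
    /* av_strtod() understands the SI ('K', 'M', 'G'),
       binary ('i') and byte ('B') suffixes described above */
    const char *examples[] = @{ "5M", "200K", "64KB", "1MiB" @};
    for (int i = 0; i < 4; i++) @{
        char *tail;
        double v = av_strtod(examples[i], &tail);
        printf("%-5s -> %.0f\n", examples[i], v);
    @}
    return 0;
@}
@end example
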
Options which do not take arguments are boolean options, and set the
corresponding value to true. They can be set to false by prefixing
the option name with "no". For example, using "-nofoo"
will set the boolean option with name "foo" to false.

@anchor{Stream specifiers}
@section Stream specifiers
Some options are applied per-stream, e.g. bitrate or codec. Stream specifiers
are used to precisely specify which stream(s) a given option belongs to.

A stream specifier is a string generally appended to the option name and
separated from it by a colon. E.g. @code{-codec:a:1 ac3} contains the
@code{a:1} stream specifier, which matches the second audio stream. Therefore, it
would select the ac3 codec for the second audio stream.

A stream specifier can match several streams, so that the option is applied to all
of them. E.g. the stream specifier in @code{-b:a 128k} matches all audio
streams.

An empty stream specifier matches all streams. For example, @code{-codec copy}
or @code{-codec: copy} would copy all the streams without reencoding.

Possible forms of stream specifiers are:
@table @option
@item @var{stream_index}
Matches the stream with this index. E.g. @code{-threads:1 4} would set the
thread count for the second stream to 4.
@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of the following: 'v' for video, 'a' for audio, 's' for subtitle,
'd' for data, and 't' for attachments. If @var{stream_index} is given, then it matches
stream number @var{stream_index} of this type. Otherwise, it matches all
streams of this type.
@item p:@var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then it matches the stream with number @var{stream_index}
in the program with the id @var{program_id}. Otherwise, it matches all streams in the
program.
@item #@var{stream_id}
Matches the stream by a format-specific ID.
@end table

@section Generic options

These options are shared amongst the ff* tools.

@table @option

@item -L
Show license.

@item -h, -?, -help, --help [@var{arg}]
Show help. An optional parameter may be specified to print help about a specific
item. If no argument is specified, only basic (non advanced) tool
options are shown.

Possible values of @var{arg} are:
@table @option
@item long
Print advanced tool options in addition to the basic tool options.

@item full
Print complete list of options, including shared and private options
for encoders, decoders, demuxers, muxers, filters, etc.

@item decoder=@var{decoder_name}
Print detailed information about the decoder named @var{decoder_name}. Use the
@option{-decoders} option to get a list of all decoders.

@item encoder=@var{encoder_name}
Print detailed information about the encoder named @var{encoder_name}. Use the
@option{-encoders} option to get a list of all encoders.

@item demuxer=@var{demuxer_name}
Print detailed information about the demuxer named @var{demuxer_name}. Use the
@option{-formats} option to get a list of all demuxers and muxers.

@item muxer=@var{muxer_name}
Print detailed information about the muxer named @var{muxer_name}. Use the
@option{-formats} option to get a list of all muxers and demuxers.

@item filter=@var{filter_name}
Print detailed information about the filter named @var{filter_name}. Use the
@option{-filters} option to get a list of all filters.
@end table

@item -version
Show version.

@item -formats
Show available formats.

@item -codecs
Show all codecs known to libavcodec.

Note that the term 'codec' is used throughout this documentation as a shortcut
for what is more correctly called a media bitstream format.

@item -decoders
Show available decoders.

@item -encoders
Show all available encoders.

@item -bsfs
Show available bitstream filters.

@item -protocols
Show available protocols.

@item -filters
Show available libavfilter filters.

@item -pix_fmts
Show available pixel formats.

@item -sample_fmts
Show available sample formats.

@item -layouts
Show channel names and standard channel layouts.

@item -colors
Show recognized color names.

@item -loglevel [repeat+]@var{loglevel} | -v [repeat+]@var{loglevel}
Set the logging level used by the library.
Adding "repeat+" indicates that repeated log output should not be compressed
to the first line and the "Last message repeated n times" line will be
omitted. "repeat" can also be used alone.
If "repeat" is used alone, and with no prior loglevel set, the default
loglevel will be used. If multiple loglevel parameters are given, using
'repeat' will not change the loglevel.
@var{loglevel} is a number or a string containing one of the following values:
@table @samp
@item quiet
Show nothing at all; be silent.
@item panic
Only show fatal errors which could lead the process to crash, such as
an assert failure. This is not currently used for anything.
@item fatal
Only show fatal errors. These are errors after which the process absolutely
cannot continue.
@item error
Show all errors, including ones which can be recovered from.
@item warning
Show all warnings and errors. Any message related to possibly
incorrect or unexpected events will be shown.
@item info
Show informative messages during processing. This is in addition to
warnings and errors. This is the default value.
@item verbose
Same as @code{info}, except more verbose.
@item debug
Show everything, including debugging information.
@end table

By default the program logs to stderr. If coloring is supported by the
terminal, colors are used to mark errors and warnings. Log coloring
can be disabled setting the environment variable
@env{AV_LOG_FORCE_NOCOLOR} or @env{NO_COLOR}, or can be forced setting
the environment variable @env{AV_LOG_FORCE_COLOR}.
The use of the environment variable @env{NO_COLOR} is deprecated and
will be dropped in a following FFmpeg version.

@item -report
Dump full command line and console output to a file named
@code{@var{program}-@var{YYYYMMDD}-@var{HHMMSS}.log} in the current
directory.
This file can be useful for bug reports.
It also implies @code{-loglevel verbose}.

Setting the environment variable @code{FFREPORT} to any value has the
same effect. If the value is a ':'-separated key=value sequence, these
options will affect the report; option values must be escaped if they
contain special characters or the options delimiter ':' (see the
``Quoting and escaping'' section in the ffmpeg-utils manual). The
following option is recognized:
@table @option
@item file
set the file name to use for the report; @code{%p} is expanded to the name
of the program, @code{%t} is expanded to a timestamp, @code{%%} is expanded
to a plain @code{%}
@end table

Errors in parsing the environment variable are not fatal, and will not
appear in the report.

@item -cpuflags flags (@emph{global})
Allows setting and clearing CPU flags. This option is intended
for testing. Do not use it unless you know what you're doing.
@example
ffmpeg -cpuflags -sse+mmx ...
ffmpeg -cpuflags mmx ...
ffmpeg -cpuflags 0 ...
@end example
Possible flags for this option are:
@table @samp
@item x86
@table @samp
@item mmx
@item mmxext
@item sse
@item sse2
@item sse2slow
@item sse3
@item sse3slow
@item ssse3
@item atom
@item sse4.1
@item sse4.2
@item avx
@item xop
@item fma4
@item 3dnow
@item 3dnowext
@item cmov
@end table
@item ARM
@table @samp
@item armv5te
@item armv6
@item armv6t2
@item vfp
@item vfpv3
@item neon
@end table
@item PowerPC
@table @samp
@item altivec
@end table
@item Specific Processors
@table @samp
@item pentium2
@item pentium3
@item pentium4
@item k6
@item k62
@item athlon
@item athlonxp
@item k8
@end table
@end table

@item -opencl_options options (@emph{global})
Set OpenCL environment options. This option is only available when
FFmpeg has been compiled with @code{--enable-opencl}.

@var{options} must be a list of @var{key}=@var{value} option pairs
separated by ':'. See the ``OpenCL Options'' section in the
ffmpeg-utils manual for the list of supported options.
@end table

@section AVOptions

These options are provided directly by the libavformat, libavdevice and
libavcodec libraries. To see the list of available AVOptions, use the
@option{-help} option. They are separated into two categories:
@table @option
@item generic
These options can be set for any container, codec or device. Generic options
are listed under AVFormatContext options for containers/devices and under
AVCodecContext options for codecs.
@item private
These options are specific to the given container, device or codec. Private
options are listed under their corresponding containers/devices/codecs.
@end table

For example to write an ID3v2.3 header instead of a default ID3v2.4 to
an MP3 file, use the @option{id3v2_version} private option of the MP3
muxer:
@example
ffmpeg -i input.flac -id3v2_version 3 out.mp3
@end example

All codec AVOptions are per-stream, and thus a stream specifier
should be attached to them.

Note: the @option{-nooption} syntax cannot be used for boolean
AVOptions, use @option{-option 0}/@option{-option 1}.

Note: the old undocumented way of specifying per-stream AVOptions by
prepending v/a/s to the options name is now obsolete and will be
removed soon.
@ -1,270 +0,0 @@
Filter design
=============

This document explains guidelines that should be observed (or ignored with
good reason) when writing filters for libavfilter.

In this document, the word “frame” indicates either a video frame or a group
of audio samples, as stored in an AVFilterBuffer structure.


Format negotiation
==================

The query_formats method should set, for each input and each output link,
the list of supported formats.

For video links, that means pixel format. For audio links, that means
channel layout, sample format (the sample packing is implied by the sample
format) and sample rate.

The lists are not just lists, they are references to shared objects. When
the negotiation mechanism computes the intersection of the formats
supported at each end of a link, all references to both lists are replaced
with a reference to the intersection. And when a single format is
eventually chosen for a link amongst the remaining list, again, all
references to the list are updated.

That means that if a filter requires that its input and output have the
same format amongst a supported list, all it has to do is use a reference
to the same list of formats.

query_formats can leave some formats unset and return AVERROR(EAGAIN) to
cause the negotiation mechanism to try again later. That can be used by
filters with complex requirements to use the format negotiated on one link
to set the formats supported on another.

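As an illustration only (this sketch is not part of the original text), a
minimal query_formats for a video filter that accepts a fixed set of pixel
formats could look roughly like the following; ff_make_format_list() and
ff_set_common_formats() are libavfilter-internal helpers whose exact
signatures vary between versions:

    static int query_formats(AVFilterContext *ctx)
    {
        /* list terminated by AV_PIX_FMT_NONE, as expected by ff_make_format_list() */
        static const enum AVPixelFormat pix_fmts[] = {
            AV_PIX_FMT_YUV420P, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
        };

        /* share one list between all inputs and outputs, so negotiation
           forces the same format on both sides of the filter */
        ff_set_common_formats(ctx, ff_make_format_list(pix_fmts));
        return 0;
    }
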
Buffer references ownership and permissions
===========================================

Principle
---------

Audio and video data are voluminous; the buffer and buffer reference
mechanism is intended to avoid, as much as possible, expensive copies of
that data while still allowing the filters to produce correct results.

The data is stored in buffers represented by AVFilterBuffer structures.
They must not be accessed directly, but through references stored in
AVFilterBufferRef structures. Several references can point to the
same buffer; the buffer is automatically deallocated once all
corresponding references have been destroyed.

The characteristics of the data (resolution, sample rate, etc.) are
stored in the reference; different references for the same buffer can
show different characteristics. In particular, a video reference can
point to only a part of a video buffer.

A reference is usually obtained as input to the start_frame or
filter_frame method or requested using the ff_get_video_buffer or
ff_get_audio_buffer functions. A new reference on an existing buffer can
be created with the avfilter_ref_buffer function. A reference is destroyed
using the avfilter_unref_bufferp function.

Reference ownership
-------------------

At any time, a reference “belongs” to a particular piece of code,
usually a filter. With a few caveats that will be explained below, only
that piece of code is allowed to access it. It is also responsible for
destroying it, although this is sometimes done automatically (see the
section on link reference fields).

Here are the (fairly obvious) rules for reference ownership:

* A reference received by the filter_frame method (or its start_frame
  deprecated version) belongs to the corresponding filter.

  Special exception: for video references: the reference may be used
  internally for automatic copying and must not be destroyed before
  end_frame; it can be given away to ff_start_frame.

* A reference passed to ff_filter_frame (or the deprecated
  ff_start_frame) is given away and must no longer be used.

* A reference created with avfilter_ref_buffer belongs to the code that
  created it.

* A reference obtained with ff_get_video_buffer or ff_get_audio_buffer
  belongs to the code that requested it.

* A reference given as return value by the get_video_buffer or
  get_audio_buffer method is given away and must no longer be used.

Link reference fields
---------------------

The AVFilterLink structure has a few AVFilterBufferRef fields. The
cur_buf and out_buf were used with the deprecated
start_frame/draw_slice/end_frame API and should no longer be used.
src_buf, cur_buf_copy and partial_buf are used by libavfilter internally
and must not be accessed by filters.

Reference permissions
---------------------

The AVFilterBufferRef structure has a perms field that describes what
the code that owns the reference is allowed to do to the buffer data.
Different references for the same buffer can have different permissions.

For video filters that implement the deprecated
start_frame/draw_slice/end_frame API, the permissions only apply to the
parts of the buffer that have already been covered by the draw_slice
method.

The value is a binary OR of the following constants:

* AV_PERM_READ: the owner can read the buffer data; this is essentially
  always true and is there for self-documentation.

* AV_PERM_WRITE: the owner can modify the buffer data.

* AV_PERM_PRESERVE: the owner can rely on the fact that the buffer data
  will not be modified by previous filters.

* AV_PERM_REUSE: the owner can output the buffer several times, without
  modifying the data in between.

* AV_PERM_REUSE2: the owner can output the buffer several times and
  modify the data in between (useless without the WRITE permission).

* AV_PERM_ALIGN: the owner can access the data using fast operations
  that require data alignment.

The READ, WRITE and PRESERVE permissions are about sharing the same
buffer between several filters to avoid expensive copies without them
doing conflicting changes on the data.

The REUSE and REUSE2 permissions are about special memory for direct
rendering. For example, a buffer directly allocated in video memory must
not be modified once it is displayed on screen, or it will cause tearing;
it will therefore not have the REUSE2 permission.

The ALIGN permission is about extracting part of the buffer, for
copy-less padding or cropping for example.


References received on input pads are guaranteed to have all the
permissions stated in the min_perms field and none of the permissions
stated in the rej_perms.

References obtained by ff_get_video_buffer and ff_get_audio_buffer are
guaranteed to have at least all the permissions requested as argument.

References created by avfilter_ref_buffer have the same permissions as
the original reference minus the ones explicitly masked; the mask is
usually ~0 to keep the same permissions.

Filters should remove permissions on references they give to output
whenever necessary. It can be automatically done by setting the
rej_perms field on the output pad.

Here are a few guidelines corresponding to common situations:

* Filters that modify and forward their frame (like drawtext) need the
  WRITE permission.

* Filters that read their input to produce a new frame on output (like
  scale) need the READ permission on input and must request a buffer
  with the WRITE permission (see the sketch after this list).

* Filters that intend to keep a reference after the filtering process
  is finished (after filter_frame returns) must have the PRESERVE
  permission on it and remove the WRITE permission if they create a new
  reference to give it away.

* Filters that intend to modify a reference they have kept after the end
  of the filtering process need the REUSE2 permission and must remove
  the PRESERVE permission if they create a new reference to give it
  away.

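For illustration only (a sketch under the AVFilterBufferRef-era API described
above; these names changed in later libavfilter versions), a scale-like filter
would request its writable output buffer roughly like this:

    /* inside filter_frame: read-only input, freshly requested writable output */
    AVFilterBufferRef *out = ff_get_video_buffer(outlink,
                                                 AV_PERM_WRITE | AV_PERM_ALIGN,
                                                 outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    /* ... fill out->data from in->data, then hand the reference away ... */
    return ff_filter_frame(outlink, out);
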
Frame scheduling
================

The purpose of these rules is to ensure that frames flow in the filter
graph without getting stuck and accumulating somewhere.

Simple filters that output one frame for each input frame should not have
to worry about it.

filter_frame
------------

This method is called when a frame is pushed to the filter's input. It
can be called at any time except in a reentrant way.

If the input frame is enough to produce output, then the filter should
push the output frames on the output link immediately.

As an exception to the previous rule, if the input frame is enough to
produce several output frames, then the filter only needs to output at
least one frame per link. The additional frames can be left buffered in the
filter; these buffered frames must be flushed immediately if a new input
produces new output.

(Example: a frame-rate-doubling filter: filter_frame must (1) flush the
second copy of the previous frame, if it is still there, (2) push the
first copy of the incoming frame, (3) keep the second copy for later.)

If the input frame is not enough to produce output, the filter must not
call request_frame to get more. It must just process the frame or queue
it. The task of requesting more frames is left to the filter's
request_frame method or the application.

If a filter has several inputs, the filter must be ready for frames
arriving randomly on any input. Therefore, any filter with several inputs
will most likely require some kind of queuing mechanism. It is perfectly
acceptable to have a limited queue and to drop frames when the inputs
are too unbalanced.

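A minimal sketch (illustration only, not from the original document) of a
pass-through filter_frame that processes a frame in place and immediately
pushes it downstream, consistent with the ownership rules above:

    static int filter_frame(AVFilterLink *inlink, AVFilterBufferRef *buf)
    {
        AVFilterContext *ctx = inlink->dst;

        /* ... modify buf->data in place (requires the WRITE permission) ... */

        /* give the reference away; it must not be used afterwards */
        return ff_filter_frame(ctx->outputs[0], buf);
    }
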
request_frame
-------------

This method is called when a frame is wanted on an output.

For an input, it should directly call filter_frame on the corresponding
output.

For a filter, if there are queued frames already ready, one of these
frames should be pushed. If not, the filter should request a frame on
one of its inputs, repeatedly until at least one frame has been pushed.

Return values:
if request_frame could produce a frame, it should return 0;
if it could not for temporary reasons, it should return AVERROR(EAGAIN);
if it could not because there are no more frames, it should return
AVERROR_EOF.

The typical implementation of request_frame for a filter with several
inputs will look like this:

    if (frames_queued) {
        push_one_frame();
        return 0;
    }
    while (!frame_pushed) {
        input = input_where_a_frame_is_most_needed();
        ret = ff_request_frame(input);
        if (ret == AVERROR_EOF) {
            process_eof_on_input();
        } else if (ret < 0) {
            return ret;
        }
    }
    return 0;

Note that, except for filters that can have queued frames, request_frame
does not push frames: it requests them from its input, and as a reaction,
the filter_frame method will be called and do the work.

Legacy API
==========

Until libavfilter 3.23, the filter_frame method was split:

- for video filters, it was made of start_frame, draw_slice (that could be
  called several times on distinct parts of the frame) and end_frame;

- for audio filters, it was called filter_samples.
File diff suppressed because it is too large
@ -1,188 +0,0 @@
@chapter Format Options
@c man begin FORMAT OPTIONS

The libavformat library provides some generic global options, which
can be set on all the muxers and demuxers. In addition each muxer or
demuxer may support so-called private options, which are specific to
that component.

Options may be set by specifying -@var{option} @var{value} in the
FFmpeg tools, or by setting the value explicitly in the
@code{AVFormatContext} options or using the @file{libavutil/opt.h} API
for programmatic use.

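For instance, an illustrative sketch (not part of the original text; the
helper name open_with_options is made up) of passing the same options
programmatically through an @code{AVDictionary} when opening an input:

@example
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

/* av_register_all() is assumed to have been called once at startup,
   as required by this era of the libavformat API. */
int open_with_options(AVFormatContext **fmt, const char *url)
@{
    AVDictionary *opts = NULL;
    int ret;

    /* generic libavformat options, same names as on the command line */
    av_dict_set(&opts, "probesize",       "5000000", 0);
    av_dict_set(&opts, "analyzeduration", "5000000", 0);

    ret = avformat_open_input(fmt, url, NULL, &opts);
    av_dict_free(&opts);   /* any entries left over were not recognized */
    return ret;
@}
@end example
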
The list of supported options follows:
|
||||
|
||||
@table @option
|
||||
@item avioflags @var{flags} (@emph{input/output})
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item direct
|
||||
Reduce buffering.
|
||||
@end table
|
||||
|
||||
@item probesize @var{integer} (@emph{input})
|
||||
Set probing size in bytes, i.e. the size of the data to analyze to get
|
||||
stream information. A higher value will allow to detect more
|
||||
information in case it is dispersed into the stream, but will increase
|
||||
latency. Must be an integer not lesser than 32. It is 5000000 by default.
|
||||
|
||||
@item packetsize @var{integer} (@emph{output})
|
||||
Set packet size.
|
||||
|
||||
@item fflags @var{flags} (@emph{input/output})
|
||||
Set format flags.
|
||||
|
||||
Possible values:
|
||||
@table @samp
|
||||
@item ignidx
|
||||
Ignore index.
|
||||
@item genpts
|
||||
Generate PTS.
|
||||
@item nofillin
|
||||
Do not fill in missing values that can be exactly calculated.
|
||||
@item noparse
|
||||
Disable AVParsers, this needs @code{+nofillin} too.
|
||||
@item igndts
|
||||
Ignore DTS.
|
||||
@item discardcorrupt
|
||||
Discard corrupted frames.
|
||||
@item sortdts
|
||||
Try to interleave output packets by DTS.
|
||||
@item keepside
|
||||
Do not merge side data.
|
||||
@item latm
|
||||
Enable RTP MP4A-LATM payload.
|
||||
@item nobuffer
|
||||
Reduce the latency introduced by optional buffering
|
||||
@end table

@item seek2any @var{integer} (@emph{input})
Allow seeking to non-keyframes on demuxer level when supported if set to 1.
Default is 0.

@item analyzeduration @var{integer} (@emph{input})
Specify how many microseconds are analyzed to probe the input. A
higher value allows more accurate information to be detected, but will
increase latency. It defaults to 5,000,000 microseconds = 5 seconds.

@item cryptokey @var{hexadecimal string} (@emph{input})
Set decryption key.

@item indexmem @var{integer} (@emph{input})
Set max memory used for timestamp index (per stream).

@item rtbufsize @var{integer} (@emph{input})
Set max memory used for buffering real-time frames.

@item fdebug @var{flags} (@emph{input/output})
Print specific debug info.

Possible values:
@table @samp
@item ts
@end table

@item max_delay @var{integer} (@emph{input/output})
Set maximum muxing or demuxing delay in microseconds.

@item fpsprobesize @var{integer} (@emph{input})
Set number of frames used to probe fps.

@item audio_preload @var{integer} (@emph{output})
Set microseconds by which audio packets should be interleaved earlier.

@item chunk_duration @var{integer} (@emph{output})
Set microseconds for each chunk.

@item chunk_size @var{integer} (@emph{output})
Set size in bytes for each chunk.

@item err_detect, f_err_detect @var{flags} (@emph{input})
Set error detection flags. @code{f_err_detect} is deprecated and
should be used only via the @command{ffmpeg} tool.

Possible values:
@table @samp
@item crccheck
Verify embedded CRCs.
@item bitstream
Detect bitstream specification deviations.
@item buffer
Detect improper bitstream length.
@item explode
Abort decoding on minor error detection.
@item careful
Consider things that violate the spec and have not been seen in the
wild as errors.
@item compliant
Consider all spec non-compliances as errors.
@item aggressive
Consider things that a sane encoder should not do as an error.
@end table

@item use_wallclock_as_timestamps @var{integer} (@emph{input})
Use wallclock as timestamps.

@item avoid_negative_ts @var{integer} (@emph{output})
Shift timestamps to make them non-negative. A value of 1 enables shifting,
a value of 0 disables it, and the default value of -1 enables shifting
when required by the target format.

When shifting is enabled, all output timestamps are shifted by the
same amount. Audio, video, and subtitles desynching and relative
timestamp differences are preserved compared to how they would have
been without shifting.

Also note that this affects only leading negative timestamps, and not
non-monotonic negative timestamps.
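
For example (file names are placeholders), shifting can be forced while
stream-copying into a container that dislikes negative timestamps:

@example
ffmpeg -i input.mkv -c copy -avoid_negative_ts 1 output.mp4
@end example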

@item skip_initial_bytes @var{integer} (@emph{input})
Set number of bytes to skip before reading header and frames.
Default is 0.

@item correct_ts_overflow @var{integer} (@emph{input})
Correct single timestamp overflows if set to 1. Default is 1.

@item flush_packets @var{integer} (@emph{output})
Flush the underlying I/O stream after each packet. Default 1 enables it, and
has the effect of reducing the latency; 0 disables it and may slightly
increase performance in some cases.
@end table

@c man end FORMAT OPTIONS

@anchor{Format stream specifiers}
@section Format stream specifiers

Format stream specifiers allow selection of one or more streams that
match specific properties.

Possible forms of stream specifiers are:
@table @option
@item @var{stream_index}
Matches the stream with this index.

@item @var{stream_type}[:@var{stream_index}]
@var{stream_type} is one of the following: 'v' for video, 'a' for audio,
's' for subtitle, 'd' for data, and 't' for attachments. If
@var{stream_index} is given, then it matches the stream number
@var{stream_index} of this type. Otherwise, it matches all streams of
this type.

@item p:@var{program_id}[:@var{stream_index}]
If @var{stream_index} is given, then it matches the stream with number
@var{stream_index} in the program with the id
@var{program_id}. Otherwise, it matches all streams in the program.

@item #@var{stream_id}
Matches the stream by a format-specific ID.
@end table

The exact semantics of stream specifiers are defined by the
@code{avformat_match_stream_specifier()} function declared in the
@file{libavformat/avformat.h} header.
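
The same specifier forms are used, for instance, by the @command{ffmpeg}
@option{-map} option. A sketch (file names are placeholders) selecting
all video streams plus the second audio stream:

@example
ffmpeg -i input.mkv -map 0:v -map 0:a:1 -c copy output.mkv
@end example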

@include demuxers.texi
@include muxers.texi
@include metadata.texi
File diff suppressed because it is too large
@ -1,415 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Using git to develop FFmpeg

@titlepage
@center @titlefont{Using git to develop FFmpeg}
@end titlepage

@top

@contents

@chapter Introduction

This document aims to give a quick reference for a set of useful git
commands. You should always use the extensive and detailed documentation
provided directly by git:

@example
git --help
man git
@end example

shows you the available subcommands,

@example
git <command> --help
man git-<command>
@end example

shows information about the subcommand <command>.

Additional information can be found on the
@url{http://gitref.org, Git Reference} website.

For more information about the Git project, visit the

@url{http://git-scm.com/, Git website}

Consult these resources whenever you have problems; they are quite exhaustive.

What follows now is a basic introduction to Git and some FFmpeg-specific
guidelines to ease contributing to the project.

@chapter Basic Usage

@section Get GIT

You can get git from @url{http://git-scm.com/}
Most distributions and operating systems provide a package for it.


@section Cloning the source tree

@example
git clone git://source.ffmpeg.org/ffmpeg <target>
@end example

This will put the FFmpeg sources into the directory @var{<target>}.

@example
git clone git@@source.ffmpeg.org:ffmpeg <target>
@end example

This will put the FFmpeg sources into the directory @var{<target>} and let
you push back your changes to the remote repository.

Make sure that you do not have Windows line endings in your checkouts,
otherwise you may experience spurious compilation failures. One way to
achieve this is to run

@example
git config --global core.autocrlf false
@end example


@section Updating the source tree to the latest revision

@example
git pull (--rebase)
@end example

pulls in the latest changes from the tracked branch. The tracked branch
can be remote. By default the master branch tracks the branch master in
the remote origin.

@float IMPORTANT
@command{--rebase} (see below) is recommended.
@end float

@section Rebasing your local branches

@example
git pull --rebase
@end example

fetches the changes from the main repository and replays your local commits
over it. This is required to keep all your local changes at the top of
FFmpeg's master tree. The master tree will reject pushes with merge commits.


@section Adding/removing files/directories

@example
git add [-A] <filename/dirname>
git rm [-r] <filename/dirname>
@end example

GIT needs to get notified of all changes you make to your working
directory that make files appear or disappear.
Line moves across files are automatically tracked.


@section Showing modifications

@example
git diff <filename(s)>
@end example

will show all local modifications in your working directory as unified diff.


@section Inspecting the changelog

@example
git log <filename(s)>
@end example

You may also use the graphical tools like gitview or gitk or the web
interface available at http://source.ffmpeg.org/

@section Checking source tree status

@example
git status
@end example

detects all the changes you made and lists what actions will be taken in case
of a commit (additions, modifications, deletions, etc.).


@section Committing

@example
git diff --check
@end example

to double-check your changes before committing them to avoid trouble later
on. All experienced developers do this on each and every commit, no matter
how small.
Every one of them has been saved from looking like a fool by this many times.
It's very easy for stray debug output or cosmetic modifications to slip in;
please avoid problems through this extra level of scrutiny.

For cosmetics-only commits you should get (almost) empty output from

@example
git diff -w -b <filename(s)>
@end example

Also check the output of

@example
git status
@end example

to make sure you don't have untracked files or deletions.

@example
git add [-i|-p|-A] <filenames/dirnames>
@end example

Make sure you have told git your name and email address

@example
git config --global user.name "My Name"
git config --global user.email my@@email.invalid
@end example

Use @var{--global} to set the global configuration for all your git checkouts.

Git will select the changes to the files for commit. Optionally you can use
the interactive or the patch mode to select hunk by hunk what should be
added to the commit.


@example
git commit
@end example

Git will commit the selected changes to your current local branch.

You will be prompted for a log message in an editor, which is either
set in your personal configuration file through

@example
git config --global core.editor
@end example

or set by one of the following environment variables:
@var{GIT_EDITOR}, @var{VISUAL} or @var{EDITOR}.

Log messages should be concise but descriptive. Explain why you made a change;
what you did will be obvious from the changes themselves most of the time.
Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
levels look at and educate themselves while reading through your code. Don't
include filenames in log messages; Git provides that information.

Possibly make the commit message have a terse, descriptive first line, an
empty line and then a full description. The first line will be used to name
the patch by git format-patch.
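
A message following that shape might look like this (purely an
illustrative placeholder, not a real commit):

@example
area: terse one-line summary of the change

Longer description explaining what was wrong, why this is the right
fix, and anything reviewers need to know.
@end example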

@section Preparing a patchset

@example
git format-patch <commit> [-o directory]
@end example

will generate a set of patches for each commit between @var{<commit>} and
current @var{HEAD}. E.g.

@example
git format-patch origin/master
@end example

will generate patches for all commits on the current branch which are not
present in upstream.
A useful shortcut is also

@example
git format-patch -n
@end example

which will generate patches from the last @var{n} commits.
By default the patches are created in the current directory.

@section Sending patches for review

@example
git send-email <commit list|directory>
@end example

will send the patches created by @command{git format-patch}, or generate
them directly. All the email fields can be configured in the global/local
configuration or overridden by command line.
Note that this tool must often be installed separately (e.g. the @var{git-email}
package on Debian-based distros).
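
For example, assuming the patches were written to an @file{outgoing}
directory with @command{git format-patch -o outgoing} (the directory
name is only an example):

@example
git send-email --to=ffmpeg-devel@@ffmpeg.org outgoing/*.patch
@end example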

@section Renaming/moving/copying files or contents of files

Git automatically tracks such changes, making those normal commits.

@example
mv/cp path/file otherpath/otherfile
git add [-A] .
git commit
@end example


@chapter Git configuration

In order to simplify a few workflows, it is advisable to configure both
your personal Git installation and your local FFmpeg repository.

@section Personal Git installation

Add the following to your @file{~/.gitconfig} to help @command{git send-email}
and @command{git format-patch} detect renames:

@example
[diff]
        renames = copy
@end example

@section Repository configuration

In order to have @command{git send-email} automatically send patches
to the ffmpeg-devel mailing list, add the following stanza
to @file{/path/to/ffmpeg/repository/.git/config}:

@example
[sendemail]
        to = ffmpeg-devel@@ffmpeg.org
@end example

@chapter FFmpeg specific

@section Reverting broken commits

@example
git reset <commit>
@end example

@command{git reset} will uncommit the changes up to @var{<commit>}, rewriting
the current branch history.

@example
git commit --amend
@end example

allows you to amend the last commit details quickly.

@example
git rebase -i origin/master
@end example

will replay local commits over the main repository, allowing you to edit,
merge or remove some of them in the process.

@float NOTE
@command{git reset}, @command{git commit --amend} and @command{git rebase}
rewrite history, so you should use them ONLY on your local or topic branches.
The main repository will reject those changes.
@end float

@example
git revert <commit>
@end example

@command{git revert} will generate a revert commit. This will not make the
faulty commit disappear from the history.

@section Pushing changes to remote trees

@example
git push
@end example

Will push the changes to the default remote (@var{origin}).
Git will prevent you from pushing changes if the local and remote trees are
out of sync. Refer to the sections on updating and rebasing above to sync
the local tree.

@example
git remote add <name> <url>
@end example

Will add an additional remote with a name reference. This is useful if you
want to push your local branch for review on a remote host.

@example
git push <remote> <refspec>
@end example

Will push the changes to the @var{<remote>} repository.
Omitting @var{<refspec>} makes @command{git push} update all the remote
branches matching the local ones.

@section Finding a specific svn revision

Since version 1.7.1 git supports @var{:/foo} syntax for specifying commits
based on a regular expression. See @command{man gitrevisions}.

@example
git show :/'as revision 23456'
@end example

will show the svn changeset @var{r23456}. With older git versions searching in
the @command{git log} output is the easiest option (especially if a pager with
search capabilities is used).
This commit can be checked out with

@example
git checkout -b svn_23456 :/'as revision 23456'
@end example

or for git < 1.7.1 with

@example
git checkout -b svn_23456 $SHA1
@end example

where @var{$SHA1} is the commit hash from the @command{git log} output.


@chapter Pre-push checklist

Once you have a set of commits that you feel are ready for pushing,
work through the following checklist to double-check everything is in
proper order. This list tries to be exhaustive. In case you are just
pushing a typo in a comment, some of the steps may be unnecessary.
Apply your common sense, but if in doubt, err on the side of caution.

First, make sure that the commits and branches you are going to push
match what you want pushed and that nothing is missing, extraneous or
wrong. You can see what will be pushed by running the git push command
with --dry-run first, and then inspecting the commits listed with
@command{git log -p 1234567..987654}. The @command{git status} command
may help in finding local changes that you have forgotten to add.
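
Concretely, assuming the default remote and branch names (adjust to
your setup), this could look like:

@example
git push --dry-run origin master
git log -p origin/master..master
@end example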

Next let the code pass through a full run of our testsuite.

@itemize
@item @command{make distclean}
@item @command{/path/to/ffmpeg/configure}
@item @command{make check}
@item if fate fails due to missing samples run @command{make fate-rsync} and retry
@end itemize

Make sure all your changes have been checked before pushing them; the
testsuite only checks against regressions, and only to some extent. It
obviously does not check that newly added features/code work, unless you
have added a test for that (which is recommended).

Also note that every single commit should pass the test suite, not just
the result of a series of patches.

Once everything has passed, push the changes to your public ffmpeg clone and
post a merge request to ffmpeg-devel. You can also push them directly, but
this is not recommended.

@chapter Server Issues

Contact the project admins @email{root@@ffmpeg.org} if you have technical
problems with the GIT server.
@ -1,273 +0,0 @@

About Git write access:
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Before everything else, you should know how to use GIT properly.
Luckily Git comes with excellent documentation.

    git --help
    man git

shows you the available subcommands,

    git <command> --help
    man git-<command>

shows information about the subcommand <command>.

The most comprehensive manual is the website Git Reference

http://gitref.org/

For more information about the Git project, visit

http://git-scm.com/

Consult these resources whenever you have problems; they are quite exhaustive.

You do not need a special username or password.
All you need is to provide an ssh public key to the Git server admin.

What follows now is a basic introduction to Git and some FFmpeg-specific
guidelines. Read it at least once; if you are granted commit privileges to the
FFmpeg project you are expected to be familiar with these rules.



I. BASICS:
==========

0. Get GIT:

  Most distributions have a git package; if not,
  you can get git from http://git-scm.com/


1. Cloning the source tree:

    git clone git://source.ffmpeg.org/ffmpeg <target>

  This will put the FFmpeg sources into the directory <target>.

    git clone git@source.ffmpeg.org:ffmpeg <target>

  This will put the FFmpeg sources into the directory <target> and let
  you push back your changes to the remote repository.


2. Updating the source tree to the latest revision:

    git pull (--ff-only)

  pulls in the latest changes from the tracked branch. The tracked branch
  can be remote. By default the master branch tracks the branch master in
  the remote origin.
  Caveat: Since merge commits are forbidden, at least for the initial
          months of git, --ff-only or --rebase (see below) are recommended.
          --ff-only will fail and not create merge commits if your branch
          has diverged (has a different history) from the tracked branch.

2.a Rebasing your local branches:

    git pull --rebase

  fetches the changes from the main repository and replays your local commits
  over it. This is required to keep all your local changes at the top of
  FFmpeg's master tree. The master tree will reject pushes with merge commits.


3. Adding/removing files/directories:

    git add [-A] <filename/dirname>
    git rm [-r] <filename/dirname>

  GIT needs to get notified of all changes you make to your working
  directory that make files appear or disappear.
  Line moves across files are automatically tracked.


4. Showing modifications:

    git diff <filename(s)>

  will show all local modifications in your working directory as unified diff.


5. Inspecting the changelog:

    git log <filename(s)>

  You may also use the graphical tools like gitview or gitk or the web
  interface available at http://source.ffmpeg.org

6. Checking source tree status:

    git status

  detects all the changes you made and lists what actions will be taken in case
  of a commit (additions, modifications, deletions, etc.).


7. Committing:

    git diff --check

  to double-check your changes before committing them to avoid trouble later
  on. All experienced developers do this on each and every commit, no matter
  how small.
  Every one of them has been saved from looking like a fool by this many times.
  It's very easy for stray debug output or cosmetic modifications to slip in;
  please avoid problems through this extra level of scrutiny.

  For cosmetics-only commits you should get (almost) empty output from

    git diff -w -b <filename(s)>

  Also check the output of

    git status

  to make sure you don't have untracked files or deletions.

    git add [-i|-p|-A] <filenames/dirnames>

  Make sure you have told git your name and email address, e.g. by running
    git config --global user.name "My Name"
    git config --global user.email my@email.invalid
  (--global to set the global configuration for all your git checkouts).

  Git will select the changes to the files for commit. Optionally you can use
  the interactive or the patch mode to select hunk by hunk what should be
  added to the commit.

    git commit

  Git will commit the selected changes to your current local branch.

  You will be prompted for a log message in an editor, which is either
  set in your personal configuration file through

    git config core.editor

  or set by one of the following environment variables:
  GIT_EDITOR, VISUAL or EDITOR.

  Log messages should be concise but descriptive. Explain why you made a change;
  what you did will be obvious from the changes themselves most of the time.
  Saying just "bug fix" or "10l" is bad. Remember that people of varying skill
  levels look at and educate themselves while reading through your code. Don't
  include filenames in log messages; Git provides that information.

  Possibly make the commit message have a terse, descriptive first line, an
  empty line and then a full description. The first line will be used to name
  the patch by git format-patch.


8. Renaming/moving/copying files or contents of files:

  Git automatically tracks such changes, making those normal commits.

    mv/cp path/file otherpath/otherfile

    git add [-A] .

    git commit

  Do not move, rename or copy files of which you are not the maintainer without
  discussing it on the mailing list first!

9. Reverting broken commits

    git revert <commit>

  git revert will generate a revert commit. This will not make the faulty
  commit disappear from the history.

    git reset <commit>

  git reset will uncommit the changes up to <commit>, rewriting the current
  branch history.

    git commit --amend

  allows you to amend the last commit details quickly.

    git rebase -i origin/master

  will replay local commits over the main repository, allowing you to edit,
  merge or remove some of them in the process.

  Note that the reset, commit --amend and rebase rewrite history, so you
  should use them ONLY on your local or topic branches.

  The main repository will reject those changes.

10. Preparing a patchset:

    git format-patch <commit> [-o directory]

  will generate a set of patches for each commit between <commit> and
  current HEAD. E.g.

    git format-patch origin/master

  will generate patches for all commits on the current branch which are not
  present in upstream.
  A useful shortcut is also

    git format-patch -n

  which will generate patches from the last n commits.
  By default the patches are created in the current directory.

11. Sending patches for review

    git send-email <commit list|directory>

  will send the patches created by git format-patch, or generate them
  directly. All the email fields can be configured in the global/local
  configuration or overridden by command line.
  Note that this tool must often be installed separately (e.g. the git-email
  package on Debian-based distros).

12. Pushing changes to remote trees

    git push

  Will push the changes to the default remote (origin).
  Git will prevent you from pushing changes if the local and remote trees are
  out of sync. Refer to 2 and 2.a to sync the local tree.

    git remote add <name> <url>

  Will add an additional remote with a name reference. This is useful if you
  want to push your local branch for review on a remote host.

    git push <remote> <refspec>

  Will push the changes to the remote repository. Omitting refspec makes git
  push update all the remote branches matching the local ones.
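
  For example, to publish a local branch on your own public clone for
  review (the remote name, URL and branch name below are placeholders):

    git remote add mypublic git@example.org:ffmpeg.git
    git push mypublic my-feature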

13. Finding a specific svn revision

  Since version 1.7.1 git supports ':/foo' syntax for specifying commits
  based on a regular expression. See man gitrevisions.

    git show :/'as revision 23456'

  will show the svn changeset r23456. With older git versions searching in
  the git log output is the easiest option (especially if a pager with
  search capabilities is used).
  This commit can be checked out with

    git checkout -b svn_23456 :/'as revision 23456'

  or for git < 1.7.1 with

    git checkout -b svn_23456 $SHA1

  where $SHA1 is the commit SHA1 from the 'git log' output.


Contact the project admins <root at ffmpeg dot org> if you have technical
problems with the GIT server.
@ -1,763 +0,0 @@
@chapter Input Devices
@c man begin INPUT DEVICES

Input devices are configured elements in FFmpeg which allow you to access
the data coming from a multimedia device attached to your system.

When you configure your FFmpeg build, all the supported input devices
are enabled by default. You can list all available ones using the
configure option "--list-indevs".

You can disable all the input devices using the configure option
"--disable-indevs", and selectively enable an input device using the
option "--enable-indev=@var{INDEV}", or you can disable a particular
input device using the option "--disable-indev=@var{INDEV}".
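
For example (a sketch of the configure invocations, to be adapted to
your own build setup):

@example
./configure --list-indevs
./configure --disable-indevs --enable-indev=alsa
@end example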

The option "-formats" of the ff* tools will display the list of
supported input devices (amongst the demuxers).

A description of the currently available input devices follows.

@section alsa

ALSA (Advanced Linux Sound Architecture) input device.

To enable this input device during configuration you need libasound
installed on your system.

This device allows capturing from an ALSA device. The name of the
device to capture has to be an ALSA card identifier.

An ALSA identifier has the syntax:
@example
hw:@var{CARD}[,@var{DEV}[,@var{SUBDEV}]]
@end example

where the @var{DEV} and @var{SUBDEV} components are optional.

The three arguments (in order: @var{CARD},@var{DEV},@var{SUBDEV})
specify card number or identifier, device number and subdevice number
(-1 means any).

To see the list of cards currently recognized by your system check the
files @file{/proc/asound/cards} and @file{/proc/asound/devices}.

For example, to capture with @command{ffmpeg} from an ALSA device with
card id 0, you may run the command:
@example
ffmpeg -f alsa -i hw:0 alsaout.wav
@end example

For more information see:
@url{http://www.alsa-project.org/alsa-doc/alsa-lib/pcm.html}

@section bktr

BSD video input device.

@section dshow

Windows DirectShow input device.

DirectShow support is enabled when FFmpeg is built with the mingw-w64 project.
Currently only audio and video devices are supported.

Multiple devices may be opened as separate inputs, but they may also be
opened on the same input, which should improve synchronization between them.

The input name should be in the format:

@example
@var{TYPE}=@var{NAME}[:@var{TYPE}=@var{NAME}]
@end example

where @var{TYPE} can be either @var{audio} or @var{video},
and @var{NAME} is the device's name.

@subsection Options

If no options are specified, the device's defaults are used.
If the device does not support the requested options, it will
fail to open.

@table @option

@item video_size
Set the video size in the captured video.

@item framerate
Set the frame rate in the captured video.

@item sample_rate
Set the sample rate (in Hz) of the captured audio.

@item sample_size
Set the sample size (in bits) of the captured audio.

@item channels
Set the number of channels in the captured audio.

@item list_devices
If set to @option{true}, print a list of devices and exit.

@item list_options
If set to @option{true}, print a list of the selected device's options
and exit.

@item video_device_number
Set video device number for devices with the same name (starts at 0,
defaults to 0).

@item audio_device_number
Set audio device number for devices with the same name (starts at 0,
defaults to 0).

@item pixel_format
Select pixel format to be used by DirectShow. This may only be set when
the video codec is not set or set to rawvideo.

@item audio_buffer_size
Set audio device buffer size in milliseconds (which can directly
impact latency, depending on the device).
Defaults to using the audio device's
default buffer size (typically some multiple of 500ms).
Setting this value too low can degrade performance.
See also
@url{http://msdn.microsoft.com/en-us/library/windows/desktop/dd377582(v=vs.85).aspx}

@end table

@subsection Examples

@itemize

@item
Print the list of DirectShow supported devices and exit:
@example
$ ffmpeg -list_devices true -f dshow -i dummy
@end example

@item
Open video device @var{Camera}:
@example
$ ffmpeg -f dshow -i video="Camera"
@end example

@item
Open second video device with name @var{Camera}:
@example
$ ffmpeg -f dshow -video_device_number 1 -i video="Camera"
@end example

@item
Open video device @var{Camera} and audio device @var{Microphone}:
@example
$ ffmpeg -f dshow -i video="Camera":audio="Microphone"
@end example

@item
Print the list of supported options in the selected device and exit:
@example
$ ffmpeg -list_options true -f dshow -i video="Camera"
@end example

@end itemize

@section dv1394

Linux DV 1394 input device.

@section fbdev

Linux framebuffer input device.

The Linux framebuffer is a graphic hardware-independent abstraction
layer to show graphics on a computer monitor, typically on the
console. It is accessed through a file device node, usually
@file{/dev/fb0}.

For more detailed information read the file
Documentation/fb/framebuffer.txt included in the Linux source tree.

To record from the framebuffer device @file{/dev/fb0} with
@command{ffmpeg}:
@example
ffmpeg -f fbdev -r 10 -i /dev/fb0 out.avi
@end example

You can take a single screenshot image with the command:
@example
ffmpeg -f fbdev -frames:v 1 -r 1 -i /dev/fb0 screenshot.jpeg
@end example

See also @url{http://linux-fbdev.sourceforge.net/}, and fbset(1).

@section iec61883

FireWire DV/HDV input device using libiec61883.

To enable this input device, you need libiec61883, libraw1394 and
libavc1394 installed on your system. Use the configure option
@code{--enable-libiec61883} to compile with the device enabled.

The iec61883 capture device supports capturing from a video device
connected via IEEE1394 (FireWire), using libiec61883 and the new Linux
FireWire stack (juju). This is the default DV/HDV input method in Linux
Kernel 2.6.37 and later, since the old FireWire stack was removed.

Specify the FireWire port to be used as input file, or "auto"
to choose the first port connected.

@subsection Options

@table @option

@item dvtype
Override autodetection of DV/HDV. This should only be used if auto
detection does not work, or if usage of a different device type
should be prohibited. Treating a DV device as HDV (or vice versa) will
not work and result in undefined behavior.
The values @option{auto}, @option{dv} and @option{hdv} are supported.

@item dvbuffer
Set maximum size of buffer for incoming data, in frames. For DV, this
is an exact value. For HDV, it is not frame exact, since HDV does
not have a fixed frame size.

@item dvguid
Select the capture device by specifying its GUID. Capturing will only
be performed from the specified device and fails if no device with the
given GUID is found. This is useful to select the input if multiple
devices are connected at the same time.
Look at /sys/bus/firewire/devices to find out the GUIDs.

@end table

@subsection Examples

@itemize

@item
Grab and show the input of a FireWire DV/HDV device.
@example
ffplay -f iec61883 -i auto
@end example

@item
Grab and record the input of a FireWire DV/HDV device,
using a packet buffer of 100000 packets if the source is HDV.
@example
ffmpeg -f iec61883 -i auto -hdvbuffer 100000 out.mpg
@end example

@end itemize

@section jack

JACK input device.

To enable this input device during configuration you need libjack
installed on your system.

A JACK input device creates one or more JACK writable clients, one for
each audio channel, with name @var{client_name}:input_@var{N}, where
@var{client_name} is the name provided by the application, and @var{N}
is a number which identifies the channel.
Each writable client will send the acquired data to the FFmpeg input
device.

Once you have created one or more JACK readable clients, you need to
connect them to one or more JACK writable clients.

To connect or disconnect JACK clients you can use the @command{jack_connect}
and @command{jack_disconnect} programs, or do it through a graphical interface,
for example with @command{qjackctl}.

To list the JACK clients and their properties you can invoke the command
@command{jack_lsp}.

The following example shows how to capture a JACK readable client
with @command{ffmpeg}.
@example
# Create a JACK writable client with name "ffmpeg".
$ ffmpeg -f jack -i ffmpeg -y out.wav

# Start the sample jack_metro readable client.
$ jack_metro -b 120 -d 0.2 -f 4000

# List the current JACK clients.
$ jack_lsp -c
system:capture_1
system:capture_2
system:playback_1
system:playback_2
ffmpeg:input_1
metro:120_bpm

# Connect metro to the ffmpeg writable client.
$ jack_connect metro:120_bpm ffmpeg:input_1
@end example

For more information read:
@url{http://jackaudio.org/}

@section lavfi

Libavfilter input virtual device.

This input device reads data from the open output pads of a libavfilter
filtergraph.

For each filtergraph open output, the input device will create a
corresponding stream which is mapped to the generated output. Currently
only video data is supported. The filtergraph is specified through the
option @option{graph}.

@subsection Options

@table @option

@item graph
Specify the filtergraph to use as input. Each video open output must be
labelled by a unique string of the form "out@var{N}", where @var{N} is a
number starting from 0 corresponding to the mapped input stream
generated by the device.
The first unlabelled output is automatically assigned to the "out0"
label, but all the others need to be specified explicitly.

If not specified, it defaults to the filename specified for the input
device.

@item graph_file
Set the filename of the filtergraph to be read and sent to the other
filters. Syntax of the filtergraph is the same as the one specified by
the option @var{graph}.

@end table

@subsection Examples

@itemize
@item
Create a color video stream and play it back with @command{ffplay}:
@example
ffplay -f lavfi -graph "color=c=pink [out0]" dummy
@end example

@item
As in the previous example, but use a filename for specifying the graph
description, and omit the "out0" label:
@example
ffplay -f lavfi color=c=pink
@end example

@item
Create three different video test filtered sources and play them:
@example
ffplay -f lavfi -graph "testsrc [out0]; testsrc,hflip [out1]; testsrc,negate [out2]" test3
@end example

@item
Read an audio stream from a file using the amovie source and play it
back with @command{ffplay}:
@example
ffplay -f lavfi "amovie=test.wav"
@end example

@item
Read an audio stream and a video stream and play it back with
@command{ffplay}:
@example
ffplay -f lavfi "movie=test.avi[out0];amovie=test.wav[out1]"
@end example

@end itemize

@section libdc1394

IIDC1394 input device, based on libdc1394 and libraw1394.

@section openal

The OpenAL input device provides audio capture on all systems with a
working OpenAL 1.1 implementation.

To enable this input device during configuration, you need OpenAL
headers and libraries installed on your system, and need to configure
FFmpeg with @code{--enable-openal}.

OpenAL headers and libraries should be provided as part of your OpenAL
implementation, or as an additional download (an SDK). Depending on your
installation you may need to specify additional flags via the
@code{--extra-cflags} and @code{--extra-ldflags} to allow the build
system to locate the OpenAL headers and libraries.

An incomplete list of OpenAL implementations follows:

@table @strong
@item Creative
The official Windows implementation, providing hardware acceleration
with supported devices and software fallback.
See @url{http://openal.org/}.
@item OpenAL Soft
Portable, open source (LGPL) software implementation. Includes
backends for the most common sound APIs on the Windows, Linux,
Solaris, and BSD operating systems.
See @url{http://kcat.strangesoft.net/openal.html}.
@item Apple
OpenAL is part of Core Audio, the official Mac OS X Audio interface.
See @url{http://developer.apple.com/technologies/mac/audio-and-video.html}
@end table

This device allows capturing from an audio input device handled
through OpenAL.

You need to specify the name of the device to capture in the provided
filename. If the empty string is provided, the device will
automatically select the default device. You can get the list of the
supported devices by using the option @var{list_devices}.

@subsection Options

@table @option

@item channels
Set the number of channels in the captured audio. Only the values
@option{1} (monaural) and @option{2} (stereo) are currently supported.
Defaults to @option{2}.

@item sample_size
Set the sample size (in bits) of the captured audio. Only the values
@option{8} and @option{16} are currently supported. Defaults to
@option{16}.

@item sample_rate
Set the sample rate (in Hz) of the captured audio.
Defaults to @option{44.1k}.

@item list_devices
If set to @option{true}, print a list of devices and exit.
Defaults to @option{false}.

@end table

@subsection Examples

Print the list of OpenAL supported devices and exit:
@example
$ ffmpeg -list_devices true -f openal -i dummy out.ogg
@end example

Capture from the OpenAL device @file{DR-BT101 via PulseAudio}:
@example
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out.ogg
@end example

Capture from the default device (note the empty string '' as filename):
@example
$ ffmpeg -f openal -i '' out.ogg
@end example

Capture from two devices simultaneously, writing to two different files,
within the same @command{ffmpeg} command:
@example
$ ffmpeg -f openal -i 'DR-BT101 via PulseAudio' out1.ogg -f openal -i 'ALSA Default' out2.ogg
@end example
Note: not all OpenAL implementations support multiple simultaneous capture -
try the latest OpenAL Soft if the above does not work.

@section oss

Open Sound System input device.

The filename to provide to the input device is the device node
representing the OSS input device, and is usually set to
@file{/dev/dsp}.

For example, to grab from @file{/dev/dsp} using @command{ffmpeg} use the
command:
@example
ffmpeg -f oss -i /dev/dsp /tmp/oss.wav
@end example

For more information about OSS see:
@url{http://manuals.opensound.com/usersguide/dsp.html}

@section pulse

PulseAudio input device.

To enable this input device you need to configure FFmpeg with @code{--enable-libpulse}.

The filename to provide to the input device is a source device or the
string "default".

To list the PulseAudio source devices and their properties you can invoke
the command @command{pactl list sources}.

More information about PulseAudio can be found on @url{http://www.pulseaudio.org}.

@subsection Options
@table @option
@item server
Connect to a specific PulseAudio server, specified by an IP address.
Default server is used when not provided.

@item name
Specify the application name PulseAudio will use when showing active clients,
by default it is the @code{LIBAVFORMAT_IDENT} string.

@item stream_name
Specify the stream name PulseAudio will use when showing active streams,
by default it is "record".

@item sample_rate
Specify the sample rate in Hz, by default 48kHz is used.

@item channels
Specify the channels in use, by default 2 (stereo) is set.

@item frame_size
Specify the number of bytes per frame, by default it is set to 1024.

@item fragment_size
Specify the minimal buffering fragment in PulseAudio, it will affect the
audio latency. By default it is unset.
@end table

@subsection Examples
Record a stream from the default device:
@example
ffmpeg -f pulse -i default /tmp/pulse.wav
@end example
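
To record from a specific source instead, pass its name as reported by
@command{pactl list sources} (the source name below is only a
placeholder for whatever your system reports):
@example
ffmpeg -f pulse -i alsa_input.pci-0000_00_1b.0.analog-stereo /tmp/pulse.wav
@end example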

@section sndio

sndio input device.

To enable this input device during configuration you need libsndio
installed on your system.

The filename to provide to the input device is the device node
representing the sndio input device, and is usually set to
@file{/dev/audio0}.

For example, to grab from @file{/dev/audio0} using @command{ffmpeg} use the
command:
@example
ffmpeg -f sndio -i /dev/audio0 /tmp/oss.wav
@end example

@section video4linux2, v4l2

Video4Linux2 input video device.

"v4l2" can be used as an alias for "video4linux2".

If FFmpeg is built with v4l-utils support (by using the
@code{--enable-libv4l2} configure option), it is possible to use it with the
@code{-use_libv4l2} input device option.

The name of the device to grab is a file device node. Usually Linux
systems automatically create such nodes when the device
(e.g. a USB webcam) is plugged into the system, and the node has a name of
the kind @file{/dev/video@var{N}}, where @var{N} is a number associated with
the device.

Video4Linux2 devices usually support a limited set of
@var{width}x@var{height} sizes and frame rates. You can check which are
supported using @command{-list_formats all} for Video4Linux2 devices.
Some devices, like TV cards, support one or more standards. It is possible
to list all the supported standards using @command{-list_standards all}.
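
For example, assuming the device node @file{/dev/video0}:
@example
ffmpeg -f video4linux2 -list_formats all -i /dev/video0
ffmpeg -f video4linux2 -list_standards all -i /dev/video0
@end example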

The time base for the timestamps is 1 microsecond. Depending on the kernel
version and configuration, the timestamps may be derived from the real time
clock (origin at the Unix Epoch) or the monotonic clock (origin usually at
boot time, unaffected by NTP or manual changes to the clock). The
@option{-timestamps abs} or @option{-ts abs} option can be used to force
conversion into the real time clock.

Some usage examples of the video4linux2 device with @command{ffmpeg}
and @command{ffplay}:
@itemize
@item
Grab and show the input of a video4linux2 device:
@example
ffplay -f video4linux2 -framerate 30 -video_size hd720 /dev/video0
@end example

@item
Grab and record the input of a video4linux2 device, leave the
frame rate and size as previously set:
@example
ffmpeg -f video4linux2 -input_format mjpeg -i /dev/video0 out.mpeg
@end example
@end itemize

For more information about Video4Linux, check @url{http://linuxtv.org/}.

@subsection Options

@table @option
@item standard
Set the standard. Must be the name of a supported standard. To get a
list of the supported standards, use the @option{list_standards}
option.

@item channel
Set the input channel number. Defaults to -1, which means using the
previously selected channel.

@item video_size
Set the video frame size. The argument must be a string in the form
@var{WIDTH}x@var{HEIGHT} or a valid size abbreviation.

@item pixel_format
Select the pixel format (only valid for raw video input).

@item input_format
Set the preferred pixel format (for raw video) or a codec name.
This option allows selecting the input format, when several are
available.

@item framerate
Set the preferred video frame rate.

@item list_formats
List available formats (supported pixel formats, codecs, and frame
sizes) and exit.

Available values are:
@table @samp
@item all
Show all available (compressed and non-compressed) formats.

@item raw
Show only raw video (non-compressed) formats.

@item compressed
Show only compressed formats.
@end table

@item list_standards
List supported standards and exit.

Available values are:
@table @samp
@item all
Show all supported standards.
@end table

@item timestamps, ts
Set type of timestamps for grabbed frames.

Available values are:
@table @samp
@item default
Use timestamps from the kernel.

@item abs
Use absolute timestamps (wall clock).

@item mono2abs
Force conversion from monotonic to absolute timestamps.
@end table

Default value is @code{default}.
@end table

@section vfwcap

VfW (Video for Windows) capture input device.

The filename passed as input is the capture driver number, ranging from
0 to 9. You may use "list" as filename to print a list of drivers. Any
other filename will be interpreted as device number 0.
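
For example, a sketch that prints the driver list and then captures from
driver 0 (the output file name is arbitrary):
@example
ffmpeg -f vfwcap -i list
ffmpeg -f vfwcap -framerate 25 -i 0 out.avi
@end example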

@section x11grab

X11 video input device.

This device allows capturing a region of an X11 display.

The filename passed as input has the syntax:
@example
[@var{hostname}]:@var{display_number}.@var{screen_number}[+@var{x_offset},@var{y_offset}]
@end example

@var{hostname}:@var{display_number}.@var{screen_number} specifies the
X11 display name of the screen to grab from. @var{hostname} can be
omitted, and defaults to "localhost". The environment variable
@env{DISPLAY} contains the default display name.

@var{x_offset} and @var{y_offset} specify the offsets of the grabbed
area with respect to the top-left border of the X11 screen. They
default to 0.

Check the X11 documentation (e.g. man X) for more detailed information.

Use the @command{xdpyinfo} program for getting basic information about the
properties of your X11 display (e.g. grep for "name" or "dimensions").

For example, to grab from @file{:0.0} using @command{ffmpeg}:
@example
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0 out.mpg
@end example

Grab at position @code{10,20}:
@example
ffmpeg -f x11grab -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@end example

@subsection Options

@table @option
@item draw_mouse
Specify whether to draw the mouse pointer. A value of @code{0} specifies
not to draw the pointer. Default value is @code{1}.

@item follow_mouse
Make the grabbed area follow the mouse. The argument can be
@code{centered} or a number of pixels @var{PIXELS}.

When it is specified with "centered", the grabbing region follows the mouse
pointer and keeps the pointer at the center of the region; otherwise, the region
follows only when the mouse pointer reaches within @var{PIXELS} (greater than
zero) of the edge of the region.

For example:
@example
ffmpeg -f x11grab -follow_mouse centered -framerate 25 -video_size cif -i :0.0 out.mpg
@end example

To follow only when the mouse pointer reaches within 100 pixels of the edge:
@example
ffmpeg -f x11grab -follow_mouse 100 -framerate 25 -video_size cif -i :0.0 out.mpg
@end example

@item framerate
Set the grabbing frame rate. Default value is @code{ntsc},
corresponding to a frame rate of @code{30000/1001}.

@item show_region
Show grabbed region on screen.

If @var{show_region} is specified with @code{1}, then the grabbing
region will be indicated on screen. With this option, it is easy to
know what is being grabbed if only a portion of the screen is grabbed.

For example:
@example
ffmpeg -f x11grab -show_region 1 -framerate 25 -video_size cif -i :0.0+10,20 out.mpg
@end example

With @var{follow_mouse}:
@example
ffmpeg -f x11grab -follow_mouse centered -show_region 1 -framerate 25 -video_size cif -i :0.0 out.mpg
@end example

@item video_size
Set the video frame size. Default value is @code{vga}.
@end table

@c man end INPUT DEVICES
@ -1,194 +0,0 @@
FFmpeg's bug/feature request tracker manual
=================================================

NOTE: This is a draft.

Overview:
---------

FFmpeg uses Trac for tracking issues, new issues and changes to
existing issues can be done through a web interface.

Issues can be different kinds of things we want to keep track of
but that do not belong into the source tree itself. This includes
bug reports, feature requests and license violations. We
might add more items to this list in the future, so feel free to
propose a new `type of issue' on the ffmpeg-devel mailing list if
you feel it is worth tracking.

It is possible to subscribe to individual issues by adding yourself to the
Cc list or to subscribe to the ffmpeg-trac mailing list which receives
a mail for every change to every issue.
(the above does all work already after light testing)

The subscription URL for the ffmpeg-trac list is:
http(s)://ffmpeg.org/mailman/listinfo/ffmpeg-trac
The URL of the webinterface of the tracker is:
http(s)://trac.ffmpeg.org

Type:
-----
art
Artwork such as photos, music, banners, and logos.

bug / defect
An error, flaw, mistake, failure, or fault in FFmpeg or libav* that
prevents it from behaving as intended.

feature request / enhancement
Request of support for encoding or decoding of a new codec, container
or variant.
Request of support for more, less or plain different output or behavior
where the current implementation cannot be considered wrong.

license violation
ticket to keep track of (L)GPL violations of ffmpeg by others

sponsoring request
Developer requests for hardware, software, specifications, money,
refunds, etc.

Priority:
---------
critical
Bugs about data loss and security issues.
No feature request can be critical.

important
Bugs which make FFmpeg unusable for a significant number of users.
Examples here might be completely broken MPEG-4 decoding or a build issue
on Linux.
While broken 4xm decoding or a broken OS/2 build would not be important,
the separation to normal is somewhat fuzzy.
For feature requests this priority would be used for things many people
want.
Regressions also should be marked as important, regressions are bugs that
don't exist in a past revision or another branch.

normal

minor
Bugs about things like spelling errors, "mp2" instead of
"mp3" being shown and such.
Feature requests about things few people want or which do not make a big
difference.

wish
Something that is desirable to have but that there is no urgency at
all to implement, e.g. something completely cosmetic like a website
restyle or a personalized doxy template or the FFmpeg logo.
This priority is not valid for bugs.


Status:
-------
new
initial state

open
intermediate states

closed
final state


Analyzed flag:
--------------
Bugs which have been analyzed and where it is understood what causes them
and which exact chain of events triggers them. This analysis should be
available as a message in the bug report.
Note, do not change the status to analyzed without also providing a clear
and understandable analysis.
This state implicates that the bug either has been reproduced or that
reproduction is not needed as the bug is already understood.


Type/Status:
----------
*/new
Initial state of new bugs and feature requests submitted by
users.

*/open
Issues which have been briefly looked at and which did not look outright
invalid.
This implicates that no real more detailed state applies yet. Conversely,
the more detailed states below implicate that the issue has been briefly
looked at.

*/closed/duplicate
Bugs or feature requests which are duplicates.
Note, if you mark something as duplicate, do not forget setting the
superseder so bug reports are properly linked.

*/closed/invalid
Bugs caused by user errors, random ineligible or otherwise nonsense stuff.

*/closed/needs_more_info
Issues for which some information has been requested by the developers,
but which has not been provided by anyone within reasonable time.


bug/closed/fixed
Bugs which have to the best of our knowledge been fixed.

bug/closed/wontfix
Bugs which we will not fix. Possible reasons include legality, high
complexity for the sake of supporting obscure corner cases, speed loss
for similarly esoteric purposes, et cetera.
This also means that we would reject a patch.
If we are just too lazy to fix a bug then the correct state is open
and unassigned. Closed means that the case is closed which is not
the case if we are just waiting for a patch.

bug/closed/works_for_me
Bugs for which sufficient information was provided to reproduce but
reproduction failed - that is the code seems to work correctly to the
best of our knowledge.

feature_request/closed/fixed
Feature requests which have been implemented.

feature_request/closed/wontfix
Feature requests which will not be implemented. The reasons here could
be legal, philosophical or others.

Note2, if you provide the requested info do not forget to remove the
needs_more_info resolution.

Component:
----------

avcodec
issues in libavcodec/*

avformat
issues in libavformat/*

avutil
issues in libavutil/*

regression test
issues in tests/*

ffmpeg
issues in or related to ffmpeg.c

ffplay
issues in or related to ffplay.c

ffprobe
issues in or related to ffprobe.c

ffserver
issues in or related to ffserver.c

build system
issues in or related to configure/Makefile

regression
bugs which were not present in a past revision

trac
issues related to our issue tracker
@ -1,41 +0,0 @@
=head1 NAME

libavcodec - media streams decoding and encoding library

=head1 DESCRIPTION

The libavcodec library provides a generic encoding/decoding framework
and contains multiple decoders and encoders for audio, video and
subtitle streams, and several bitstream filters.

The shared architecture provides various services ranging from bit
stream I/O to DSP optimizations, and makes it suitable for
implementing robust and fast codecs as well as for experimentation.

=head1 SEE ALSO

ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
libavutil(3)

=head1 AUTHORS

The FFmpeg developers.

For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.

Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.
@ -1,48 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavcodec Documentation
@titlepage
@center @titlefont{Libavcodec Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavcodec library provides a generic encoding/decoding framework
and contains multiple decoders and encoders for audio, video and
subtitle streams, and several bitstream filters.

The shared architecture provides various services ranging from bit
stream I/O to DSP optimizations, and makes it suitable for
implementing robust and fast codecs as well as for experimentation.

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-codecs.html,ffmpeg-codecs}, @url{ffmpeg-bitstream-filters.html,bitstream-filters},
@url{libavutil.html,libavutil}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-codecs(1), ffmpeg-bitstream-filters(1),
libavutil(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavcodec
@settitle media streams decoding and encoding library

@end ignore

@bye
@ -1,38 +0,0 @@
=head1 NAME

libavdevice - multimedia device handling library

=head1 DESCRIPTION

The libavdevice library provides a generic framework for grabbing from
and rendering to many common multimedia input/output devices, and
supports several input and output devices, including Video4Linux2,
VfW, DShow, and ALSA.

=head1 SEE ALSO

ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-devices(1),
libavutil(3), libavcodec(3), libavformat(3)

=head1 AUTHORS

The FFmpeg developers.

For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.

Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.
@ -1,45 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavdevice Documentation
@titlepage
@center @titlefont{Libavdevice Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavdevice library provides a generic framework for grabbing from
and rendering to many common multimedia input/output devices, and
supports several input and output devices, including Video4Linux2,
VfW, DShow, and ALSA.

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-devices.html,ffmpeg-devices},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-devices(1),
libavutil(3), libavcodec(3), libavformat(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavdevice
@settitle multimedia device handling library

@end ignore

@bye
@ -1,44 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavfilter Documentation
@titlepage
@center @titlefont{Libavfilter Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavfilter library provides a generic audio/video filtering
framework containing several filters, sources and sinks.

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-filters.html,ffmpeg-filters},
@url{libavutil.html,libavutil}, @url{libswscale.html,libswscale}, @url{libswresample.html,libswresample},
@url{libavcodec.html,libavcodec}, @url{libavformat.html,libavformat}, @url{libavdevice.html,libavdevice}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-filters(1),
libavutil(3), libswscale(3), libswresample(3), libavcodec(3), libavformat(3), libavdevice(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavfilter
@settitle multimedia filtering library

@end ignore

@bye
@ -1,41 +0,0 @@
=head1 NAME

libavformat - multimedia muxing and demuxing library

=head1 DESCRIPTION

The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.

It also supports several input and output protocols to access a media
resource.

=head1 SEE ALSO

ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-formats(1), ffmpeg-protocols(1),
libavutil(3), libavcodec(3)

=head1 AUTHORS

The FFmpeg developers.

For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.

Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.
@ -1,48 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavformat Documentation
@titlepage
@center @titlefont{Libavformat Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavformat library provides a generic framework for multiplexing
and demultiplexing (muxing and demuxing) audio, video and subtitle
streams. It encompasses multiple muxers and demuxers for multimedia
container formats.

It also supports several input and output protocols to access a media
resource.

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-formats.html,ffmpeg-formats}, @url{ffmpeg-protocols.html,ffmpeg-protocols},
@url{libavutil.html,libavutil}, @url{libavcodec.html,libavcodec}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-formats(1), ffmpeg-protocols(1),
libavutil(3), libavcodec(3)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavformat
@settitle multimedia muxing and demuxing library

@end ignore

@bye
@ -1,38 +0,0 @@
=head1 NAME

libavutil - multimedia-biased utility library

=head1 DESCRIPTION

The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats).

=head1 SEE ALSO

ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1)

=head1 AUTHORS

The FFmpeg developers.

For details about the authorship, see the Git history of the project
(git://source.ffmpeg.org/ffmpeg), e.g. by typing the command
B<git log> in the FFmpeg source directory, or browsing the
online repository at E<lt>B<http://source.ffmpeg.org>E<gt>.

Maintainers for the specific components are listed in the file
F<MAINTAINERS> in the source code tree.
@ -1,44 +0,0 @@
\input texinfo @c -*- texinfo -*-

@settitle Libavutil Documentation
@titlepage
@center @titlefont{Libavutil Documentation}
@end titlepage

@top

@contents

@chapter Description
@c man begin DESCRIPTION

The libavutil library is a utility library to aid portable
multimedia programming. It contains safe portable string functions,
random number generators, data structures, additional mathematics
functions, cryptography and multimedia related functionality (like
enumerations for pixel and sample formats).

@c man end DESCRIPTION

@chapter See Also

@ifhtml
@url{ffmpeg.html,ffmpeg}, @url{ffplay.html,ffplay}, @url{ffprobe.html,ffprobe}, @url{ffserver.html,ffserver},
@url{ffmpeg-utils.html,ffmpeg-utils}
@end ifhtml

@ifnothtml
ffmpeg(1), ffplay(1), ffprobe(1), ffserver(1),
ffmpeg-utils(1)
@end ifnothtml

@include authors.texi

@ignore

@setfilename libavutil
@settitle multimedia-biased utility library

@end ignore

@bye