forked from KolibriOS/kolibrios
ffmpeg-2.8.5
git-svn-id: svn://kolibrios.org@6147 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
46
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/Makefile
Normal file
46
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/Makefile
Normal file
@@ -0,0 +1,46 @@
|
||||
# use pkg-config for getting CFLAGS and LDLIBS
|
||||
FFMPEG_LIBS= libavdevice \
|
||||
libavformat \
|
||||
libavfilter \
|
||||
libavcodec \
|
||||
libswresample \
|
||||
libswscale \
|
||||
libavutil \
|
||||
|
||||
CFLAGS += -Wall -g
|
||||
CFLAGS := $(shell pkg-config --cflags $(FFMPEG_LIBS)) $(CFLAGS)
|
||||
LDLIBS := $(shell pkg-config --libs $(FFMPEG_LIBS)) $(LDLIBS)
|
||||
|
||||
EXAMPLES= avio_dir_cmd \
|
||||
avio_reading \
|
||||
decoding_encoding \
|
||||
demuxing_decoding \
|
||||
extract_mvs \
|
||||
filtering_video \
|
||||
filtering_audio \
|
||||
http_multiclient \
|
||||
metadata \
|
||||
muxing \
|
||||
remuxing \
|
||||
resampling_audio \
|
||||
scaling_video \
|
||||
transcode_aac \
|
||||
transcoding \
|
||||
|
||||
OBJS=$(addsuffix .o,$(EXAMPLES))
|
||||
|
||||
# the following examples make explicit use of the math library
|
||||
avcodec: LDLIBS += -lm
|
||||
decoding_encoding: LDLIBS += -lm
|
||||
muxing: LDLIBS += -lm
|
||||
resampling_audio: LDLIBS += -lm
|
||||
|
||||
.phony: all clean-test clean
|
||||
|
||||
all: $(OBJS) $(EXAMPLES)
|
||||
|
||||
clean-test:
|
||||
$(RM) test*.pgm test.h264 test.mp2 test.sw test.mpg
|
||||
|
||||
clean: clean-test
|
||||
$(RM) $(EXAMPLES) $(OBJS)
|
23
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/README
Normal file
23
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/README
Normal file
@@ -0,0 +1,23 @@
|
||||
FFmpeg examples README
|
||||
----------------------
|
||||
|
||||
Both following use cases rely on pkg-config and make, thus make sure
|
||||
that you have them installed and working on your system.
|
||||
|
||||
|
||||
Method 1: build the installed examples in a generic read/write user directory
|
||||
|
||||
Copy to a read/write user directory and just use "make", it will link
|
||||
to the libraries on your system, assuming the PKG_CONFIG_PATH is
|
||||
correctly configured.
|
||||
|
||||
Method 2: build the examples in-tree
|
||||
|
||||
Assuming you are in the source FFmpeg checkout directory, you need to build
|
||||
FFmpeg (no need to make install in any prefix). Then just run "make examples".
|
||||
This will build the examples using the FFmpeg build system. You can clean those
|
||||
examples using "make examplesclean"
|
||||
|
||||
If you want to try the dedicated Makefile examples (to emulate the first
|
||||
method), go into doc/examples and run a command such as
|
||||
PKG_CONFIG_PATH=pc-uninstalled make.
|
@@ -0,0 +1,180 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Lukasz Marek
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
|
||||
static const char *type_string(int type)
|
||||
{
|
||||
switch (type) {
|
||||
case AVIO_ENTRY_DIRECTORY:
|
||||
return "<DIR>";
|
||||
case AVIO_ENTRY_FILE:
|
||||
return "<FILE>";
|
||||
case AVIO_ENTRY_BLOCK_DEVICE:
|
||||
return "<BLOCK DEVICE>";
|
||||
case AVIO_ENTRY_CHARACTER_DEVICE:
|
||||
return "<CHARACTER DEVICE>";
|
||||
case AVIO_ENTRY_NAMED_PIPE:
|
||||
return "<PIPE>";
|
||||
case AVIO_ENTRY_SYMBOLIC_LINK:
|
||||
return "<LINK>";
|
||||
case AVIO_ENTRY_SOCKET:
|
||||
return "<SOCKET>";
|
||||
case AVIO_ENTRY_SERVER:
|
||||
return "<SERVER>";
|
||||
case AVIO_ENTRY_SHARE:
|
||||
return "<SHARE>";
|
||||
case AVIO_ENTRY_WORKGROUP:
|
||||
return "<WORKGROUP>";
|
||||
case AVIO_ENTRY_UNKNOWN:
|
||||
default:
|
||||
break;
|
||||
}
|
||||
return "<UNKNOWN>";
|
||||
}
|
||||
|
||||
static int list_op(const char *input_dir)
|
||||
{
|
||||
AVIODirEntry *entry = NULL;
|
||||
AVIODirContext *ctx = NULL;
|
||||
int cnt, ret;
|
||||
char filemode[4], uid_and_gid[20];
|
||||
|
||||
if ((ret = avio_open_dir(&ctx, input_dir, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open directory: %s.\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
|
||||
cnt = 0;
|
||||
for (;;) {
|
||||
if ((ret = avio_read_dir(ctx, &entry)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot list directory: %s.\n", av_err2str(ret));
|
||||
goto fail;
|
||||
}
|
||||
if (!entry)
|
||||
break;
|
||||
if (entry->filemode == -1) {
|
||||
snprintf(filemode, 4, "???");
|
||||
} else {
|
||||
snprintf(filemode, 4, "%3"PRIo64, entry->filemode);
|
||||
}
|
||||
snprintf(uid_and_gid, 20, "%"PRId64"(%"PRId64")", entry->user_id, entry->group_id);
|
||||
if (cnt == 0)
|
||||
av_log(NULL, AV_LOG_INFO, "%-9s %12s %30s %10s %s %16s %16s %16s\n",
|
||||
"TYPE", "SIZE", "NAME", "UID(GID)", "UGO", "MODIFIED",
|
||||
"ACCESSED", "STATUS_CHANGED");
|
||||
av_log(NULL, AV_LOG_INFO, "%-9s %12"PRId64" %30s %10s %s %16"PRId64" %16"PRId64" %16"PRId64"\n",
|
||||
type_string(entry->type),
|
||||
entry->size,
|
||||
entry->name,
|
||||
uid_and_gid,
|
||||
filemode,
|
||||
entry->modification_timestamp,
|
||||
entry->access_timestamp,
|
||||
entry->status_change_timestamp);
|
||||
avio_free_directory_entry(&entry);
|
||||
cnt++;
|
||||
};
|
||||
|
||||
fail:
|
||||
avio_close_dir(&ctx);
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int del_op(const char *url)
|
||||
{
|
||||
int ret = avpriv_io_delete(url);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot delete '%s': %s.\n", url, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int move_op(const char *src, const char *dst)
|
||||
{
|
||||
int ret = avpriv_io_move(src, dst);
|
||||
if (ret < 0)
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot move '%s' into '%s': %s.\n", src, dst, av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
|
||||
static void usage(const char *program_name)
|
||||
{
|
||||
fprintf(stderr, "usage: %s OPERATION entry1 [entry2]\n"
|
||||
"API example program to show how to manipulate resources "
|
||||
"accessed through AVIOContext.\n"
|
||||
"OPERATIONS:\n"
|
||||
"list list content of the directory\n"
|
||||
"move rename content in directory\n"
|
||||
"del delete content in directory\n",
|
||||
program_name);
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
const char *op = NULL;
|
||||
int ret;
|
||||
|
||||
av_log_set_level(AV_LOG_DEBUG);
|
||||
|
||||
if (argc < 2) {
|
||||
usage(argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* register codecs and formats and other lavf/lavc components*/
|
||||
av_register_all();
|
||||
avformat_network_init();
|
||||
|
||||
op = argv[1];
|
||||
if (strcmp(op, "list") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for list operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = list_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "del") == 0) {
|
||||
if (argc < 3) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for del operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = del_op(argv[2]);
|
||||
}
|
||||
} else if (strcmp(op, "move") == 0) {
|
||||
if (argc < 4) {
|
||||
av_log(NULL, AV_LOG_INFO, "Missing argument for move operation.\n");
|
||||
ret = AVERROR(EINVAL);
|
||||
} else {
|
||||
ret = move_op(argv[2], argv[3]);
|
||||
}
|
||||
} else {
|
||||
av_log(NULL, AV_LOG_INFO, "Invalid operation %s\n", op);
|
||||
ret = AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
avformat_network_deinit();
|
||||
|
||||
return ret < 0 ? 1 : 0;
|
||||
}
|
@@ -0,0 +1,134 @@
|
||||
/*
|
||||
* Copyright (c) 2014 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat AVIOContext API example.
|
||||
*
|
||||
* Make libavformat demuxer access media content through a custom
|
||||
* AVIOContext read callback.
|
||||
* @example avio_reading.c
|
||||
*/
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavformat/avio.h>
|
||||
#include <libavutil/file.h>
|
||||
|
||||
struct buffer_data {
|
||||
uint8_t *ptr;
|
||||
size_t size; ///< size left in the buffer
|
||||
};
|
||||
|
||||
static int read_packet(void *opaque, uint8_t *buf, int buf_size)
|
||||
{
|
||||
struct buffer_data *bd = (struct buffer_data *)opaque;
|
||||
buf_size = FFMIN(buf_size, bd->size);
|
||||
|
||||
printf("ptr:%p size:%zu\n", bd->ptr, bd->size);
|
||||
|
||||
/* copy internal buffer data to buf */
|
||||
memcpy(buf, bd->ptr, buf_size);
|
||||
bd->ptr += buf_size;
|
||||
bd->size -= buf_size;
|
||||
|
||||
return buf_size;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVIOContext *avio_ctx = NULL;
|
||||
uint8_t *buffer = NULL, *avio_ctx_buffer = NULL;
|
||||
size_t buffer_size, avio_ctx_buffer_size = 4096;
|
||||
char *input_filename = NULL;
|
||||
int ret = 0;
|
||||
struct buffer_data bd = { 0 };
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "usage: %s input_file\n"
|
||||
"API example program to show how to read from a custom buffer "
|
||||
"accessed through AVIOContext.\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
input_filename = argv[1];
|
||||
|
||||
/* register codecs and formats and other lavf/lavc components*/
|
||||
av_register_all();
|
||||
|
||||
/* slurp file content into buffer */
|
||||
ret = av_file_map(input_filename, &buffer, &buffer_size, 0, NULL);
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
/* fill opaque structure used by the AVIOContext read callback */
|
||||
bd.ptr = buffer;
|
||||
bd.size = buffer_size;
|
||||
|
||||
if (!(fmt_ctx = avformat_alloc_context())) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
avio_ctx_buffer = av_malloc(avio_ctx_buffer_size);
|
||||
if (!avio_ctx_buffer) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
avio_ctx = avio_alloc_context(avio_ctx_buffer, avio_ctx_buffer_size,
|
||||
0, &bd, &read_packet, NULL, NULL);
|
||||
if (!avio_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
fmt_ctx->pb = avio_ctx;
|
||||
|
||||
ret = avformat_open_input(&fmt_ctx, NULL, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open input\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avformat_find_stream_info(fmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, input_filename, 0);
|
||||
|
||||
end:
|
||||
avformat_close_input(&fmt_ctx);
|
||||
/* note: the internal buffer could have changed, and be != avio_ctx_buffer */
|
||||
if (avio_ctx) {
|
||||
av_freep(&avio_ctx->buffer);
|
||||
av_freep(&avio_ctx);
|
||||
}
|
||||
av_file_unmap(buffer, buffer_size);
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@@ -0,0 +1,665 @@
|
||||
/*
|
||||
* Copyright (c) 2001 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavcodec API use example.
|
||||
*
|
||||
* @example decoding_encoding.c
|
||||
* Note that libavcodec only handles codecs (mpeg, mpeg4, etc...),
|
||||
* not file formats (avi, vob, mp4, mov, mkv, mxf, flv, mpegts, mpegps, etc...). See library 'libavformat' for the
|
||||
* format handling
|
||||
*/
|
||||
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/common.h>
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
|
||||
#define INBUF_SIZE 4096
|
||||
#define AUDIO_INBUF_SIZE 20480
|
||||
#define AUDIO_REFILL_THRESH 4096
|
||||
|
||||
/* check that a given sample format is supported by the encoder */
|
||||
static int check_sample_fmt(AVCodec *codec, enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
const enum AVSampleFormat *p = codec->sample_fmts;
|
||||
|
||||
while (*p != AV_SAMPLE_FMT_NONE) {
|
||||
if (*p == sample_fmt)
|
||||
return 1;
|
||||
p++;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* just pick the highest supported samplerate */
|
||||
static int select_sample_rate(AVCodec *codec)
|
||||
{
|
||||
const int *p;
|
||||
int best_samplerate = 0;
|
||||
|
||||
if (!codec->supported_samplerates)
|
||||
return 44100;
|
||||
|
||||
p = codec->supported_samplerates;
|
||||
while (*p) {
|
||||
best_samplerate = FFMAX(*p, best_samplerate);
|
||||
p++;
|
||||
}
|
||||
return best_samplerate;
|
||||
}
|
||||
|
||||
/* select layout with the highest channel count */
|
||||
static int select_channel_layout(AVCodec *codec)
|
||||
{
|
||||
const uint64_t *p;
|
||||
uint64_t best_ch_layout = 0;
|
||||
int best_nb_channels = 0;
|
||||
|
||||
if (!codec->channel_layouts)
|
||||
return AV_CH_LAYOUT_STEREO;
|
||||
|
||||
p = codec->channel_layouts;
|
||||
while (*p) {
|
||||
int nb_channels = av_get_channel_layout_nb_channels(*p);
|
||||
|
||||
if (nb_channels > best_nb_channels) {
|
||||
best_ch_layout = *p;
|
||||
best_nb_channels = nb_channels;
|
||||
}
|
||||
p++;
|
||||
}
|
||||
return best_ch_layout;
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio encoding example
|
||||
*/
|
||||
static void audio_encode_example(const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
int i, j, k, ret, got_output;
|
||||
int buffer_size;
|
||||
FILE *f;
|
||||
uint16_t *samples;
|
||||
float t, tincr;
|
||||
|
||||
printf("Encode audio file %s\n", filename);
|
||||
|
||||
/* find the MP2 encoder */
|
||||
codec = avcodec_find_encoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 64000;
|
||||
|
||||
/* check that the encoder supports s16 pcm input */
|
||||
c->sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
if (!check_sample_fmt(codec, c->sample_fmt)) {
|
||||
fprintf(stderr, "Encoder does not support sample format %s",
|
||||
av_get_sample_fmt_name(c->sample_fmt));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* select other audio parameters supported by the encoder */
|
||||
c->sample_rate = select_sample_rate(codec);
|
||||
c->channel_layout = select_channel_layout(codec);
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* frame containing input raw audio */
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->nb_samples = c->frame_size;
|
||||
frame->format = c->sample_fmt;
|
||||
frame->channel_layout = c->channel_layout;
|
||||
|
||||
/* the codec gives us the frame size, in samples,
|
||||
* we calculate the size of the samples buffer in bytes */
|
||||
buffer_size = av_samples_get_buffer_size(NULL, c->channels, c->frame_size,
|
||||
c->sample_fmt, 0);
|
||||
if (buffer_size < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
exit(1);
|
||||
}
|
||||
samples = av_malloc(buffer_size);
|
||||
if (!samples) {
|
||||
fprintf(stderr, "Could not allocate %d bytes for samples buffer\n",
|
||||
buffer_size);
|
||||
exit(1);
|
||||
}
|
||||
/* setup the data pointers in the AVFrame */
|
||||
ret = avcodec_fill_audio_frame(frame, c->channels, c->sample_fmt,
|
||||
(const uint8_t*)samples, buffer_size, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not setup audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode a single tone sound */
|
||||
t = 0;
|
||||
tincr = 2 * M_PI * 440.0 / c->sample_rate;
|
||||
for (i = 0; i < 200; i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
for (j = 0; j < c->frame_size; j++) {
|
||||
samples[2*j] = (int)(sin(t) * 10000);
|
||||
|
||||
for (k = 1; k < c->channels; k++)
|
||||
samples[2*j + k] = samples[2*j];
|
||||
t += tincr;
|
||||
}
|
||||
/* encode the samples */
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
ret = avcodec_encode_audio2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
fclose(f);
|
||||
|
||||
av_freep(&samples);
|
||||
av_frame_free(&frame);
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
}
|
||||
|
||||
/*
|
||||
* Audio decoding.
|
||||
*/
|
||||
static void audio_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int len;
|
||||
FILE *f, *outfile;
|
||||
uint8_t inbuf[AUDIO_INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
AVFrame *decoded_frame = NULL;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
printf("Decode audio file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg audio decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MP2);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate audio codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
outfile = fopen(outfilename, "wb");
|
||||
if (!outfile) {
|
||||
av_free(c);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* decode until eof */
|
||||
avpkt.data = inbuf;
|
||||
avpkt.size = fread(inbuf, 1, AUDIO_INBUF_SIZE, f);
|
||||
|
||||
while (avpkt.size > 0) {
|
||||
int i, ch;
|
||||
int got_frame = 0;
|
||||
|
||||
if (!decoded_frame) {
|
||||
if (!(decoded_frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
len = avcodec_decode_audio4(c, decoded_frame, &got_frame, &avpkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding\n");
|
||||
exit(1);
|
||||
}
|
||||
if (got_frame) {
|
||||
/* if a frame has been decoded, output it */
|
||||
int data_size = av_get_bytes_per_sample(c->sample_fmt);
|
||||
if (data_size < 0) {
|
||||
/* This should not occur, checking just for paranoia */
|
||||
fprintf(stderr, "Failed to calculate data size\n");
|
||||
exit(1);
|
||||
}
|
||||
for (i=0; i<decoded_frame->nb_samples; i++)
|
||||
for (ch=0; ch<c->channels; ch++)
|
||||
fwrite(decoded_frame->data[ch] + data_size*i, 1, data_size, outfile);
|
||||
}
|
||||
avpkt.size -= len;
|
||||
avpkt.data += len;
|
||||
avpkt.dts =
|
||||
avpkt.pts = AV_NOPTS_VALUE;
|
||||
if (avpkt.size < AUDIO_REFILL_THRESH) {
|
||||
/* Refill the input buffer, to avoid trying to decode
|
||||
* incomplete frames. Instead of this, one could also use
|
||||
* a parser, or use a proper container format through
|
||||
* libavformat. */
|
||||
memmove(inbuf, avpkt.data, avpkt.size);
|
||||
avpkt.data = inbuf;
|
||||
len = fread(avpkt.data + avpkt.size, 1,
|
||||
AUDIO_INBUF_SIZE - avpkt.size, f);
|
||||
if (len > 0)
|
||||
avpkt.size += len;
|
||||
}
|
||||
}
|
||||
|
||||
fclose(outfile);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_frame_free(&decoded_frame);
|
||||
}
|
||||
|
||||
/*
|
||||
* Video encoding example
|
||||
*/
|
||||
static void video_encode_example(const char *filename, int codec_id)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int i, ret, x, y, got_output;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
AVPacket pkt;
|
||||
uint8_t endcode[] = { 0, 0, 1, 0xb7 };
|
||||
|
||||
printf("Encode video file %s\n", filename);
|
||||
|
||||
/* find the mpeg1 video encoder */
|
||||
codec = avcodec_find_encoder(codec_id);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* put sample parameters */
|
||||
c->bit_rate = 400000;
|
||||
/* resolution must be a multiple of two */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* frames per second */
|
||||
c->time_base = (AVRational){1,25};
|
||||
/* emit one intra frame every ten frames
|
||||
* check frame pict_type before passing frame
|
||||
* to encoder, if frame->pict_type is AV_PICTURE_TYPE_I
|
||||
* then gop_size is ignored and the output of encoder
|
||||
* will always be I frame irrespective to gop_size
|
||||
*/
|
||||
c->gop_size = 10;
|
||||
c->max_b_frames = 1;
|
||||
c->pix_fmt = AV_PIX_FMT_YUV420P;
|
||||
|
||||
if (codec_id == AV_CODEC_ID_H264)
|
||||
av_opt_set(c->priv_data, "preset", "slow", 0);
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "wb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
frame->format = c->pix_fmt;
|
||||
frame->width = c->width;
|
||||
frame->height = c->height;
|
||||
|
||||
/* the image can be allocated by any means and av_image_alloc() is
|
||||
* just the most convenient way if av_malloc() is to be used */
|
||||
ret = av_image_alloc(frame->data, frame->linesize, c->width, c->height,
|
||||
c->pix_fmt, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw picture buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* encode 1 second of video */
|
||||
for (i = 0; i < 25; i++) {
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL; // packet data will be allocated by the encoder
|
||||
pkt.size = 0;
|
||||
|
||||
fflush(stdout);
|
||||
/* prepare a dummy image */
|
||||
/* Y */
|
||||
for (y = 0; y < c->height; y++) {
|
||||
for (x = 0; x < c->width; x++) {
|
||||
frame->data[0][y * frame->linesize[0] + x] = x + y + i * 3;
|
||||
}
|
||||
}
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < c->height/2; y++) {
|
||||
for (x = 0; x < c->width/2; x++) {
|
||||
frame->data[1][y * frame->linesize[1] + x] = 128 + y + i * 2;
|
||||
frame->data[2][y * frame->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
|
||||
frame->pts = i;
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* get the delayed frames */
|
||||
for (got_output = 1; got_output; i++) {
|
||||
fflush(stdout);
|
||||
|
||||
ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_output) {
|
||||
printf("Write frame %3d (size=%5d)\n", i, pkt.size);
|
||||
fwrite(pkt.data, 1, pkt.size, f);
|
||||
av_free_packet(&pkt);
|
||||
}
|
||||
}
|
||||
|
||||
/* add sequence end code to have a real mpeg file */
|
||||
fwrite(endcode, 1, sizeof(endcode), f);
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_freep(&frame->data[0]);
|
||||
av_frame_free(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
/*
|
||||
* Video decoding example
|
||||
*/
|
||||
|
||||
static void pgm_save(unsigned char *buf, int wrap, int xsize, int ysize,
|
||||
char *filename)
|
||||
{
|
||||
FILE *f;
|
||||
int i;
|
||||
|
||||
f = fopen(filename,"w");
|
||||
fprintf(f, "P5\n%d %d\n%d\n", xsize, ysize, 255);
|
||||
for (i = 0; i < ysize; i++)
|
||||
fwrite(buf + i * wrap, 1, xsize, f);
|
||||
fclose(f);
|
||||
}
|
||||
|
||||
static int decode_write_frame(const char *outfilename, AVCodecContext *avctx,
|
||||
AVFrame *frame, int *frame_count, AVPacket *pkt, int last)
|
||||
{
|
||||
int len, got_frame;
|
||||
char buf[1024];
|
||||
|
||||
len = avcodec_decode_video2(avctx, frame, &got_frame, pkt);
|
||||
if (len < 0) {
|
||||
fprintf(stderr, "Error while decoding frame %d\n", *frame_count);
|
||||
return len;
|
||||
}
|
||||
if (got_frame) {
|
||||
printf("Saving %sframe %3d\n", last ? "last " : "", *frame_count);
|
||||
fflush(stdout);
|
||||
|
||||
/* the picture is allocated by the decoder, no need to free it */
|
||||
snprintf(buf, sizeof(buf), outfilename, *frame_count);
|
||||
pgm_save(frame->data[0], frame->linesize[0],
|
||||
frame->width, frame->height, buf);
|
||||
(*frame_count)++;
|
||||
}
|
||||
if (pkt->data) {
|
||||
pkt->size -= len;
|
||||
pkt->data += len;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void video_decode_example(const char *outfilename, const char *filename)
|
||||
{
|
||||
AVCodec *codec;
|
||||
AVCodecContext *c= NULL;
|
||||
int frame_count;
|
||||
FILE *f;
|
||||
AVFrame *frame;
|
||||
uint8_t inbuf[INBUF_SIZE + AV_INPUT_BUFFER_PADDING_SIZE];
|
||||
AVPacket avpkt;
|
||||
|
||||
av_init_packet(&avpkt);
|
||||
|
||||
/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
|
||||
memset(inbuf + INBUF_SIZE, 0, AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
|
||||
printf("Decode video file %s to %s\n", filename, outfilename);
|
||||
|
||||
/* find the mpeg1 video decoder */
|
||||
codec = avcodec_find_decoder(AV_CODEC_ID_MPEG1VIDEO);
|
||||
if (!codec) {
|
||||
fprintf(stderr, "Codec not found\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
c = avcodec_alloc_context3(codec);
|
||||
if (!c) {
|
||||
fprintf(stderr, "Could not allocate video codec context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (codec->capabilities & AV_CODEC_CAP_TRUNCATED)
|
||||
c->flags |= AV_CODEC_FLAG_TRUNCATED; // we do not send complete frames
|
||||
|
||||
/* For some codecs, such as msmpeg4 and mpeg4, width and height
|
||||
MUST be initialized there because this information is not
|
||||
available in the bitstream. */
|
||||
|
||||
/* open it */
|
||||
if (avcodec_open2(c, codec, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open codec\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
f = fopen(filename, "rb");
|
||||
if (!f) {
|
||||
fprintf(stderr, "Could not open %s\n", filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame_count = 0;
|
||||
for (;;) {
|
||||
avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
|
||||
if (avpkt.size == 0)
|
||||
break;
|
||||
|
||||
/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
|
||||
and this is the only method to use them because you cannot
|
||||
know the compressed data size before analysing it.
|
||||
|
||||
BUT some other codecs (msmpeg4, mpeg4) are inherently frame
|
||||
based, so you must call them with all the data for one
|
||||
frame exactly. You must also initialize 'width' and
|
||||
'height' before initializing them. */
|
||||
|
||||
/* NOTE2: some codecs allow the raw parameters (frame size,
|
||||
sample rate) to be changed at any frame. We handle this, so
|
||||
you should also take care of it */
|
||||
|
||||
/* here, we use a stream based decoder (mpeg1video), so we
|
||||
feed decoder and see if it could decode a frame */
|
||||
avpkt.data = inbuf;
|
||||
while (avpkt.size > 0)
|
||||
if (decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 0) < 0)
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* some codecs, such as MPEG, transmit the I and P frame with a
|
||||
latency of one frame. You must do the following to have a
|
||||
chance to get the last frame of the video */
|
||||
avpkt.data = NULL;
|
||||
avpkt.size = 0;
|
||||
decode_write_frame(outfilename, c, frame, &frame_count, &avpkt, 1);
|
||||
|
||||
fclose(f);
|
||||
|
||||
avcodec_close(c);
|
||||
av_free(c);
|
||||
av_frame_free(&frame);
|
||||
printf("\n");
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
const char *output_type;
|
||||
|
||||
/* register all the codecs */
|
||||
avcodec_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_type\n"
|
||||
"API example program to decode/encode a media stream with libavcodec.\n"
|
||||
"This program generates a synthetic stream and encodes it to a file\n"
|
||||
"named test.h264, test.mp2 or test.mpg depending on output_type.\n"
|
||||
"The encoded stream is then decoded and written to a raw data output.\n"
|
||||
"output_type must be chosen between 'h264', 'mp2', 'mpg'.\n",
|
||||
argv[0]);
|
||||
return 1;
|
||||
}
|
||||
output_type = argv[1];
|
||||
|
||||
if (!strcmp(output_type, "h264")) {
|
||||
video_encode_example("test.h264", AV_CODEC_ID_H264);
|
||||
} else if (!strcmp(output_type, "mp2")) {
|
||||
audio_encode_example("test.mp2");
|
||||
audio_decode_example("test.pcm", "test.mp2");
|
||||
} else if (!strcmp(output_type, "mpg")) {
|
||||
video_encode_example("test.mpg", AV_CODEC_ID_MPEG1VIDEO);
|
||||
video_decode_example("test%02d.pgm", "test.mpg");
|
||||
} else {
|
||||
fprintf(stderr, "Invalid output type '%s', choose between 'h264', 'mp2', or 'mpg'\n",
|
||||
output_type);
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@@ -0,0 +1,407 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Demuxing and decoding example.
|
||||
*
|
||||
* Show how to use the libavformat and libavcodec API to demux and
|
||||
* decode audio and video data.
|
||||
* @example demuxing_decoding.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL, *audio_dec_ctx;
|
||||
static int width, height;
|
||||
static enum AVPixelFormat pix_fmt;
|
||||
static AVStream *video_stream = NULL, *audio_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
static const char *video_dst_filename = NULL;
|
||||
static const char *audio_dst_filename = NULL;
|
||||
static FILE *video_dst_file = NULL;
|
||||
static FILE *audio_dst_file = NULL;
|
||||
|
||||
static uint8_t *video_dst_data[4] = {NULL};
|
||||
static int video_dst_linesize[4];
|
||||
static int video_dst_bufsize;
|
||||
|
||||
static int video_stream_idx = -1, audio_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
static int audio_frame_count = 0;
|
||||
|
||||
/* The different ways of decoding and managing data memory. You are not
|
||||
* supposed to support all the modes in your application but pick the one most
|
||||
* appropriate to your needs. Look for the use of api_mode in this example to
|
||||
* see what are the differences of API usage between them */
|
||||
enum {
|
||||
API_MODE_OLD = 0, /* old method, deprecated */
|
||||
API_MODE_NEW_API_REF_COUNT = 1, /* new method, using the frame reference counting */
|
||||
API_MODE_NEW_API_NO_REF_COUNT = 2, /* new method, without reference counting */
|
||||
};
|
||||
|
||||
static int api_mode = API_MODE_OLD;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int ret = 0;
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
/* decode video frame */
|
||||
ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
|
||||
if (frame->width != width || frame->height != height ||
|
||||
frame->format != pix_fmt) {
|
||||
/* To handle this change, one could call av_image_alloc again and
|
||||
* decode the following frames into another rawvideo file. */
|
||||
fprintf(stderr, "Error: Width, height and pixel format have to be "
|
||||
"constant in a rawvideo file, but the width, height or "
|
||||
"pixel format of the input video changed:\n"
|
||||
"old: width = %d, height = %d, format = %s\n"
|
||||
"new: width = %d, height = %d, format = %s\n",
|
||||
width, height, av_get_pix_fmt_name(pix_fmt),
|
||||
frame->width, frame->height,
|
||||
av_get_pix_fmt_name(frame->format));
|
||||
return -1;
|
||||
}
|
||||
|
||||
printf("video_frame%s n:%d coded_n:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
video_frame_count++, frame->coded_picture_number,
|
||||
av_ts2timestr(frame->pts, &video_dec_ctx->time_base));
|
||||
|
||||
/* copy decoded frame to destination buffer:
|
||||
* this is required since rawvideo expects non aligned data */
|
||||
av_image_copy(video_dst_data, video_dst_linesize,
|
||||
(const uint8_t **)(frame->data), frame->linesize,
|
||||
pix_fmt, width, height);
|
||||
|
||||
/* write to rawvideo file */
|
||||
fwrite(video_dst_data[0], 1, video_dst_bufsize, video_dst_file);
|
||||
}
|
||||
} else if (pkt.stream_index == audio_stream_idx) {
|
||||
/* decode audio frame */
|
||||
ret = avcodec_decode_audio4(audio_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding audio frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
/* Some audio decoders decode only part of the packet, and have to be
|
||||
* called again with the remainder of the packet data.
|
||||
* Sample: fate-suite/lossless-audio/luckynight-partial.shn
|
||||
* Also, some decoders might over-read the packet. */
|
||||
decoded = FFMIN(ret, pkt.size);
|
||||
|
||||
if (*got_frame) {
|
||||
size_t unpadded_linesize = frame->nb_samples * av_get_bytes_per_sample(frame->format);
|
||||
printf("audio_frame%s n:%d nb_samples:%d pts:%s\n",
|
||||
cached ? "(cached)" : "",
|
||||
audio_frame_count++, frame->nb_samples,
|
||||
av_ts2timestr(frame->pts, &audio_dec_ctx->time_base));
|
||||
|
||||
/* Write the raw audio data samples of the first plane. This works
|
||||
* fine for packed formats (e.g. AV_SAMPLE_FMT_S16). However,
|
||||
* most audio decoders output planar audio, which uses a separate
|
||||
* plane of audio samples for each channel (e.g. AV_SAMPLE_FMT_S16P).
|
||||
* In other words, this code will write only the first audio channel
|
||||
* in these cases.
|
||||
* You should use libswresample or libavfilter to convert the frame
|
||||
* to packed data. */
|
||||
fwrite(frame->extended_data[0], 1, unpadded_linesize, audio_dst_file);
|
||||
}
|
||||
}
|
||||
|
||||
/* If we use the new API with reference counting, we own the data and need
|
||||
* to de-reference it when we don't use it anymore */
|
||||
if (*got_frame && api_mode == API_MODE_NEW_API_REF_COUNT)
|
||||
av_frame_unref(frame);
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret, stream_index;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
stream_index = ret;
|
||||
st = fmt_ctx->streams[stream_index];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the decoders, with or without reference counting */
|
||||
if (api_mode == API_MODE_NEW_API_REF_COUNT)
|
||||
av_dict_set(&opts, "refcounted_frames", "1", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
*stream_idx = stream_index;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"sample format %s is not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return -1;
|
||||
}
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 4 && argc != 5) {
|
||||
fprintf(stderr, "usage: %s [-refcount=<old|new_norefcount|new_refcount>] "
|
||||
"input_file video_output_file audio_output_file\n"
|
||||
"API example program to show how to read frames from an input file.\n"
|
||||
"This program reads frames from a file, decodes them, and writes decoded\n"
|
||||
"video frames to a rawvideo file named video_output_file, and decoded\n"
|
||||
"audio frames to a rawaudio file named audio_output_file.\n\n"
|
||||
"If the -refcount option is specified, the program use the\n"
|
||||
"reference counting frame system which allows keeping a copy of\n"
|
||||
"the data for longer than one decode call. If unset, it's using\n"
|
||||
"the classic old method.\n"
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
if (argc == 5) {
|
||||
const char *mode = argv[1] + strlen("-refcount=");
|
||||
if (!strcmp(mode, "old")) api_mode = API_MODE_OLD;
|
||||
else if (!strcmp(mode, "new_norefcount")) api_mode = API_MODE_NEW_API_NO_REF_COUNT;
|
||||
else if (!strcmp(mode, "new_refcount")) api_mode = API_MODE_NEW_API_REF_COUNT;
|
||||
else {
|
||||
fprintf(stderr, "unknow mode '%s'\n", mode);
|
||||
exit(1);
|
||||
}
|
||||
argv++;
|
||||
}
|
||||
src_filename = argv[1];
|
||||
video_dst_filename = argv[2];
|
||||
audio_dst_filename = argv[3];
|
||||
|
||||
/* register all formats and codecs */
|
||||
av_register_all();
|
||||
|
||||
/* open input file, and allocate format context */
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* retrieve stream information */
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
|
||||
video_dst_file = fopen(video_dst_filename, "wb");
|
||||
if (!video_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate image where the decoded image will be put */
|
||||
width = video_dec_ctx->width;
|
||||
height = video_dec_ctx->height;
|
||||
pix_fmt = video_dec_ctx->pix_fmt;
|
||||
ret = av_image_alloc(video_dst_data, video_dst_linesize,
|
||||
width, height, pix_fmt, 1);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate raw video buffer\n");
|
||||
goto end;
|
||||
}
|
||||
video_dst_bufsize = ret;
|
||||
}
|
||||
|
||||
if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
|
||||
audio_stream = fmt_ctx->streams[audio_stream_idx];
|
||||
audio_dec_ctx = audio_stream->codec;
|
||||
audio_dst_file = fopen(audio_dst_filename, "wb");
|
||||
if (!audio_dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
/* dump input information to stderr */
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!audio_stream && !video_stream) {
|
||||
fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* When using the new API, you need to use the libavutil/frame.h API, while
|
||||
* the classic frame management is available in libavcodec */
|
||||
if (api_mode == API_MODE_OLD)
|
||||
frame = avcodec_alloc_frame();
|
||||
else
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
if (video_stream)
|
||||
printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
|
||||
if (audio_stream)
|
||||
printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
printf("Demuxing succeeded.\n");
|
||||
|
||||
if (video_stream) {
|
||||
printf("Play the output video file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(pix_fmt), width, height,
|
||||
video_dst_filename);
|
||||
}
|
||||
|
||||
if (audio_stream) {
|
||||
enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
|
||||
int n_channels = audio_dec_ctx->channels;
|
||||
const char *fmt;
|
||||
|
||||
if (av_sample_fmt_is_planar(sfmt)) {
|
||||
const char *packed = av_get_sample_fmt_name(sfmt);
|
||||
printf("Warning: the sample format the decoder produced is planar "
|
||||
"(%s). This example will output the first channel only.\n",
|
||||
packed ? packed : "?");
|
||||
sfmt = av_get_packed_sample_fmt(sfmt);
|
||||
n_channels = 1;
|
||||
}
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
|
||||
goto end;
|
||||
|
||||
printf("Play the output audio file with the command:\n"
|
||||
"ffplay -f %s -ac %d -ar %d %s\n",
|
||||
fmt, n_channels, audio_dec_ctx->sample_rate,
|
||||
audio_dst_filename);
|
||||
}
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avcodec_close(audio_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
if (video_dst_file)
|
||||
fclose(video_dst_file);
|
||||
if (audio_dst_file)
|
||||
fclose(audio_dst_file);
|
||||
if (api_mode == API_MODE_OLD)
|
||||
avcodec_free_frame(&frame);
|
||||
else
|
||||
av_frame_free(&frame);
|
||||
av_free(video_dst_data[0]);
|
||||
|
||||
return ret < 0;
|
||||
}
|
185
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/extract_mvs.c
Normal file
185
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/extract_mvs.c
Normal file
@@ -0,0 +1,185 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
* Copyright (c) 2014 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
#include <libavutil/motion_vector.h>
|
||||
#include <libavformat/avformat.h>
|
||||
|
||||
static AVFormatContext *fmt_ctx = NULL;
|
||||
static AVCodecContext *video_dec_ctx = NULL;
|
||||
static AVStream *video_stream = NULL;
|
||||
static const char *src_filename = NULL;
|
||||
|
||||
static int video_stream_idx = -1;
|
||||
static AVFrame *frame = NULL;
|
||||
static AVPacket pkt;
|
||||
static int video_frame_count = 0;
|
||||
|
||||
static int decode_packet(int *got_frame, int cached)
|
||||
{
|
||||
int decoded = pkt.size;
|
||||
|
||||
*got_frame = 0;
|
||||
|
||||
if (pkt.stream_index == video_stream_idx) {
|
||||
int ret = avcodec_decode_video2(video_dec_ctx, frame, got_frame, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error decoding video frame (%s)\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (*got_frame) {
|
||||
int i;
|
||||
AVFrameSideData *sd;
|
||||
|
||||
video_frame_count++;
|
||||
sd = av_frame_get_side_data(frame, AV_FRAME_DATA_MOTION_VECTORS);
|
||||
if (sd) {
|
||||
const AVMotionVector *mvs = (const AVMotionVector *)sd->data;
|
||||
for (i = 0; i < sd->size / sizeof(*mvs); i++) {
|
||||
const AVMotionVector *mv = &mvs[i];
|
||||
printf("%d,%2d,%2d,%2d,%4d,%4d,%4d,%4d,0x%"PRIx64"\n",
|
||||
video_frame_count, mv->source,
|
||||
mv->w, mv->h, mv->src_x, mv->src_y,
|
||||
mv->dst_x, mv->dst_y, mv->flags);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return decoded;
|
||||
}
|
||||
|
||||
static int open_codec_context(int *stream_idx,
|
||||
AVFormatContext *fmt_ctx, enum AVMediaType type)
|
||||
{
|
||||
int ret;
|
||||
AVStream *st;
|
||||
AVCodecContext *dec_ctx = NULL;
|
||||
AVCodec *dec = NULL;
|
||||
AVDictionary *opts = NULL;
|
||||
|
||||
ret = av_find_best_stream(fmt_ctx, type, -1, -1, NULL, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not find %s stream in input file '%s'\n",
|
||||
av_get_media_type_string(type), src_filename);
|
||||
return ret;
|
||||
} else {
|
||||
*stream_idx = ret;
|
||||
st = fmt_ctx->streams[*stream_idx];
|
||||
|
||||
/* find decoder for the stream */
|
||||
dec_ctx = st->codec;
|
||||
dec = avcodec_find_decoder(dec_ctx->codec_id);
|
||||
if (!dec) {
|
||||
fprintf(stderr, "Failed to find %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/* Init the video decoder */
|
||||
av_dict_set(&opts, "flags2", "+export_mvs", 0);
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, &opts)) < 0) {
|
||||
fprintf(stderr, "Failed to open %s codec\n",
|
||||
av_get_media_type_string(type));
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret = 0, got_frame;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s <video>\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
src_filename = argv[1];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
|
||||
fprintf(stderr, "Could not open source file %s\n", src_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
|
||||
fprintf(stderr, "Could not find stream information\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
|
||||
video_stream = fmt_ctx->streams[video_stream_idx];
|
||||
video_dec_ctx = video_stream->codec;
|
||||
}
|
||||
|
||||
av_dump_format(fmt_ctx, 0, src_filename, 0);
|
||||
|
||||
if (!video_stream) {
|
||||
fprintf(stderr, "Could not find video stream in the input, aborting\n");
|
||||
ret = 1;
|
||||
goto end;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Could not allocate frame\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
printf("framenum,source,blockw,blockh,srcx,srcy,dstx,dsty,flags\n");
|
||||
|
||||
/* initialize packet, set data to NULL, let the demuxer fill it */
|
||||
av_init_packet(&pkt);
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
|
||||
/* read frames from the file */
|
||||
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
|
||||
AVPacket orig_pkt = pkt;
|
||||
do {
|
||||
ret = decode_packet(&got_frame, 0);
|
||||
if (ret < 0)
|
||||
break;
|
||||
pkt.data += ret;
|
||||
pkt.size -= ret;
|
||||
} while (pkt.size > 0);
|
||||
av_free_packet(&orig_pkt);
|
||||
}
|
||||
|
||||
/* flush cached frames */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
do {
|
||||
decode_packet(&got_frame, 1);
|
||||
} while (got_frame);
|
||||
|
||||
end:
|
||||
avcodec_close(video_dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
return ret < 0;
|
||||
}
|
@@ -0,0 +1,365 @@
|
||||
/*
|
||||
* copyright (c) 2013 Andrew Kelley
|
||||
*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
 * @file
 * libavfilter API usage example.
 *
 * @example filter_audio.c
 * This example generates a sine wave audio signal, passes it through a simple
 * filter chain, and then computes the MD5 checksum of the output data.
 *
 * The filter chain it uses is:
 * (input) -> abuffer -> volume -> aformat -> abuffersink -> (output)
 *
 * abuffer: This provides the endpoint where you can feed the decoded samples.
 * volume: In this example we hardcode it to 0.90.
 * aformat: This converts the samples to the sample rate, channel layout,
 *          and sample format required by the audio device.
 * abuffersink: This provides the endpoint where you can read the samples after
 *              they have passed through the filter chain.
 */
|
||||
|
||||
#include <inttypes.h>
|
||||
#include <math.h>
|
||||
#include <stdio.h>
|
||||
#include <stdlib.h>
|
||||
|
||||
#include "libavutil/channel_layout.h"
|
||||
#include "libavutil/md5.h"
|
||||
#include "libavutil/mem.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/samplefmt.h"
|
||||
|
||||
#include "libavfilter/avfilter.h"
|
||||
#include "libavfilter/buffersink.h"
|
||||
#include "libavfilter/buffersrc.h"
|
||||
|
||||
#define INPUT_SAMPLERATE 48000
|
||||
#define INPUT_FORMAT AV_SAMPLE_FMT_FLTP
|
||||
#define INPUT_CHANNEL_LAYOUT AV_CH_LAYOUT_5POINT0
|
||||
|
||||
#define VOLUME_VAL 0.90
|
||||
|
||||
static int init_filter_graph(AVFilterGraph **graph, AVFilterContext **src,
|
||||
AVFilterContext **sink)
|
||||
{
|
||||
AVFilterGraph *filter_graph;
|
||||
AVFilterContext *abuffer_ctx;
|
||||
AVFilter *abuffer;
|
||||
AVFilterContext *volume_ctx;
|
||||
AVFilter *volume;
|
||||
AVFilterContext *aformat_ctx;
|
||||
AVFilter *aformat;
|
||||
AVFilterContext *abuffersink_ctx;
|
||||
AVFilter *abuffersink;
|
||||
|
||||
AVDictionary *options_dict = NULL;
|
||||
uint8_t options_str[1024];
|
||||
uint8_t ch_layout[64];
|
||||
|
||||
int err;
|
||||
|
||||
/* Create a new filtergraph, which will contain all the filters. */
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!filter_graph) {
|
||||
fprintf(stderr, "Unable to create filter graph.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Create the abuffer filter;
|
||||
* it will be used for feeding the data into the graph. */
|
||||
abuffer = avfilter_get_by_name("abuffer");
|
||||
if (!abuffer) {
|
||||
fprintf(stderr, "Could not find the abuffer filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
abuffer_ctx = avfilter_graph_alloc_filter(filter_graph, abuffer, "src");
|
||||
if (!abuffer_ctx) {
|
||||
fprintf(stderr, "Could not allocate the abuffer instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* Set the filter options through the AVOptions API. */
|
||||
av_get_channel_layout_string(ch_layout, sizeof(ch_layout), 0, INPUT_CHANNEL_LAYOUT);
|
||||
av_opt_set (abuffer_ctx, "channel_layout", ch_layout, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set (abuffer_ctx, "sample_fmt", av_get_sample_fmt_name(INPUT_FORMAT), AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_q (abuffer_ctx, "time_base", (AVRational){ 1, INPUT_SAMPLERATE }, AV_OPT_SEARCH_CHILDREN);
|
||||
av_opt_set_int(abuffer_ctx, "sample_rate", INPUT_SAMPLERATE, AV_OPT_SEARCH_CHILDREN);
|
||||
|
||||
/* Now initialize the filter; we pass NULL options, since we have already
|
||||
* set all the options above. */
|
||||
err = avfilter_init_str(abuffer_ctx, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the abuffer filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Create volume filter. */
|
||||
volume = avfilter_get_by_name("volume");
|
||||
if (!volume) {
|
||||
fprintf(stderr, "Could not find the volume filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
volume_ctx = avfilter_graph_alloc_filter(filter_graph, volume, "volume");
|
||||
if (!volume_ctx) {
|
||||
fprintf(stderr, "Could not allocate the volume instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* A different way of passing the options is as key/value pairs in a
|
||||
* dictionary. */
|
||||
av_dict_set(&options_dict, "volume", AV_STRINGIFY(VOLUME_VAL), 0);
|
||||
err = avfilter_init_dict(volume_ctx, &options_dict);
|
||||
av_dict_free(&options_dict);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the volume filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Create the aformat filter;
|
||||
* it ensures that the output is of the format we want. */
|
||||
aformat = avfilter_get_by_name("aformat");
|
||||
if (!aformat) {
|
||||
fprintf(stderr, "Could not find the aformat filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
aformat_ctx = avfilter_graph_alloc_filter(filter_graph, aformat, "aformat");
|
||||
if (!aformat_ctx) {
|
||||
fprintf(stderr, "Could not allocate the aformat instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* A third way of passing the options is in a string of the form
|
||||
* key1=value1:key2=value2.... */
|
||||
snprintf(options_str, sizeof(options_str),
|
||||
"sample_fmts=%s:sample_rates=%d:channel_layouts=0x%"PRIx64,
|
||||
av_get_sample_fmt_name(AV_SAMPLE_FMT_S16), 44100,
|
||||
(uint64_t)AV_CH_LAYOUT_STEREO);
|
||||
err = avfilter_init_str(aformat_ctx, options_str);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Could not initialize the aformat filter.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Finally create the abuffersink filter;
|
||||
* it will be used to get the filtered data out of the graph. */
|
||||
abuffersink = avfilter_get_by_name("abuffersink");
|
||||
if (!abuffersink) {
|
||||
fprintf(stderr, "Could not find the abuffersink filter.\n");
|
||||
return AVERROR_FILTER_NOT_FOUND;
|
||||
}
|
||||
|
||||
abuffersink_ctx = avfilter_graph_alloc_filter(filter_graph, abuffersink, "sink");
|
||||
if (!abuffersink_ctx) {
|
||||
fprintf(stderr, "Could not allocate the abuffersink instance.\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/* This filter takes no options. */
|
||||
err = avfilter_init_str(abuffersink_ctx, NULL);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Could not initialize the abuffersink instance.\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Connect the filters;
|
||||
* in this simple case the filters just form a linear chain. */
|
||||
err = avfilter_link(abuffer_ctx, 0, volume_ctx, 0);
|
||||
if (err >= 0)
|
||||
err = avfilter_link(volume_ctx, 0, aformat_ctx, 0);
|
||||
if (err >= 0)
|
||||
err = avfilter_link(aformat_ctx, 0, abuffersink_ctx, 0);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error connecting filters\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
/* Configure the graph. */
|
||||
err = avfilter_graph_config(filter_graph, NULL);
|
||||
if (err < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error configuring the filter graph\n");
|
||||
return err;
|
||||
}
|
||||
|
||||
*graph = filter_graph;
|
||||
*src = abuffer_ctx;
|
||||
*sink = abuffersink_ctx;
|
||||
|
||||
return 0;
|
||||
}
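/* Hedged addition, not part of the original example: the allocate-then-init
 * steps shown above can also be collapsed into a single call with
 * avfilter_graph_create_filter(), passing the options as a key=value:...
 * string. The aformat parameters below are illustrative values only. */
static int create_aformat_in_one_call(AVFilterGraph *graph, AVFilterContext **ctx)
{
    AVFilter *aformat = avfilter_get_by_name("aformat");

    if (!aformat)
        return AVERROR_FILTER_NOT_FOUND;
    return avfilter_graph_create_filter(ctx, aformat, "aformat",
                                        "sample_fmts=s16:sample_rates=44100:"
                                        "channel_layouts=stereo",
                                        NULL, graph);
}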
|
||||
|
||||
/* Do something useful with the filtered data: this simple
|
||||
* example just prints the MD5 checksum of each plane to stdout. */
|
||||
static int process_output(struct AVMD5 *md5, AVFrame *frame)
|
||||
{
|
||||
int planar = av_sample_fmt_is_planar(frame->format);
|
||||
int channels = av_get_channel_layout_nb_channels(frame->channel_layout);
|
||||
int planes = planar ? channels : 1;
|
||||
int bps = av_get_bytes_per_sample(frame->format);
|
||||
int plane_size = bps * frame->nb_samples * (planar ? 1 : channels);
|
||||
int i, j;
|
||||
|
||||
for (i = 0; i < planes; i++) {
|
||||
uint8_t checksum[16];
|
||||
|
||||
av_md5_init(md5);
|
||||
av_md5_sum(checksum, frame->extended_data[i], plane_size);
|
||||
|
||||
fprintf(stdout, "plane %d: 0x", i);
|
||||
for (j = 0; j < sizeof(checksum); j++)
|
||||
fprintf(stdout, "%02X", checksum[j]);
|
||||
fprintf(stdout, "\n");
|
||||
}
|
||||
fprintf(stdout, "\n");
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/* Construct a frame of audio data to be filtered;
|
||||
* this simple example just synthesizes a sine wave. */
|
||||
static int get_input(AVFrame *frame, int frame_num)
|
||||
{
|
||||
int err, i, j;
|
||||
|
||||
#define FRAME_SIZE 1024
|
||||
|
||||
/* Set up the frame properties and allocate the buffer for the data. */
|
||||
frame->sample_rate = INPUT_SAMPLERATE;
|
||||
frame->format = INPUT_FORMAT;
|
||||
frame->channel_layout = INPUT_CHANNEL_LAYOUT;
|
||||
frame->nb_samples = FRAME_SIZE;
|
||||
frame->pts = frame_num * FRAME_SIZE;
|
||||
|
||||
err = av_frame_get_buffer(frame, 0);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
/* Fill the data for each channel. */
|
||||
for (i = 0; i < 5; i++) {
|
||||
float *data = (float*)frame->extended_data[i];
|
||||
|
||||
for (j = 0; j < frame->nb_samples; j++)
|
||||
data[j] = sin(2 * M_PI * (frame_num + j) * (i + 1) / FRAME_SIZE);
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
int main(int argc, char *argv[])
|
||||
{
|
||||
struct AVMD5 *md5;
|
||||
AVFilterGraph *graph;
|
||||
AVFilterContext *src, *sink;
|
||||
AVFrame *frame;
|
||||
uint8_t errstr[1024];
|
||||
float duration;
|
||||
int err, nb_frames, i;
|
||||
|
||||
if (argc < 2) {
|
||||
fprintf(stderr, "Usage: %s <duration>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
duration = atof(argv[1]);
|
||||
nb_frames = duration * INPUT_SAMPLERATE / FRAME_SIZE;
|
||||
if (nb_frames <= 0) {
|
||||
fprintf(stderr, "Invalid duration: %s\n", argv[1]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
avfilter_register_all();
|
||||
|
||||
/* Allocate the frame we will be using to store the data. */
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating the frame\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
md5 = av_md5_alloc();
|
||||
if (!md5) {
|
||||
fprintf(stderr, "Error allocating the MD5 context\n");
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* Set up the filtergraph. */
|
||||
err = init_filter_graph(&graph, &src, &sink);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Unable to init filter graph:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* the main filtering loop */
|
||||
for (i = 0; i < nb_frames; i++) {
|
||||
/* get an input frame to be filtered */
|
||||
err = get_input(frame, i);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error generating input frame:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Send the frame to the input of the filtergraph. */
|
||||
err = av_buffersrc_add_frame(src, frame);
|
||||
if (err < 0) {
|
||||
av_frame_unref(frame);
|
||||
fprintf(stderr, "Error submitting the frame to the filtergraph:");
|
||||
goto fail;
|
||||
}
|
||||
|
||||
/* Get all the filtered output that is available. */
|
||||
while ((err = av_buffersink_get_frame(sink, frame)) >= 0) {
|
||||
/* now do something with our filtered frame */
|
||||
err = process_output(md5, frame);
|
||||
if (err < 0) {
|
||||
fprintf(stderr, "Error processing the filtered frame:");
|
||||
goto fail;
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
|
||||
if (err == AVERROR(EAGAIN)) {
|
||||
/* Need to feed more frames in. */
|
||||
continue;
|
||||
} else if (err == AVERROR_EOF) {
|
||||
/* Nothing more to do, finish. */
|
||||
break;
|
||||
} else if (err < 0) {
|
||||
/* An error occurred. */
|
||||
fprintf(stderr, "Error filtering the data:");
|
||||
goto fail;
|
||||
}
|
||||
}
|
||||
|
||||
avfilter_graph_free(&graph);
|
||||
av_frame_free(&frame);
|
||||
av_freep(&md5);
|
||||
|
||||
return 0;
|
||||
|
||||
fail:
|
||||
av_strerror(err, errstr, sizeof(errstr));
|
||||
fprintf(stderr, "%s\n", errstr);
|
||||
return 1;
|
||||
}
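/* Hedged addition, not part of the original example: the feed/drain pattern
 * used in the main loop above, condensed into a helper. "src" and "sink" are
 * assumed to be configured abuffer/abuffersink contexts and "handle" a caller
 * supplied callback such as process_output(). */
static int filter_one_frame_sketch(AVFilterContext *src, AVFilterContext *sink,
                                   AVFrame *in, AVFrame *out,
                                   int (*handle)(AVFrame *filtered))
{
    int err = av_buffersrc_add_frame(src, in);

    if (err < 0)
        return err;
    /* drain everything the graph can produce right now */
    while ((err = av_buffersink_get_frame(sink, out)) >= 0) {
        int herr = handle(out);
        av_frame_unref(out);
        if (herr < 0)
            return herr;
    }
    /* EAGAIN only means the graph wants more input; EOF means it is done */
    return (err == AVERROR(EAGAIN) || err == AVERROR_EOF) ? 0 : err;
}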
|
@@ -0,0 +1,296 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
* Copyright (c) 2012 Clément Bœsch
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for audio decoding and filtering
|
||||
* @example filtering_audio.c
|
||||
*/
|
||||
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
static const char *filter_descr = "aresample=8000,aformat=sample_fmts=s16:channel_layouts=mono";
|
||||
static const char *player = "ffplay -f s16le -ar 8000 -ac 1 -";
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int audio_stream_index = -1;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the audio stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_AUDIO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a audio stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
audio_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[audio_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the audio decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open audio decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *abuffersrc = avfilter_get_by_name("abuffer");
|
||||
AVFilter *abuffersink = avfilter_get_by_name("abuffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
static const enum AVSampleFormat out_sample_fmts[] = { AV_SAMPLE_FMT_S16, -1 };
|
||||
static const int64_t out_channel_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
|
||||
static const int out_sample_rates[] = { 8000, -1 };
|
||||
const AVFilterLink *outlink;
|
||||
AVRational time_base = fmt_ctx->streams[audio_stream_index]->time_base;
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio source: the decoded frames from the decoder will be inserted here. */
|
||||
if (!dec_ctx->channel_layout)
|
||||
dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
|
||||
snprintf(args, sizeof(args),
|
||||
"time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
|
||||
time_base.num, time_base.den, dec_ctx->sample_rate,
|
||||
av_get_sample_fmt_name(dec_ctx->sample_fmt), dec_ctx->channel_layout);
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, abuffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer audio sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, abuffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_fmts", out_sample_fmts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "channel_layouts", out_channel_layouts, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "sample_rates", out_sample_rates, -1,
|
||||
AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
/* Print summary of the sink buffer
|
||||
* Note: args buffer is reused to store channel layout string */
|
||||
outlink = buffersink_ctx->inputs[0];
|
||||
av_get_channel_layout_string(args, sizeof(args), -1, outlink->channel_layout);
|
||||
av_log(NULL, AV_LOG_INFO, "Output: srate:%dHz fmt:%s chlayout:%s\n",
|
||||
(int)outlink->sample_rate,
|
||||
(char *)av_x_if_null(av_get_sample_fmt_name(outlink->format), "?"),
|
||||
args);
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void print_frame(const AVFrame *frame)
|
||||
{
|
||||
const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
|
||||
const uint16_t *p = (uint16_t*)frame->data[0];
|
||||
const uint16_t *p_end = p + n;
|
||||
|
||||
while (p < p_end) {
|
||||
fputc(*p & 0xff, stdout);
|
||||
fputc(*p>>8 & 0xff, stdout);
|
||||
p++;
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet0, packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
packet0.data = NULL;
|
||||
packet.data = NULL;
|
||||
while (1) {
|
||||
if (!packet0.data) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
packet0 = packet;
|
||||
}
|
||||
|
||||
if (packet.stream_index == audio_stream_index) {
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
|
||||
continue;
|
||||
}
|
||||
packet.size -= ret;
|
||||
packet.data += ret;
|
||||
|
||||
if (got_frame) {
|
||||
/* push the audio data from decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered audio from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
print_frame(filt_frame);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
}
|
||||
|
||||
if (packet.size <= 0)
|
||||
av_free_packet(&packet0);
|
||||
} else {
|
||||
/* discard non-wanted packets */
|
||||
av_free_packet(&packet0);
|
||||
}
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
@@ -0,0 +1,281 @@
|
||||
/*
|
||||
* Copyright (c) 2010 Nicolas George
|
||||
* Copyright (c) 2011 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* API example for decoding and filtering
|
||||
* @example filtering_video.c
|
||||
*/
|
||||
|
||||
#define _XOPEN_SOURCE 600 /* for usleep */
|
||||
#include <unistd.h>
|
||||
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavfilter/avfiltergraph.h>
|
||||
#include <libavfilter/avcodec.h>
|
||||
#include <libavfilter/buffersink.h>
|
||||
#include <libavfilter/buffersrc.h>
|
||||
#include <libavutil/opt.h>
|
||||
|
||||
const char *filter_descr = "scale=78:24,transpose=cclock";
|
||||
/* Another way of writing the same graph:
 *   scale=78:24 [scl]; [scl] transpose=cclock
 * (this assumes "[in]" and "[out]" to be the input and output pads, respectively)
 */
|
||||
|
||||
static AVFormatContext *fmt_ctx;
|
||||
static AVCodecContext *dec_ctx;
|
||||
AVFilterContext *buffersink_ctx;
|
||||
AVFilterContext *buffersrc_ctx;
|
||||
AVFilterGraph *filter_graph;
|
||||
static int video_stream_index = -1;
|
||||
static int64_t last_pts = AV_NOPTS_VALUE;
|
||||
|
||||
static int open_input_file(const char *filename)
|
||||
{
|
||||
int ret;
|
||||
AVCodec *dec;
|
||||
|
||||
if ((ret = avformat_open_input(&fmt_ctx, filename, NULL, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(fmt_ctx, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
/* select the video stream */
|
||||
ret = av_find_best_stream(fmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &dec, 0);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot find a video stream in the input file\n");
|
||||
return ret;
|
||||
}
|
||||
video_stream_index = ret;
|
||||
dec_ctx = fmt_ctx->streams[video_stream_index]->codec;
|
||||
av_opt_set_int(dec_ctx, "refcounted_frames", 1, 0);
|
||||
|
||||
/* init the video decoder */
|
||||
if ((ret = avcodec_open2(dec_ctx, dec, NULL)) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot open video decoder\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int init_filters(const char *filters_descr)
|
||||
{
|
||||
char args[512];
|
||||
int ret = 0;
|
||||
AVFilter *buffersrc = avfilter_get_by_name("buffer");
|
||||
AVFilter *buffersink = avfilter_get_by_name("buffersink");
|
||||
AVFilterInOut *outputs = avfilter_inout_alloc();
|
||||
AVFilterInOut *inputs = avfilter_inout_alloc();
|
||||
AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
|
||||
enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
|
||||
|
||||
filter_graph = avfilter_graph_alloc();
|
||||
if (!outputs || !inputs || !filter_graph) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video source: the decoded frames from the decoder will be inserted here. */
|
||||
snprintf(args, sizeof(args),
|
||||
"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
|
||||
dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
|
||||
time_base.num, time_base.den,
|
||||
dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
|
||||
|
||||
ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
|
||||
args, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer video sink: to terminate the filter chain. */
|
||||
ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
|
||||
NULL, NULL, filter_graph);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
|
||||
AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/*
|
||||
* Set the endpoints for the filter graph. The filter_graph will
|
||||
* be linked to the graph described by filters_descr.
|
||||
*/
|
||||
|
||||
/*
|
||||
* The buffer source output must be connected to the input pad of
|
||||
* the first filter described by filters_descr; since the first
|
||||
* filter input label is not specified, it is set to "in" by
|
||||
* default.
|
||||
*/
|
||||
outputs->name = av_strdup("in");
|
||||
outputs->filter_ctx = buffersrc_ctx;
|
||||
outputs->pad_idx = 0;
|
||||
outputs->next = NULL;
|
||||
|
||||
/*
|
||||
* The buffer sink input must be connected to the output pad of
|
||||
* the last filter described by filters_descr; since the last
|
||||
* filter output label is not specified, it is set to "out" by
|
||||
* default.
|
||||
*/
|
||||
inputs->name = av_strdup("out");
|
||||
inputs->filter_ctx = buffersink_ctx;
|
||||
inputs->pad_idx = 0;
|
||||
inputs->next = NULL;
|
||||
|
||||
if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
|
||||
&inputs, &outputs, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
|
||||
goto end;
|
||||
|
||||
end:
|
||||
avfilter_inout_free(&inputs);
|
||||
avfilter_inout_free(&outputs);
|
||||
|
||||
return ret;
|
||||
}
|
||||
|
||||
static void display_frame(const AVFrame *frame, AVRational time_base)
|
||||
{
|
||||
int x, y;
|
||||
uint8_t *p0, *p;
|
||||
int64_t delay;
|
||||
|
||||
if (frame->pts != AV_NOPTS_VALUE) {
|
||||
if (last_pts != AV_NOPTS_VALUE) {
|
||||
/* sleep roughly the right amount of time;
|
||||
* usleep is in microseconds, just like AV_TIME_BASE. */
|
||||
delay = av_rescale_q(frame->pts - last_pts,
|
||||
time_base, AV_TIME_BASE_Q);
|
||||
if (delay > 0 && delay < 1000000)
|
||||
usleep(delay);
|
||||
}
|
||||
last_pts = frame->pts;
|
||||
}
|
||||
|
||||
/* Trivial ASCII grayscale display. */
|
||||
p0 = frame->data[0];
|
||||
puts("\033c");
|
||||
for (y = 0; y < frame->height; y++) {
|
||||
p = p0;
|
||||
for (x = 0; x < frame->width; x++)
|
||||
putchar(" .-+#"[*(p++) / 52]);
|
||||
putchar('\n');
|
||||
p0 += frame->linesize[0];
|
||||
}
|
||||
fflush(stdout);
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int ret;
|
||||
AVPacket packet;
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
AVFrame *filt_frame = av_frame_alloc();
|
||||
int got_frame;
|
||||
|
||||
if (!frame || !filt_frame) {
|
||||
perror("Could not allocate frame");
|
||||
exit(1);
|
||||
}
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s file\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
avfilter_register_all();
|
||||
|
||||
if ((ret = open_input_file(argv[1])) < 0)
|
||||
goto end;
|
||||
if ((ret = init_filters(filter_descr)) < 0)
|
||||
goto end;
|
||||
|
||||
/* read all packets */
|
||||
while (1) {
|
||||
if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
|
||||
break;
|
||||
|
||||
if (packet.stream_index == video_stream_index) {
|
||||
got_frame = 0;
|
||||
ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
|
||||
if (ret < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
|
||||
break;
|
||||
}
|
||||
|
||||
if (got_frame) {
|
||||
frame->pts = av_frame_get_best_effort_timestamp(frame);
|
||||
|
||||
/* push the decoded frame into the filtergraph */
|
||||
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
|
||||
av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
|
||||
break;
|
||||
}
|
||||
|
||||
/* pull filtered frames from the filtergraph */
|
||||
while (1) {
|
||||
ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
|
||||
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
|
||||
break;
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
|
||||
av_frame_unref(filt_frame);
|
||||
}
|
||||
av_frame_unref(frame);
|
||||
}
|
||||
}
|
||||
av_free_packet(&packet);
|
||||
}
|
||||
end:
|
||||
avfilter_graph_free(&filter_graph);
|
||||
avcodec_close(dec_ctx);
|
||||
avformat_close_input(&fmt_ctx);
|
||||
av_frame_free(&frame);
|
||||
av_frame_free(&filt_frame);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
exit(0);
|
||||
}
|
@@ -0,0 +1,155 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Stephan Holljes
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
 * @file
 * libavformat multi-client network API usage example.
 *
 * @example http_multiclient.c
 * This example serves a file over HTTP without decoding or demuxing it.
 * Multiple clients can connect and will receive the same file.
 */
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <unistd.h>
|
||||
|
||||
void process_client(AVIOContext *client, const char *in_uri)
|
||||
{
|
||||
AVIOContext *input = NULL;
|
||||
uint8_t buf[1024];
|
||||
int ret, n, reply_code;
|
||||
char *resource = NULL;
|
||||
while ((ret = avio_handshake(client)) > 0) {
|
||||
av_opt_get(client, "resource", AV_OPT_SEARCH_CHILDREN, &resource);
|
||||
        // Checking strlen(resource) is necessary because av_opt_get()
        // may return an empty string.
|
||||
if (resource && strlen(resource))
|
||||
break;
|
||||
}
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
av_log(client, AV_LOG_TRACE, "resource=%p\n", resource);
|
||||
if (resource && resource[0] == '/' && !strcmp((resource + 1), in_uri)) {
|
||||
reply_code = 200;
|
||||
} else {
|
||||
reply_code = AVERROR_HTTP_NOT_FOUND;
|
||||
}
|
||||
if ((ret = av_opt_set_int(client, "reply_code", reply_code, AV_OPT_SEARCH_CHILDREN)) < 0) {
|
||||
av_log(client, AV_LOG_ERROR, "Failed to set reply_code: %s.\n", av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
av_log(client, AV_LOG_TRACE, "Set reply code to %d\n", reply_code);
|
||||
|
||||
while ((ret = avio_handshake(client)) > 0);
|
||||
|
||||
if (ret < 0)
|
||||
goto end;
|
||||
|
||||
fprintf(stderr, "Handshake performed.\n");
|
||||
if (reply_code != 200)
|
||||
goto end;
|
||||
fprintf(stderr, "Opening input file.\n");
|
||||
if ((ret = avio_open2(&input, in_uri, AVIO_FLAG_READ, NULL, NULL)) < 0) {
|
||||
av_log(input, AV_LOG_ERROR, "Failed to open input: %s: %s.\n", in_uri,
|
||||
av_err2str(ret));
|
||||
goto end;
|
||||
}
|
||||
for(;;) {
|
||||
n = avio_read(input, buf, sizeof(buf));
|
||||
if (n < 0) {
|
||||
if (n == AVERROR_EOF)
|
||||
break;
|
||||
av_log(input, AV_LOG_ERROR, "Error reading from input: %s.\n",
|
||||
av_err2str(n));
|
||||
break;
|
||||
}
|
||||
avio_write(client, buf, n);
|
||||
avio_flush(client);
|
||||
}
|
||||
end:
|
||||
fprintf(stderr, "Flushing client\n");
|
||||
avio_flush(client);
|
||||
fprintf(stderr, "Closing client\n");
|
||||
avio_close(client);
|
||||
fprintf(stderr, "Closing input\n");
|
||||
avio_close(input);
|
||||
}
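/* Hedged addition, not part of the original example: a matching libavformat
 * client that fetches the resource served above and returns the number of
 * bytes received (or a negative AVERROR). The URL is supplied by the caller,
 * e.g. "http://localhost:8080/somefile". */
static int64_t fetch_resource_sketch(const char *url)
{
    AVIOContext *in = NULL;
    uint8_t buf[1024];
    int64_t total = 0;
    int n, ret;

    if ((ret = avio_open2(&in, url, AVIO_FLAG_READ, NULL, NULL)) < 0)
        return ret;
    while ((n = avio_read(in, buf, sizeof(buf))) > 0)
        total += n;
    avio_closep(&in);
    return (n == AVERROR_EOF || n == 0) ? total : n;
}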
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
av_log_set_level(AV_LOG_TRACE);
|
||||
AVDictionary *options = NULL;
|
||||
AVIOContext *client = NULL, *server = NULL;
|
||||
const char *in_uri, *out_uri;
|
||||
int ret, pid;
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input http://hostname[:port]\n"
|
||||
"API example program to serve http to multiple clients.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_uri = argv[1];
|
||||
out_uri = argv[2];
|
||||
|
||||
av_register_all();
|
||||
avformat_network_init();
|
||||
|
||||
if ((ret = av_dict_set(&options, "listen", "2", 0)) < 0) {
|
||||
fprintf(stderr, "Failed to set listen mode for server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
if ((ret = avio_open2(&server, out_uri, AVIO_FLAG_WRITE, NULL, &options)) < 0) {
|
||||
fprintf(stderr, "Failed to open server: %s\n", av_err2str(ret));
|
||||
return ret;
|
||||
}
|
||||
fprintf(stderr, "Entering main loop.\n");
|
||||
for(;;) {
|
||||
if ((ret = avio_accept(server, &client)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Accepted client, forking process.\n");
|
||||
// XXX: Since we don't reap our children and don't ignore signals
|
||||
// this produces zombie processes.
|
||||
pid = fork();
|
||||
if (pid < 0) {
|
||||
perror("Fork failed");
|
||||
ret = AVERROR(errno);
|
||||
goto end;
|
||||
}
|
||||
if (pid == 0) {
|
||||
fprintf(stderr, "In child.\n");
|
||||
process_client(client, in_uri);
|
||||
avio_close(server);
|
||||
exit(0);
|
||||
}
|
||||
if (pid > 0)
|
||||
avio_close(client);
|
||||
}
|
||||
end:
|
||||
avio_close(server);
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Some errors occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
return 0;
|
||||
}
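/* Hedged addition, not part of the original example: one conventional way to
 * avoid the zombie processes mentioned in the XXX comment above is to ask the
 * system to reap children automatically by ignoring SIGCHLD, e.g. by calling
 * this helper once near the top of main(). */
#include <signal.h>

static void reap_children_automatically(void)
{
    signal(SIGCHLD, SIG_IGN); /* POSIX: terminated children are reaped, no zombies */
}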
|
@@ -0,0 +1,56 @@
|
||||
/*
|
||||
* Copyright (c) 2011 Reinhard Tartler
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Shows how the metadata API can be used in application programs.
|
||||
* @example metadata.c
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libavutil/dict.h>
|
||||
|
||||
int main (int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *fmt_ctx = NULL;
|
||||
AVDictionaryEntry *tag = NULL;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
printf("usage: %s <input_file>\n"
|
||||
"example program to demonstrate the use of the libavformat metadata API.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
av_register_all();
|
||||
if ((ret = avformat_open_input(&fmt_ctx, argv[1], NULL, NULL)))
|
||||
return ret;
|
||||
|
||||
while ((tag = av_dict_get(fmt_ctx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX)))
|
||||
printf("%s=%s\n", tag->key, tag->value);
|
||||
|
||||
avformat_close_input(&fmt_ctx);
|
||||
return 0;
|
||||
}
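/* Hedged addition, not part of the original example: the same iteration also
 * works on per-stream metadata, which lives in each AVStream's "metadata"
 * dictionary rather than in the AVFormatContext. */
static void dump_stream_metadata_sketch(const AVFormatContext *fmt_ctx)
{
    unsigned i;

    for (i = 0; i < fmt_ctx->nb_streams; i++) {
        AVDictionaryEntry *tag = NULL;
        while ((tag = av_dict_get(fmt_ctx->streams[i]->metadata, "", tag,
                                  AV_DICT_IGNORE_SUFFIX)))
            printf("stream %u: %s=%s\n", i, tag->key, tag->value);
    }
}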
|
670
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/muxing.c
Normal file
@@ -0,0 +1,670 @@
|
||||
/*
|
||||
* Copyright (c) 2003 Fabrice Bellard
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
 * @file
 * libavformat API example.
 *
 * Output a media file in any supported libavformat format. The default
 * codecs are used.
 * @example muxing.c
 */
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <string.h>
|
||||
#include <math.h>
|
||||
|
||||
#include <libavutil/avassert.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/mathematics.h>
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
#define STREAM_DURATION 10.0
|
||||
#define STREAM_FRAME_RATE 25 /* 25 images/s */
|
||||
#define STREAM_PIX_FMT AV_PIX_FMT_YUV420P /* default pix_fmt */
|
||||
|
||||
#define SCALE_FLAGS SWS_BICUBIC
|
||||
|
||||
// a wrapper around a single output AVStream
|
||||
typedef struct OutputStream {
|
||||
AVStream *st;
|
||||
|
||||
/* pts of the next frame that will be generated */
|
||||
int64_t next_pts;
|
||||
int samples_count;
|
||||
|
||||
AVFrame *frame;
|
||||
AVFrame *tmp_frame;
|
||||
|
||||
float t, tincr, tincr2;
|
||||
|
||||
struct SwsContext *sws_ctx;
|
||||
struct SwrContext *swr_ctx;
|
||||
} OutputStream;
|
||||
|
||||
static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt)
|
||||
{
|
||||
AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;
|
||||
|
||||
printf("pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
|
||||
av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
|
||||
av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
|
||||
av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
|
||||
pkt->stream_index);
|
||||
}
|
||||
|
||||
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
|
||||
{
|
||||
/* rescale output packet timestamp values from codec to stream timebase */
|
||||
av_packet_rescale_ts(pkt, *time_base, st->time_base);
|
||||
pkt->stream_index = st->index;
|
||||
|
||||
/* Write the compressed frame to the media file. */
|
||||
log_packet(fmt_ctx, pkt);
|
||||
return av_interleaved_write_frame(fmt_ctx, pkt);
|
||||
}
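/* Hedged addition, not part of the original example: what the rescale above
 * does numerically. With a codec time_base of 1/25 and a stream time_base of
 * 1/90000 (a typical MPEG value), a pts of 3 codec ticks becomes
 * 3 * (1/25) / (1/90000) = 10800 stream ticks; av_rescale_q() performs this
 * with overflow-safe 64-bit arithmetic. */
static int64_t rescale_pts_sketch(void)
{
    AVRational codec_tb  = { 1, 25 };
    AVRational stream_tb = { 1, 90000 };

    return av_rescale_q(3, codec_tb, stream_tb); /* == 10800 */
}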
|
||||
|
||||
/* Add an output stream. */
|
||||
static void add_stream(OutputStream *ost, AVFormatContext *oc,
|
||||
AVCodec **codec,
|
||||
enum AVCodecID codec_id)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int i;
|
||||
|
||||
/* find the encoder */
|
||||
*codec = avcodec_find_encoder(codec_id);
|
||||
if (!(*codec)) {
|
||||
fprintf(stderr, "Could not find encoder for '%s'\n",
|
||||
avcodec_get_name(codec_id));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
ost->st = avformat_new_stream(oc, *codec);
|
||||
if (!ost->st) {
|
||||
fprintf(stderr, "Could not allocate stream\n");
|
||||
exit(1);
|
||||
}
|
||||
ost->st->id = oc->nb_streams-1;
|
||||
c = ost->st->codec;
|
||||
|
||||
switch ((*codec)->type) {
|
||||
case AVMEDIA_TYPE_AUDIO:
|
||||
c->sample_fmt = (*codec)->sample_fmts ?
|
||||
(*codec)->sample_fmts[0] : AV_SAMPLE_FMT_FLTP;
|
||||
c->bit_rate = 64000;
|
||||
c->sample_rate = 44100;
|
||||
if ((*codec)->supported_samplerates) {
|
||||
c->sample_rate = (*codec)->supported_samplerates[0];
|
||||
for (i = 0; (*codec)->supported_samplerates[i]; i++) {
|
||||
if ((*codec)->supported_samplerates[i] == 44100)
|
||||
c->sample_rate = 44100;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
if ((*codec)->channel_layouts) {
|
||||
c->channel_layout = (*codec)->channel_layouts[0];
|
||||
for (i = 0; (*codec)->channel_layouts[i]; i++) {
|
||||
if ((*codec)->channel_layouts[i] == AV_CH_LAYOUT_STEREO)
|
||||
c->channel_layout = AV_CH_LAYOUT_STEREO;
|
||||
}
|
||||
}
|
||||
c->channels = av_get_channel_layout_nb_channels(c->channel_layout);
|
||||
ost->st->time_base = (AVRational){ 1, c->sample_rate };
|
||||
break;
|
||||
|
||||
case AVMEDIA_TYPE_VIDEO:
|
||||
c->codec_id = codec_id;
|
||||
|
||||
c->bit_rate = 400000;
|
||||
/* Resolution must be a multiple of two. */
|
||||
c->width = 352;
|
||||
c->height = 288;
|
||||
/* timebase: This is the fundamental unit of time (in seconds) in terms
|
||||
* of which frame timestamps are represented. For fixed-fps content,
|
||||
* timebase should be 1/framerate and timestamp increments should be
|
||||
* identical to 1. */
|
||||
ost->st->time_base = (AVRational){ 1, STREAM_FRAME_RATE };
|
||||
c->time_base = ost->st->time_base;
|
||||
|
||||
c->gop_size = 12; /* emit one intra frame every twelve frames at most */
|
||||
c->pix_fmt = STREAM_PIX_FMT;
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
|
||||
/* just for testing, we also add B frames */
|
||||
c->max_b_frames = 2;
|
||||
}
|
||||
if (c->codec_id == AV_CODEC_ID_MPEG1VIDEO) {
|
||||
/* Needed to avoid using macroblocks in which some coeffs overflow.
|
||||
* This does not happen with normal video, it just happens here as
|
||||
* the motion of the chroma plane does not match the luma plane. */
|
||||
c->mb_decision = 2;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
|
||||
/* Some formats want stream headers to be separate. */
|
||||
if (oc->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
c->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* audio output */
|
||||
|
||||
static AVFrame *alloc_audio_frame(enum AVSampleFormat sample_fmt,
|
||||
uint64_t channel_layout,
|
||||
int sample_rate, int nb_samples)
|
||||
{
|
||||
AVFrame *frame = av_frame_alloc();
|
||||
int ret;
|
||||
|
||||
if (!frame) {
|
||||
fprintf(stderr, "Error allocating an audio frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
frame->format = sample_fmt;
|
||||
frame->channel_layout = channel_layout;
|
||||
frame->sample_rate = sample_rate;
|
||||
frame->nb_samples = nb_samples;
|
||||
|
||||
if (nb_samples) {
|
||||
ret = av_frame_get_buffer(frame, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error allocating an audio buffer\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
int nb_samples;
|
||||
int ret;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
/* open it */
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* init signal generator */
|
||||
ost->t = 0;
|
||||
ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
|
||||
/* increment frequency by 110 Hz per second */
|
||||
ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
|
||||
|
||||
if (c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)
|
||||
nb_samples = 10000;
|
||||
else
|
||||
nb_samples = c->frame_size;
|
||||
|
||||
ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
|
||||
c->sample_rate, nb_samples);
|
||||
|
||||
/* create resampler context */
|
||||
ost->swr_ctx = swr_alloc();
|
||||
if (!ost->swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int (ost->swr_ctx, "in_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "in_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt", AV_SAMPLE_FMT_S16, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_channel_count", c->channels, 0);
|
||||
av_opt_set_int (ost->swr_ctx, "out_sample_rate", c->sample_rate, 0);
|
||||
av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt", c->sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(ost->swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
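/* Hedged addition, not part of the original example: the per-option resampler
 * set-up above can also be written as a single swr_alloc_set_opts() call (the
 * result still has to be passed to swr_init() before use). This variant keys
 * the conversion on the channel layout instead of the channel count. */
static struct SwrContext *make_resampler_sketch(AVCodecContext *c)
{
    return swr_alloc_set_opts(NULL,
                              c->channel_layout, c->sample_fmt,     c->sample_rate, /* output */
                              c->channel_layout, AV_SAMPLE_FMT_S16, c->sample_rate, /* input  */
                              0, NULL);
}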
|
||||
|
||||
/* Prepare a 16 bit dummy audio frame of 'frame_size' samples and
|
||||
* 'nb_channels' channels. */
|
||||
static AVFrame *get_audio_frame(OutputStream *ost)
|
||||
{
|
||||
AVFrame *frame = ost->tmp_frame;
|
||||
int j, i, v;
|
||||
int16_t *q = (int16_t*)frame->data[0];
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
    for (j = 0; j < frame->nb_samples; j++) {
|
||||
v = (int)(sin(ost->t) * 10000);
|
||||
for (i = 0; i < ost->st->codec->channels; i++)
|
||||
*q++ = v;
|
||||
ost->t += ost->tincr;
|
||||
ost->tincr += ost->tincr2;
|
||||
}
|
||||
|
||||
frame->pts = ost->next_pts;
|
||||
ost->next_pts += frame->nb_samples;
|
||||
|
||||
return frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one audio frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_audio_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c;
|
||||
AVPacket pkt = { 0 }; // data and size must be 0;
|
||||
AVFrame *frame;
|
||||
int ret;
|
||||
int got_packet;
|
||||
int dst_nb_samples;
|
||||
|
||||
av_init_packet(&pkt);
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_audio_frame(ost);
|
||||
|
||||
if (frame) {
|
||||
/* convert samples from native format to destination codec format, using the resampler */
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(ost->swr_ctx, c->sample_rate) + frame->nb_samples,
|
||||
c->sample_rate, c->sample_rate, AV_ROUND_UP);
|
||||
av_assert0(dst_nb_samples == frame->nb_samples);
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(ost->frame);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(ost->swr_ctx,
|
||||
ost->frame->data, dst_nb_samples,
|
||||
(const uint8_t **)frame->data, frame->nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
exit(1);
|
||||
}
|
||||
frame = ost->frame;
|
||||
|
||||
frame->pts = av_rescale_q(ost->samples_count, (AVRational){1, c->sample_rate}, c->time_base);
|
||||
ost->samples_count += dst_nb_samples;
|
||||
}
|
||||
|
||||
ret = avcodec_encode_audio2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing audio frame: %s\n",
|
||||
av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* video output */
|
||||
|
||||
static AVFrame *alloc_picture(enum AVPixelFormat pix_fmt, int width, int height)
|
||||
{
|
||||
AVFrame *picture;
|
||||
int ret;
|
||||
|
||||
picture = av_frame_alloc();
|
||||
if (!picture)
|
||||
return NULL;
|
||||
|
||||
picture->format = pix_fmt;
|
||||
picture->width = width;
|
||||
picture->height = height;
|
||||
|
||||
/* allocate the buffers for the frame data */
|
||||
ret = av_frame_get_buffer(picture, 32);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate frame data.\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return picture;
|
||||
}
|
||||
|
||||
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
av_dict_copy(&opt, opt_arg, 0);
|
||||
|
||||
/* open the codec */
|
||||
ret = avcodec_open2(c, codec, &opt);
|
||||
av_dict_free(&opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* allocate and init a re-usable frame */
|
||||
ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
|
||||
if (!ost->frame) {
|
||||
fprintf(stderr, "Could not allocate video frame\n");
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* If the output format is not YUV420P, then a temporary YUV420P
|
||||
* picture is needed too. It is then converted to the required
|
||||
* output format. */
|
||||
ost->tmp_frame = NULL;
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
|
||||
if (!ost->tmp_frame) {
|
||||
fprintf(stderr, "Could not allocate temporary picture\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/* Prepare a dummy image. */
|
||||
static void fill_yuv_image(AVFrame *pict, int frame_index,
|
||||
int width, int height)
|
||||
{
|
||||
int x, y, i, ret;
|
||||
|
||||
/* when we pass a frame to the encoder, it may keep a reference to it
|
||||
* internally;
|
||||
* make sure we do not overwrite it here
|
||||
*/
|
||||
ret = av_frame_make_writable(pict);
|
||||
if (ret < 0)
|
||||
exit(1);
|
||||
|
||||
i = frame_index;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
pict->data[0][y * pict->linesize[0] + x] = x + y + i * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
pict->data[1][y * pict->linesize[1] + x] = 128 + y + i * 2;
|
||||
pict->data[2][y * pict->linesize[2] + x] = 64 + x + i * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
static AVFrame *get_video_frame(OutputStream *ost)
|
||||
{
|
||||
AVCodecContext *c = ost->st->codec;
|
||||
|
||||
/* check if we want to generate more frames */
|
||||
if (av_compare_ts(ost->next_pts, ost->st->codec->time_base,
|
||||
STREAM_DURATION, (AVRational){ 1, 1 }) >= 0)
|
||||
return NULL;
|
||||
|
||||
if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
|
||||
/* as we only generate a YUV420P picture, we must convert it
|
||||
* to the codec pixel format if needed */
|
||||
if (!ost->sws_ctx) {
|
||||
ost->sws_ctx = sws_getContext(c->width, c->height,
|
||||
AV_PIX_FMT_YUV420P,
|
||||
c->width, c->height,
|
||||
c->pix_fmt,
|
||||
SCALE_FLAGS, NULL, NULL, NULL);
|
||||
if (!ost->sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Could not initialize the conversion context\n");
|
||||
exit(1);
|
||||
}
|
||||
}
|
||||
fill_yuv_image(ost->tmp_frame, ost->next_pts, c->width, c->height);
|
||||
sws_scale(ost->sws_ctx,
|
||||
(const uint8_t * const *)ost->tmp_frame->data, ost->tmp_frame->linesize,
|
||||
0, c->height, ost->frame->data, ost->frame->linesize);
|
||||
} else {
|
||||
fill_yuv_image(ost->frame, ost->next_pts, c->width, c->height);
|
||||
}
|
||||
|
||||
ost->frame->pts = ost->next_pts++;
|
||||
|
||||
return ost->frame;
|
||||
}
|
||||
|
||||
/*
|
||||
* encode one video frame and send it to the muxer
|
||||
* return 1 when encoding is finished, 0 otherwise
|
||||
*/
|
||||
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
int ret;
|
||||
AVCodecContext *c;
|
||||
AVFrame *frame;
|
||||
int got_packet = 0;
|
||||
|
||||
c = ost->st->codec;
|
||||
|
||||
frame = get_video_frame(ost);
|
||||
|
||||
if (oc->oformat->flags & AVFMT_RAWPICTURE) {
|
||||
/* a hack to avoid data copy with some raw video muxers */
|
||||
AVPacket pkt;
|
||||
av_init_packet(&pkt);
|
||||
|
||||
if (!frame)
|
||||
return 1;
|
||||
|
||||
pkt.flags |= AV_PKT_FLAG_KEY;
|
||||
pkt.stream_index = ost->st->index;
|
||||
pkt.data = (uint8_t *)frame;
|
||||
pkt.size = sizeof(AVPicture);
|
||||
|
||||
pkt.pts = pkt.dts = frame->pts;
|
||||
av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
|
||||
|
||||
ret = av_interleaved_write_frame(oc, &pkt);
|
||||
} else {
|
||||
AVPacket pkt = { 0 };
|
||||
av_init_packet(&pkt);
|
||||
|
||||
/* encode the image */
|
||||
ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
if (got_packet) {
|
||||
ret = write_frame(oc, &c->time_base, ost->st, &pkt);
|
||||
} else {
|
||||
ret = 0;
|
||||
}
|
||||
}
|
||||
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
|
||||
exit(1);
|
||||
}
|
||||
|
||||
return (frame || got_packet) ? 0 : 1;
|
||||
}
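
/* write_video_frame() has two paths: for muxers flagged AVFMT_RAWPICTURE the
 * AVPicture is passed through in a packet without encoding (the "hack" noted
 * above), otherwise the frame goes through avcodec_encode_video2(). In both
 * cases packet timestamps start out in the codec time base and are rescaled
 * to the stream time base before muxing (in the encode path this is expected
 * to happen inside the write_frame() helper defined earlier, which is why
 * &c->time_base is passed to it). The return value, 1 once neither a frame
 * nor a packet is produced, is what ends the encode loop in main(). */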
|
||||
|
||||
static void close_stream(AVFormatContext *oc, OutputStream *ost)
|
||||
{
|
||||
avcodec_close(ost->st->codec);
|
||||
av_frame_free(&ost->frame);
|
||||
av_frame_free(&ost->tmp_frame);
|
||||
sws_freeContext(ost->sws_ctx);
|
||||
swr_free(&ost->swr_ctx);
|
||||
}
|
||||
|
||||
/**************************************************************/
|
||||
/* media file output */
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
OutputStream video_st = { 0 }, audio_st = { 0 };
|
||||
const char *filename;
|
||||
AVOutputFormat *fmt;
|
||||
AVFormatContext *oc;
|
||||
AVCodec *audio_codec, *video_codec;
|
||||
int ret;
|
||||
int have_video = 0, have_audio = 0;
|
||||
int encode_video = 0, encode_audio = 0;
|
||||
AVDictionary *opt = NULL;
|
||||
|
||||
/* Initialize libavcodec, and register all codecs and formats. */
|
||||
av_register_all();
|
||||
|
||||
if (argc < 2) {
|
||||
printf("usage: %s output_file\n"
|
||||
"API example program to output a media file with libavformat.\n"
|
||||
"This program generates a synthetic audio and video stream, encodes and\n"
|
||||
"muxes them into a file named output_file.\n"
|
||||
"The output format is automatically guessed according to the file extension.\n"
|
||||
"Raw images can also be output by using '%%d' in the filename.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
filename = argv[1];
|
||||
if (argc > 3 && !strcmp(argv[2], "-flags")) {
|
||||
av_dict_set(&opt, argv[2]+1, argv[3], 0);
|
||||
}
|
||||
|
||||
/* allocate the output media context */
|
||||
avformat_alloc_output_context2(&oc, NULL, NULL, filename);
|
||||
if (!oc) {
|
||||
printf("Could not deduce output format from file extension: using MPEG.\n");
|
||||
avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
|
||||
}
|
||||
if (!oc)
|
||||
return 1;
|
||||
|
||||
fmt = oc->oformat;
|
||||
|
||||
/* Add the audio and video streams using the default format codecs
|
||||
* and initialize the codecs. */
|
||||
if (fmt->video_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&video_st, oc, &video_codec, fmt->video_codec);
|
||||
have_video = 1;
|
||||
encode_video = 1;
|
||||
}
|
||||
if (fmt->audio_codec != AV_CODEC_ID_NONE) {
|
||||
add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
|
||||
have_audio = 1;
|
||||
encode_audio = 1;
|
||||
}
|
||||
|
||||
/* Now that all the parameters are set, we can open the audio and
|
||||
* video codecs and allocate the necessary encode buffers. */
|
||||
if (have_video)
|
||||
open_video(oc, video_codec, &video_st, opt);
|
||||
|
||||
if (have_audio)
|
||||
open_audio(oc, audio_codec, &audio_st, opt);
|
||||
|
||||
av_dump_format(oc, 0, filename, 1);
|
||||
|
||||
/* open the output file, if needed */
|
||||
if (!(fmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open '%s': %s\n", filename,
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the stream header, if any. */
|
||||
ret = avformat_write_header(oc, &opt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file: %s\n",
|
||||
av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
while (encode_video || encode_audio) {
|
||||
/* select the stream to encode */
|
||||
if (encode_video &&
|
||||
(!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
|
||||
audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
|
||||
encode_video = !write_video_frame(oc, &video_st);
|
||||
} else {
|
||||
encode_audio = !write_audio_frame(oc, &audio_st);
|
||||
}
|
||||
}
|
||||
|
||||
/* Write the trailer, if any. The trailer must be written before you
|
||||
* close the CodecContexts that were opened when you wrote the header; otherwise
|
||||
* av_write_trailer() may try to use memory that was freed on
|
||||
* av_codec_close(). */
|
||||
av_write_trailer(oc);
|
||||
|
||||
/* Close each codec. */
|
||||
if (have_video)
|
||||
close_stream(oc, &video_st);
|
||||
if (have_audio)
|
||||
close_stream(oc, &audio_st);
|
||||
|
||||
if (!(fmt->flags & AVFMT_NOFILE))
|
||||
/* Close the output file. */
|
||||
avio_closep(&oc->pb);
|
||||
|
||||
/* free the stream */
|
||||
avformat_free_context(oc);
|
||||
|
||||
return 0;
|
||||
}
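
/* Usage sketch (the binary name below assumes the example was built from
 * doc/examples; adjust as needed):
 *
 *     ./muxing out.mkv
 *
 * The container is guessed from the file extension, falling back to the
 * "mpeg" format when the guess fails, and the default audio/video codecs of
 * that container are used for the two generated streams. */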
@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavcodec
includedir=${pcfiledir}/../../..

Name: libavcodec
Description: FFmpeg codec library
Version: 56.60.100
Requires: libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavcodec
Cflags: -I${includedir}
@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavdevice
includedir=${pcfiledir}/../../..

Name: libavdevice
Description: FFmpeg device handling library
Version: 56.4.100
Requires: libavformat >= 56.40.101, libavcodec >= 56.60.100, libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavdevice
Cflags: -I${includedir}
@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavformat
includedir=${pcfiledir}/../../..

Name: libavformat
Description: FFmpeg container format library
Version: 56.40.101
Requires: libavcodec >= 56.60.100, libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavformat
Cflags: -I${includedir}
@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libavutil
includedir=${pcfiledir}/../../..

Name: libavutil
Description: FFmpeg utility library
Version: 54.31.100
Requires:
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lavutil
Cflags: -I${includedir}
@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libswresample
includedir=${pcfiledir}/../../..

Name: libswresample
Description: FFmpeg audio resampling library
Version: 1.2.101
Requires: libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lswresample
Cflags: -I${includedir}
@@ -0,0 +1,12 @@
prefix=
exec_prefix=
libdir=${pcfiledir}/../../../libswscale
includedir=${pcfiledir}/../../..

Name: libswscale
Description: FFmpeg image rescaling library
Version: 3.1.101
Requires: libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -Wl,-rpath,${libdir} -lswscale
Cflags: -I${includedir}
484
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/qsvdec.c
Normal file
@@ -0,0 +1,484 @@
|
||||
/*
|
||||
* Copyright (c) 2015 Anton Khirnov
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* Intel QSV-accelerated H.264 decoding example.
|
||||
*
|
||||
* @example qsvdec.c
|
||||
* This example shows how to do QSV-accelerated H.264 decoding with output
|
||||
* frames in the VA-API video surfaces.
|
||||
*/
|
||||
|
||||
#include "config.h"
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include <mfx/mfxvideo.h>
|
||||
|
||||
#include <va/va.h>
|
||||
#include <va/va_x11.h>
|
||||
#include <X11/Xlib.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
#include "libavcodec/qsv.h"
|
||||
|
||||
#include "libavutil/error.h"
|
||||
#include "libavutil/mem.h"
|
||||
|
||||
typedef struct DecodeContext {
|
||||
mfxSession mfx_session;
|
||||
VADisplay va_dpy;
|
||||
|
||||
VASurfaceID *surfaces;
|
||||
mfxMemId *surface_ids;
|
||||
int *surface_used;
|
||||
int nb_surfaces;
|
||||
|
||||
mfxFrameInfo frame_info;
|
||||
} DecodeContext;
|
||||
|
||||
static mfxStatus frame_alloc(mfxHDL pthis, mfxFrameAllocRequest *req,
|
||||
mfxFrameAllocResponse *resp)
|
||||
{
|
||||
DecodeContext *decode = pthis;
|
||||
int err, i;
|
||||
|
||||
if (decode->surfaces) {
|
||||
fprintf(stderr, "Multiple allocation requests.\n");
|
||||
return MFX_ERR_MEMORY_ALLOC;
|
||||
}
|
||||
if (!(req->Type & MFX_MEMTYPE_VIDEO_MEMORY_DECODER_TARGET)) {
|
||||
fprintf(stderr, "Unsupported surface type: %d\n", req->Type);
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
if (req->Info.BitDepthLuma != 8 || req->Info.BitDepthChroma != 8 ||
|
||||
req->Info.Shift || req->Info.FourCC != MFX_FOURCC_NV12 ||
|
||||
req->Info.ChromaFormat != MFX_CHROMAFORMAT_YUV420) {
|
||||
fprintf(stderr, "Unsupported surface properties.\n");
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
decode->surfaces = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surfaces));
|
||||
decode->surface_ids = av_malloc_array (req->NumFrameSuggested, sizeof(*decode->surface_ids));
|
||||
decode->surface_used = av_mallocz_array(req->NumFrameSuggested, sizeof(*decode->surface_used));
|
||||
if (!decode->surfaces || !decode->surface_ids || !decode->surface_used)
|
||||
goto fail;
|
||||
|
||||
err = vaCreateSurfaces(decode->va_dpy, VA_RT_FORMAT_YUV420,
|
||||
req->Info.Width, req->Info.Height,
|
||||
decode->surfaces, req->NumFrameSuggested,
|
||||
NULL, 0);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error allocating VA surfaces\n");
|
||||
goto fail;
|
||||
}
|
||||
decode->nb_surfaces = req->NumFrameSuggested;
|
||||
|
||||
for (i = 0; i < decode->nb_surfaces; i++)
|
||||
decode->surface_ids[i] = &decode->surfaces[i];
|
||||
|
||||
resp->mids = decode->surface_ids;
|
||||
resp->NumFrameActual = decode->nb_surfaces;
|
||||
|
||||
decode->frame_info = req->Info;
|
||||
|
||||
return MFX_ERR_NONE;
|
||||
fail:
|
||||
av_freep(&decode->surfaces);
|
||||
av_freep(&decode->surface_ids);
|
||||
av_freep(&decode->surface_used);
|
||||
|
||||
return MFX_ERR_MEMORY_ALLOC;
|
||||
}
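
/* frame_alloc() implements the Alloc callback of the mfxFrameAllocator that
 * main() registers with MFXVideoCORE_SetFrameAllocator(): it backs the
 * decoder's surface pool with VA-API surfaces and hands their IDs back to
 * libmfx as opaque mfxMemId values (pointers into decode->surfaces). The
 * example only supports a single allocation request for 8-bit NV12 surfaces,
 * which is sufficient for this H.264 decode path. */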
|
||||
|
||||
static mfxStatus frame_free(mfxHDL pthis, mfxFrameAllocResponse *resp)
|
||||
{
|
||||
DecodeContext *decode = pthis;
|
||||
|
||||
if (decode->surfaces)
|
||||
vaDestroySurfaces(decode->va_dpy, decode->surfaces, decode->nb_surfaces);
|
||||
av_freep(&decode->surfaces);
|
||||
av_freep(&decode->surface_ids);
|
||||
av_freep(&decode->surface_used);
|
||||
decode->nb_surfaces = 0;
|
||||
|
||||
return MFX_ERR_NONE;
|
||||
}
|
||||
|
||||
static mfxStatus frame_lock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
|
||||
{
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
static mfxStatus frame_unlock(mfxHDL pthis, mfxMemId mid, mfxFrameData *ptr)
|
||||
{
|
||||
return MFX_ERR_UNSUPPORTED;
|
||||
}
|
||||
|
||||
static mfxStatus frame_get_hdl(mfxHDL pthis, mfxMemId mid, mfxHDL *hdl)
|
||||
{
|
||||
*hdl = mid;
|
||||
return MFX_ERR_NONE;
|
||||
}
|
||||
|
||||
static void free_buffer(void *opaque, uint8_t *data)
|
||||
{
|
||||
int *used = opaque;
|
||||
*used = 0;
|
||||
av_freep(&data);
|
||||
}
|
||||
|
||||
static int get_buffer(AVCodecContext *avctx, AVFrame *frame, int flags)
|
||||
{
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
|
||||
mfxFrameSurface1 *surf;
|
||||
AVBufferRef *surf_buf;
|
||||
int idx;
|
||||
|
||||
for (idx = 0; idx < decode->nb_surfaces; idx++) {
|
||||
if (!decode->surface_used[idx])
|
||||
break;
|
||||
}
|
||||
if (idx == decode->nb_surfaces) {
|
||||
fprintf(stderr, "No free surfaces\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
surf = av_mallocz(sizeof(*surf));
|
||||
if (!surf)
|
||||
return AVERROR(ENOMEM);
|
||||
surf_buf = av_buffer_create((uint8_t*)surf, sizeof(*surf), free_buffer,
|
||||
&decode->surface_used[idx], AV_BUFFER_FLAG_READONLY);
|
||||
if (!surf_buf) {
|
||||
av_freep(&surf);
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
surf->Info = decode->frame_info;
|
||||
surf->Data.MemId = &decode->surfaces[idx];
|
||||
|
||||
frame->buf[0] = surf_buf;
|
||||
frame->data[3] = (uint8_t*)surf;
|
||||
|
||||
decode->surface_used[idx] = 1;
|
||||
|
||||
return 0;
|
||||
}
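
/* get_buffer() is installed as the decoder's get_buffer2 callback. Instead of
 * allocating pixel data it picks a free entry from the VA surface pool, wraps
 * a mfxFrameSurface1 describing it in an AVBufferRef, and exposes that
 * surface through frame->data[3], which is how the hardware frame is passed
 * around in this example. When the last reference to the frame is dropped,
 * free_buffer() clears the surface_used flag so the surface can be reused. */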
|
||||
|
||||
static int get_format(AVCodecContext *avctx, const enum AVPixelFormat *pix_fmts)
|
||||
{
|
||||
while (*pix_fmts != AV_PIX_FMT_NONE) {
|
||||
if (*pix_fmts == AV_PIX_FMT_QSV) {
|
||||
if (!avctx->hwaccel_context) {
|
||||
DecodeContext *decode = avctx->opaque;
|
||||
AVQSVContext *qsv = av_qsv_alloc_context();
|
||||
if (!qsv)
|
||||
return AV_PIX_FMT_NONE;
|
||||
|
||||
qsv->session = decode->mfx_session;
|
||||
qsv->iopattern = MFX_IOPATTERN_OUT_VIDEO_MEMORY;
|
||||
|
||||
avctx->hwaccel_context = qsv;
|
||||
}
|
||||
|
||||
return AV_PIX_FMT_QSV;
|
||||
}
|
||||
|
||||
pix_fmts++;
|
||||
}
|
||||
|
||||
fprintf(stderr, "The QSV pixel format not offered in get_format()\n");
|
||||
|
||||
return AV_PIX_FMT_NONE;
|
||||
}
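
/* get_format() is the pixel-format negotiation callback: it scans the list
 * offered by the decoder and picks AV_PIX_FMT_QSV, attaching an AVQSVContext
 * (carrying the MFX session and MFX_IOPATTERN_OUT_VIDEO_MEMORY) as
 * avctx->hwaccel_context the first time it is called. If QSV output is not
 * offered, the example gives up and returns AV_PIX_FMT_NONE, which makes the
 * decode fail rather than silently falling back. */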
|
||||
|
||||
static int decode_packet(DecodeContext *decode, AVCodecContext *decoder_ctx,
|
||||
AVFrame *frame, AVPacket *pkt,
|
||||
AVIOContext *output_ctx)
|
||||
{
|
||||
int ret = 0;
|
||||
int got_frame = 1;
|
||||
|
||||
while (pkt->size > 0 || (!pkt->data && got_frame)) {
|
||||
ret = avcodec_decode_video2(decoder_ctx, frame, &got_frame, pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error during decoding\n");
|
||||
return ret;
|
||||
}
|
||||
|
||||
pkt->data += ret;
|
||||
pkt->size -= ret;
|
||||
|
||||
/* A real program would do something useful with the decoded frame here.
|
||||
* We just retrieve the raw data and write it to a file, which is rather
|
||||
* useless but pedagogic. */
|
||||
if (got_frame) {
|
||||
mfxFrameSurface1 *surf = (mfxFrameSurface1*)frame->data[3];
|
||||
VASurfaceID surface = *(VASurfaceID*)surf->Data.MemId;
|
||||
|
||||
VAImageFormat img_fmt = {
|
||||
.fourcc = VA_FOURCC_NV12,
|
||||
.byte_order = VA_LSB_FIRST,
|
||||
.bits_per_pixel = 8,
|
||||
.depth = 8,
|
||||
};
|
||||
|
||||
VAImage img;
|
||||
|
||||
VAStatus err;
|
||||
uint8_t *data;
|
||||
int i, j;
|
||||
|
||||
img.buf = VA_INVALID_ID;
|
||||
img.image_id = VA_INVALID_ID;
|
||||
|
||||
err = vaCreateImage(decode->va_dpy, &img_fmt,
|
||||
frame->width, frame->height, &img);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error creating an image: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = vaGetImage(decode->va_dpy, surface, 0, 0,
|
||||
frame->width, frame->height,
|
||||
img.image_id);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error getting an image: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
err = vaMapBuffer(decode->va_dpy, img.buf, (void**)&data);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Error mapping the image buffer: %s\n",
|
||||
vaErrorStr(err));
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto fail;
|
||||
}
|
||||
|
||||
for (i = 0; i < img.num_planes; i++)
|
||||
for (j = 0; j < (img.height >> (i > 0)); j++)
|
||||
avio_write(output_ctx, data + img.offsets[i] + j * img.pitches[i], img.width);
|
||||
|
||||
fail:
|
||||
if (img.buf != VA_INVALID_ID)
|
||||
vaUnmapBuffer(decode->va_dpy, img.buf);
|
||||
if (img.image_id != VA_INVALID_ID)
|
||||
vaDestroyImage(decode->va_dpy, img.image_id);
|
||||
av_frame_unref(frame);
|
||||
|
||||
if (ret < 0)
|
||||
return ret;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
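
/* The plane-copy loop above relies on the mapped VAImage being NV12: plane 0
 * is full-height luma and plane 1 holds interleaved Cb/Cr at half height,
 * which is why the row count is img.height >> (i > 0). For each plane,
 * img.width bytes per row are written, so the output file is simply a
 * sequence of raw NV12 frames. */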
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVFormatContext *input_ctx = NULL;
|
||||
AVStream *video_st = NULL;
|
||||
AVCodecContext *decoder_ctx = NULL;
|
||||
const AVCodec *decoder;
|
||||
|
||||
AVPacket pkt = { 0 };
|
||||
AVFrame *frame = NULL;
|
||||
|
||||
DecodeContext decode = { NULL };
|
||||
|
||||
Display *dpy = NULL;
|
||||
int va_ver_major, va_ver_minor;
|
||||
|
||||
mfxIMPL mfx_impl = MFX_IMPL_AUTO_ANY;
|
||||
mfxVersion mfx_ver = { { 1, 1 } };
|
||||
|
||||
mfxFrameAllocator frame_allocator = {
|
||||
.pthis = &decode,
|
||||
.Alloc = frame_alloc,
|
||||
.Lock = frame_lock,
|
||||
.Unlock = frame_unlock,
|
||||
.GetHDL = frame_get_hdl,
|
||||
.Free = frame_free,
|
||||
};
|
||||
|
||||
AVIOContext *output_ctx = NULL;
|
||||
|
||||
int ret, i, err;
|
||||
|
||||
av_register_all();
|
||||
|
||||
if (argc < 3) {
|
||||
fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
/* open the input file */
|
||||
ret = avformat_open_input(&input_ctx, argv[1], NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Cannot open input file '%s': ", argv[1]);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* find the first H.264 video stream */
|
||||
for (i = 0; i < input_ctx->nb_streams; i++) {
|
||||
AVStream *st = input_ctx->streams[i];
|
||||
|
||||
if (st->codec->codec_id == AV_CODEC_ID_H264 && !video_st)
|
||||
video_st = st;
|
||||
else
|
||||
st->discard = AVDISCARD_ALL;
|
||||
}
|
||||
if (!video_st) {
|
||||
fprintf(stderr, "No H.264 video stream in the input file\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* initialize VA-API */
|
||||
dpy = XOpenDisplay(NULL);
|
||||
if (!dpy) {
|
||||
fprintf(stderr, "Cannot open the X display\n");
|
||||
goto finish;
|
||||
}
|
||||
decode.va_dpy = vaGetDisplay(dpy);
|
||||
if (!decode.va_dpy) {
|
||||
fprintf(stderr, "Cannot open the VA display\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
err = vaInitialize(decode.va_dpy, &va_ver_major, &va_ver_minor);
|
||||
if (err != VA_STATUS_SUCCESS) {
|
||||
fprintf(stderr, "Cannot initialize VA: %s\n", vaErrorStr(err));
|
||||
goto finish;
|
||||
}
|
||||
fprintf(stderr, "Initialized VA v%d.%d\n", va_ver_major, va_ver_minor);
|
||||
|
||||
/* initialize an MFX session */
|
||||
err = MFXInit(mfx_impl, &mfx_ver, &decode.mfx_session);
|
||||
if (err != MFX_ERR_NONE) {
|
||||
fprintf(stderr, "Error initializing an MFX session\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
MFXVideoCORE_SetHandle(decode.mfx_session, MFX_HANDLE_VA_DISPLAY, decode.va_dpy);
|
||||
MFXVideoCORE_SetFrameAllocator(decode.mfx_session, &frame_allocator);
|
||||
|
||||
/* initialize the decoder */
|
||||
decoder = avcodec_find_decoder_by_name("h264_qsv");
|
||||
if (!decoder) {
|
||||
fprintf(stderr, "The QSV decoder is not present in libavcodec\n");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
decoder_ctx = avcodec_alloc_context3(decoder);
|
||||
if (!decoder_ctx) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
decoder_ctx->codec_id = AV_CODEC_ID_H264;
|
||||
if (video_st->codec->extradata_size) {
|
||||
decoder_ctx->extradata = av_mallocz(video_st->codec->extradata_size +
|
||||
AV_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (!decoder_ctx->extradata) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
memcpy(decoder_ctx->extradata, video_st->codec->extradata,
|
||||
video_st->codec->extradata_size);
|
||||
decoder_ctx->extradata_size = video_st->codec->extradata_size;
|
||||
}
|
||||
decoder_ctx->refcounted_frames = 1;
|
||||
|
||||
decoder_ctx->opaque = &decode;
|
||||
decoder_ctx->get_buffer2 = get_buffer;
|
||||
decoder_ctx->get_format = get_format;
|
||||
|
||||
ret = avcodec_open2(decoder_ctx, NULL, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the decoder: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* open the output stream */
|
||||
ret = avio_open(&output_ctx, argv[2], AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error opening the output context: ");
|
||||
goto finish;
|
||||
}
|
||||
|
||||
frame = av_frame_alloc();
|
||||
if (!frame) {
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto finish;
|
||||
}
|
||||
|
||||
/* actual decoding */
|
||||
while (ret >= 0) {
|
||||
ret = av_read_frame(input_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
if (pkt.stream_index == video_st->index)
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
|
||||
|
||||
av_packet_unref(&pkt);
|
||||
}
|
||||
|
||||
/* flush the decoder */
|
||||
pkt.data = NULL;
|
||||
pkt.size = 0;
|
||||
ret = decode_packet(&decode, decoder_ctx, frame, &pkt, output_ctx);
|
||||
|
||||
finish:
|
||||
if (ret < 0) {
|
||||
char buf[1024];
|
||||
av_strerror(ret, buf, sizeof(buf));
|
||||
fprintf(stderr, "%s\n", buf);
|
||||
}
|
||||
|
||||
avformat_close_input(&input_ctx);
|
||||
|
||||
av_frame_free(&frame);
|
||||
|
||||
if (decode.mfx_session)
|
||||
MFXClose(decode.mfx_session);
|
||||
if (decode.va_dpy)
|
||||
vaTerminate(decode.va_dpy);
|
||||
if (dpy)
|
||||
XCloseDisplay(dpy);
|
||||
|
||||
if (decoder_ctx)
|
||||
av_freep(&decoder_ctx->hwaccel_context);
|
||||
avcodec_free_context(&decoder_ctx);
|
||||
|
||||
avio_close(output_ctx);
|
||||
|
||||
return ret;
|
||||
}
|
165
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/remuxing.c
Normal file
@@ -0,0 +1,165 @@
|
||||
/*
|
||||
* Copyright (c) 2013 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libavformat/libavcodec demuxing and muxing API example.
|
||||
*
|
||||
* Remux streams from one container format to another.
|
||||
* @example remuxing.c
|
||||
*/
|
||||
|
||||
#include <libavutil/timestamp.h>
|
||||
#include <libavformat/avformat.h>
|
||||

static void log_packet(const AVFormatContext *fmt_ctx, const AVPacket *pkt, const char *tag)
{
    AVRational *time_base = &fmt_ctx->streams[pkt->stream_index]->time_base;

    printf("%s: pts:%s pts_time:%s dts:%s dts_time:%s duration:%s duration_time:%s stream_index:%d\n",
           tag,
           av_ts2str(pkt->pts), av_ts2timestr(pkt->pts, time_base),
           av_ts2str(pkt->dts), av_ts2timestr(pkt->dts, time_base),
           av_ts2str(pkt->duration), av_ts2timestr(pkt->duration, time_base),
           pkt->stream_index);
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
AVOutputFormat *ofmt = NULL;
|
||||
AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
|
||||
AVPacket pkt;
|
||||
const char *in_filename, *out_filename;
|
||||
int ret, i;
|
||||
|
||||
if (argc < 3) {
|
||||
printf("usage: %s input output\n"
|
||||
"API example program to remux a media file with libavformat and libavcodec.\n"
|
||||
"The output format is guessed according to the file extension.\n"
|
||||
"\n", argv[0]);
|
||||
return 1;
|
||||
}
|
||||
|
||||
in_filename = argv[1];
|
||||
out_filename = argv[2];
|
||||
|
||||
av_register_all();
|
||||
|
||||
if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s'", in_filename);
|
||||
goto end;
|
||||
}
|
||||
|
||||
if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
|
||||
fprintf(stderr, "Failed to retrieve input stream information");
|
||||
goto end;
|
||||
}
|
||||
|
||||
av_dump_format(ifmt_ctx, 0, in_filename, 0);
|
||||
|
||||
avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
|
||||
if (!ofmt_ctx) {
|
||||
fprintf(stderr, "Could not create output context\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
ofmt = ofmt_ctx->oformat;
|
||||
|
||||
for (i = 0; i < ifmt_ctx->nb_streams; i++) {
|
||||
AVStream *in_stream = ifmt_ctx->streams[i];
|
||||
AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
|
||||
if (!out_stream) {
|
||||
fprintf(stderr, "Failed allocating output stream\n");
|
||||
ret = AVERROR_UNKNOWN;
|
||||
goto end;
|
||||
}
|
||||
|
||||
ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
|
||||
goto end;
|
||||
}
|
||||
out_stream->codec->codec_tag = 0;
|
||||
if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
out_stream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
}
|
||||
av_dump_format(ofmt_ctx, 0, out_filename, 1);
|
||||
|
||||
if (!(ofmt->flags & AVFMT_NOFILE)) {
|
||||
ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s'", out_filename);
|
||||
goto end;
|
||||
}
|
||||
}
|
||||
|
||||
ret = avformat_write_header(ofmt_ctx, NULL);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error occurred when opening output file\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
while (1) {
|
||||
AVStream *in_stream, *out_stream;
|
||||
|
||||
ret = av_read_frame(ifmt_ctx, &pkt);
|
||||
if (ret < 0)
|
||||
break;
|
||||
|
||||
in_stream = ifmt_ctx->streams[pkt.stream_index];
|
||||
out_stream = ofmt_ctx->streams[pkt.stream_index];
|
||||
|
||||
log_packet(ifmt_ctx, &pkt, "in");
|
||||
|
||||
/* copy packet */
|
||||
pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
|
||||
pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
|
||||
pkt.pos = -1;
|
||||
log_packet(ofmt_ctx, &pkt, "out");
|
||||
|
||||
ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error muxing packet\n");
|
||||
break;
|
||||
}
|
||||
av_free_packet(&pkt);
|
||||
}
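
/* Timestamp handling is the heart of the copy loop above: pts, dts and
 * duration are expressed in the input stream's time base and must be
 * rescaled to the output stream's time base before muxing, e.g.
 *
 *     pkt.pts = av_rescale_q_rnd(pkt.pts, in_tb, out_tb,
 *                                AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
 *
 * (in_tb/out_tb stand for the two time bases). AV_ROUND_PASS_MINMAX passes
 * AV_NOPTS_VALUE through unchanged instead of rescaling it into a bogus
 * timestamp, and pkt.pos is reset because the byte position from the input
 * file is meaningless in the new container. */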
|
||||
|
||||
av_write_trailer(ofmt_ctx);
|
||||
end:
|
||||
|
||||
avformat_close_input(&ifmt_ctx);
|
||||
|
||||
/* close output */
|
||||
if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
|
||||
avio_closep(&ofmt_ctx->pb);
|
||||
avformat_free_context(ofmt_ctx);
|
||||
|
||||
if (ret < 0 && ret != AVERROR_EOF) {
|
||||
fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
@@ -0,0 +1,214 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @example resampling_audio.c
|
||||
* libswresample API use example.
|
||||
*/
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
#include <libavutil/channel_layout.h>
|
||||
#include <libavutil/samplefmt.h>
|
||||
#include <libswresample/swresample.h>
|
||||
|
||||
static int get_format_from_sample_fmt(const char **fmt,
|
||||
enum AVSampleFormat sample_fmt)
|
||||
{
|
||||
int i;
|
||||
struct sample_fmt_entry {
|
||||
enum AVSampleFormat sample_fmt; const char *fmt_be, *fmt_le;
|
||||
} sample_fmt_entries[] = {
|
||||
{ AV_SAMPLE_FMT_U8, "u8", "u8" },
|
||||
{ AV_SAMPLE_FMT_S16, "s16be", "s16le" },
|
||||
{ AV_SAMPLE_FMT_S32, "s32be", "s32le" },
|
||||
{ AV_SAMPLE_FMT_FLT, "f32be", "f32le" },
|
||||
{ AV_SAMPLE_FMT_DBL, "f64be", "f64le" },
|
||||
};
|
||||
*fmt = NULL;
|
||||
|
||||
for (i = 0; i < FF_ARRAY_ELEMS(sample_fmt_entries); i++) {
|
||||
struct sample_fmt_entry *entry = &sample_fmt_entries[i];
|
||||
if (sample_fmt == entry->sample_fmt) {
|
||||
*fmt = AV_NE(entry->fmt_be, entry->fmt_le);
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
fprintf(stderr,
|
||||
"Sample format %s not supported as output format\n",
|
||||
av_get_sample_fmt_name(sample_fmt));
|
||||
return AVERROR(EINVAL);
|
||||
}
|
||||
|
||||
/**
|
||||
* Fill dst buffer with nb_samples, generated starting from t.
|
||||
*/
|
||||
static void fill_samples(double *dst, int nb_samples, int nb_channels, int sample_rate, double *t)
|
||||
{
|
||||
int i, j;
|
||||
double tincr = 1.0 / sample_rate, *dstp = dst;
|
||||
const double c = 2 * M_PI * 440.0;
|
||||
|
||||
/* generate sin tone with 440Hz frequency and duplicated channels */
|
||||
for (i = 0; i < nb_samples; i++) {
|
||||
*dstp = sin(c * *t);
|
||||
for (j = 1; j < nb_channels; j++)
|
||||
dstp[j] = dstp[0];
|
||||
dstp += nb_channels;
|
||||
*t += tincr;
|
||||
}
|
||||
}
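
/* fill_samples() writes interleaved doubles: one 440 Hz sine sample is
 * computed per time step and duplicated across all channels, so the buffer
 * holds ch0,ch1,...,chN-1 for sample 0, then the same for sample 1, and so on
 * (hence dstp += nb_channels). This matches AV_SAMPLE_FMT_DBL, the
 * packed/interleaved double format used for the source data below. */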
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
int64_t src_ch_layout = AV_CH_LAYOUT_STEREO, dst_ch_layout = AV_CH_LAYOUT_SURROUND;
|
||||
int src_rate = 48000, dst_rate = 44100;
|
||||
uint8_t **src_data = NULL, **dst_data = NULL;
|
||||
int src_nb_channels = 0, dst_nb_channels = 0;
|
||||
int src_linesize, dst_linesize;
|
||||
int src_nb_samples = 1024, dst_nb_samples, max_dst_nb_samples;
|
||||
enum AVSampleFormat src_sample_fmt = AV_SAMPLE_FMT_DBL, dst_sample_fmt = AV_SAMPLE_FMT_S16;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
const char *fmt;
|
||||
struct SwrContext *swr_ctx;
|
||||
double t;
|
||||
int ret;
|
||||
|
||||
if (argc != 2) {
|
||||
fprintf(stderr, "Usage: %s output_file\n"
|
||||
"API example program to show how to resample an audio stream with libswresample.\n"
|
||||
"This program generates a series of audio frames, resamples them to a specified "
|
||||
"output format and rate and saves them to an output file named output_file.\n",
|
||||
argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create resampler context */
|
||||
swr_ctx = swr_alloc();
|
||||
if (!swr_ctx) {
|
||||
fprintf(stderr, "Could not allocate resampler context\n");
|
||||
ret = AVERROR(ENOMEM);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* set options */
|
||||
av_opt_set_int(swr_ctx, "in_channel_layout", src_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "in_sample_rate", src_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "in_sample_fmt", src_sample_fmt, 0);
|
||||
|
||||
av_opt_set_int(swr_ctx, "out_channel_layout", dst_ch_layout, 0);
|
||||
av_opt_set_int(swr_ctx, "out_sample_rate", dst_rate, 0);
|
||||
av_opt_set_sample_fmt(swr_ctx, "out_sample_fmt", dst_sample_fmt, 0);
|
||||
|
||||
/* initialize the resampling context */
|
||||
if ((ret = swr_init(swr_ctx)) < 0) {
|
||||
fprintf(stderr, "Failed to initialize the resampling context\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination samples buffers */
|
||||
|
||||
src_nb_channels = av_get_channel_layout_nb_channels(src_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&src_data, &src_linesize, src_nb_channels,
|
||||
src_nb_samples, src_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate source samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* compute the number of converted samples: buffering is avoided
|
||||
* ensuring that the output buffer will contain at least all the
|
||||
* converted input samples */
|
||||
max_dst_nb_samples = dst_nb_samples =
|
||||
av_rescale_rnd(src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
|
||||
/* buffer is going to be directly written to a rawaudio file, no alignment */
|
||||
dst_nb_channels = av_get_channel_layout_nb_channels(dst_ch_layout);
|
||||
ret = av_samples_alloc_array_and_samples(&dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 0);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Could not allocate destination samples\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
t = 0;
|
||||
do {
|
||||
/* generate synthetic audio */
|
||||
fill_samples((double *)src_data[0], src_nb_samples, src_nb_channels, src_rate, &t);
|
||||
|
||||
/* compute destination number of samples */
|
||||
dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, src_rate) +
|
||||
src_nb_samples, dst_rate, src_rate, AV_ROUND_UP);
|
||||
if (dst_nb_samples > max_dst_nb_samples) {
|
||||
av_freep(&dst_data[0]);
|
||||
ret = av_samples_alloc(dst_data, &dst_linesize, dst_nb_channels,
|
||||
dst_nb_samples, dst_sample_fmt, 1);
|
||||
if (ret < 0)
|
||||
break;
|
||||
max_dst_nb_samples = dst_nb_samples;
|
||||
}
|
||||
|
||||
/* convert to destination format */
|
||||
ret = swr_convert(swr_ctx, dst_data, dst_nb_samples, (const uint8_t **)src_data, src_nb_samples);
|
||||
if (ret < 0) {
|
||||
fprintf(stderr, "Error while converting\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = av_samples_get_buffer_size(&dst_linesize, dst_nb_channels,
|
||||
ret, dst_sample_fmt, 1);
|
||||
if (dst_bufsize < 0) {
|
||||
fprintf(stderr, "Could not get sample buffer size\n");
|
||||
goto end;
|
||||
}
|
||||
printf("t:%f in:%d out:%d\n", t, src_nb_samples, ret);
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
} while (t < 10);
|
||||
|
||||
if ((ret = get_format_from_sample_fmt(&fmt, dst_sample_fmt)) < 0)
|
||||
goto end;
|
||||
fprintf(stderr, "Resampling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f %s -channel_layout %"PRId64" -channels %d -ar %d %s\n",
|
||||
fmt, dst_ch_layout, dst_nb_channels, dst_rate, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
|
||||
if (src_data)
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&src_data);
|
||||
|
||||
if (dst_data)
|
||||
av_freep(&dst_data[0]);
|
||||
av_freep(&dst_data);
|
||||
|
||||
swr_free(&swr_ctx);
|
||||
return ret < 0;
|
||||
}
|
@@ -0,0 +1,140 @@
|
||||
/*
|
||||
* Copyright (c) 2012 Stefano Sabatini
|
||||
*
|
||||
* Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
* of this software and associated documentation files (the "Software"), to deal
|
||||
* in the Software without restriction, including without limitation the rights
|
||||
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
* copies of the Software, and to permit persons to whom the Software is
|
||||
* furnished to do so, subject to the following conditions:
|
||||
*
|
||||
* The above copyright notice and this permission notice shall be included in
|
||||
* all copies or substantial portions of the Software.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
|
||||
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
|
||||
* THE SOFTWARE.
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* libswscale API use example.
|
||||
* @example scaling_video.c
|
||||
*/
|
||||
|
||||
#include <libavutil/imgutils.h>
|
||||
#include <libavutil/parseutils.h>
|
||||
#include <libswscale/swscale.h>
|
||||
|
||||
static void fill_yuv_image(uint8_t *data[4], int linesize[4],
|
||||
int width, int height, int frame_index)
|
||||
{
|
||||
int x, y;
|
||||
|
||||
/* Y */
|
||||
for (y = 0; y < height; y++)
|
||||
for (x = 0; x < width; x++)
|
||||
data[0][y * linesize[0] + x] = x + y + frame_index * 3;
|
||||
|
||||
/* Cb and Cr */
|
||||
for (y = 0; y < height / 2; y++) {
|
||||
for (x = 0; x < width / 2; x++) {
|
||||
data[1][y * linesize[1] + x] = 128 + y + frame_index * 2;
|
||||
data[2][y * linesize[2] + x] = 64 + x + frame_index * 5;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
int main(int argc, char **argv)
|
||||
{
|
||||
uint8_t *src_data[4], *dst_data[4];
|
||||
int src_linesize[4], dst_linesize[4];
|
||||
int src_w = 320, src_h = 240, dst_w, dst_h;
|
||||
enum AVPixelFormat src_pix_fmt = AV_PIX_FMT_YUV420P, dst_pix_fmt = AV_PIX_FMT_RGB24;
|
||||
const char *dst_size = NULL;
|
||||
const char *dst_filename = NULL;
|
||||
FILE *dst_file;
|
||||
int dst_bufsize;
|
||||
struct SwsContext *sws_ctx;
|
||||
int i, ret;
|
||||
|
||||
if (argc != 3) {
|
||||
fprintf(stderr, "Usage: %s output_file output_size\n"
|
||||
"API example program to show how to scale an image with libswscale.\n"
|
||||
"This program generates a series of pictures, rescales them to the given "
|
||||
"output_size and saves them to an output file named output_file\n."
|
||||
"\n", argv[0]);
|
||||
exit(1);
|
||||
}
|
||||
dst_filename = argv[1];
|
||||
dst_size = argv[2];
|
||||
|
||||
if (av_parse_video_size(&dst_w, &dst_h, dst_size) < 0) {
|
||||
fprintf(stderr,
|
||||
"Invalid size '%s', must be in the form WxH or a valid size abbreviation\n",
|
||||
dst_size);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
dst_file = fopen(dst_filename, "wb");
|
||||
if (!dst_file) {
|
||||
fprintf(stderr, "Could not open destination file %s\n", dst_filename);
|
||||
exit(1);
|
||||
}
|
||||
|
||||
/* create scaling context */
|
||||
sws_ctx = sws_getContext(src_w, src_h, src_pix_fmt,
|
||||
dst_w, dst_h, dst_pix_fmt,
|
||||
SWS_BILINEAR, NULL, NULL, NULL);
|
||||
if (!sws_ctx) {
|
||||
fprintf(stderr,
|
||||
"Impossible to create scale context for the conversion "
|
||||
"fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
|
||||
av_get_pix_fmt_name(src_pix_fmt), src_w, src_h,
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h);
|
||||
ret = AVERROR(EINVAL);
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* allocate source and destination image buffers */
|
||||
if ((ret = av_image_alloc(src_data, src_linesize,
|
||||
src_w, src_h, src_pix_fmt, 16)) < 0) {
|
||||
fprintf(stderr, "Could not allocate source image\n");
|
||||
goto end;
|
||||
}
|
||||
|
||||
/* buffer is going to be written to rawvideo file, no alignment */
|
||||
if ((ret = av_image_alloc(dst_data, dst_linesize,
|
||||
dst_w, dst_h, dst_pix_fmt, 1)) < 0) {
|
||||
fprintf(stderr, "Could not allocate destination image\n");
|
||||
goto end;
|
||||
}
|
||||
dst_bufsize = ret;
|
||||
|
||||
for (i = 0; i < 100; i++) {
|
||||
/* generate synthetic video */
|
||||
fill_yuv_image(src_data, src_linesize, src_w, src_h, i);
|
||||
|
||||
/* convert to destination format */
|
||||
sws_scale(sws_ctx, (const uint8_t * const*)src_data,
|
||||
src_linesize, 0, src_h, dst_data, dst_linesize);
|
||||
|
||||
/* write scaled image to file */
|
||||
fwrite(dst_data[0], 1, dst_bufsize, dst_file);
|
||||
}
|
||||
|
||||
fprintf(stderr, "Scaling succeeded. Play the output file with the command:\n"
|
||||
"ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
|
||||
av_get_pix_fmt_name(dst_pix_fmt), dst_w, dst_h, dst_filename);
|
||||
|
||||
end:
|
||||
fclose(dst_file);
|
||||
av_freep(&src_data[0]);
|
||||
av_freep(&dst_data[0]);
|
||||
sws_freeContext(sws_ctx);
|
||||
return ret < 0;
|
||||
}
|
@@ -0,0 +1,770 @@
|
||||
/*
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
/**
|
||||
* @file
|
||||
* simple audio converter
|
||||
*
|
||||
* @example transcode_aac.c
|
||||
* Convert an input audio file to AAC in an MP4 container using FFmpeg.
|
||||
* @author Andreas Unterweger (dustsigns@gmail.com)
|
||||
*/
|
||||
|
||||
#include <stdio.h>
|
||||
|
||||
#include "libavformat/avformat.h"
|
||||
#include "libavformat/avio.h"
|
||||
|
||||
#include "libavcodec/avcodec.h"
|
||||
|
||||
#include "libavutil/audio_fifo.h"
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/avstring.h"
|
||||
#include "libavutil/frame.h"
|
||||
#include "libavutil/opt.h"
|
||||
|
||||
#include "libswresample/swresample.h"
|
||||
|
||||
/** The output bit rate in kbit/s */
|
||||
#define OUTPUT_BIT_RATE 96000
|
||||
/** The number of output channels */
|
||||
#define OUTPUT_CHANNELS 2
|
||||
|
||||
/**
|
||||
* Convert an error code into a text message.
|
||||
* @param error Error code to be converted
|
||||
* @return Corresponding error text (not thread-safe)
|
||||
*/
|
||||
static const char *get_error_text(const int error)
|
||||
{
|
||||
static char error_buffer[255];
|
||||
av_strerror(error, error_buffer, sizeof(error_buffer));
|
||||
return error_buffer;
|
||||
}
|
||||
|
||||
/** Open an input file and the required decoder. */
|
||||
static int open_input_file(const char *filename,
|
||||
AVFormatContext **input_format_context,
|
||||
AVCodecContext **input_codec_context)
|
||||
{
|
||||
AVCodec *input_codec;
|
||||
int error;
|
||||
|
||||
/** Open the input file to read from it. */
|
||||
if ((error = avformat_open_input(input_format_context, filename, NULL,
|
||||
NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
*input_format_context = NULL;
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Get information on the input file (number of streams etc.). */
|
||||
if ((error = avformat_find_stream_info(*input_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open find stream info (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Make sure that there is only one stream in the input file. */
|
||||
if ((*input_format_context)->nb_streams != 1) {
|
||||
fprintf(stderr, "Expected one audio input stream, but found %d\n",
|
||||
(*input_format_context)->nb_streams);
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Find a decoder for the audio stream. */
|
||||
if (!(input_codec = avcodec_find_decoder((*input_format_context)->streams[0]->codec->codec_id))) {
|
||||
fprintf(stderr, "Could not find input codec\n");
|
||||
avformat_close_input(input_format_context);
|
||||
return AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Open the decoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2((*input_format_context)->streams[0]->codec,
|
||||
input_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open input codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
avformat_close_input(input_format_context);
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Save the decoder context for easier access later. */
|
||||
*input_codec_context = (*input_format_context)->streams[0]->codec;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Open an output file and the required encoder.
|
||||
* Also set some basic encoder parameters.
|
||||
* Some of these parameters are based on the input file's parameters.
|
||||
*/
|
||||
static int open_output_file(const char *filename,
|
||||
AVCodecContext *input_codec_context,
|
||||
AVFormatContext **output_format_context,
|
||||
AVCodecContext **output_codec_context)
|
||||
{
|
||||
AVIOContext *output_io_context = NULL;
|
||||
AVStream *stream = NULL;
|
||||
AVCodec *output_codec = NULL;
|
||||
int error;
|
||||
|
||||
/** Open the output file to write to it. */
|
||||
if ((error = avio_open(&output_io_context, filename,
|
||||
AVIO_FLAG_WRITE)) < 0) {
|
||||
fprintf(stderr, "Could not open output file '%s' (error '%s')\n",
|
||||
filename, get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
|
||||
/** Create a new format context for the output container format. */
|
||||
if (!(*output_format_context = avformat_alloc_context())) {
|
||||
fprintf(stderr, "Could not allocate output format context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
|
||||
/** Associate the output file (pointer) with the container format context. */
|
||||
(*output_format_context)->pb = output_io_context;
|
||||
|
||||
/** Guess the desired container format based on the file extension. */
|
||||
if (!((*output_format_context)->oformat = av_guess_format(NULL, filename,
|
||||
NULL))) {
|
||||
fprintf(stderr, "Could not find output file format\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
av_strlcpy((*output_format_context)->filename, filename,
|
||||
sizeof((*output_format_context)->filename));
|
||||
|
||||
/** Find the encoder to be used by its name. */
|
||||
if (!(output_codec = avcodec_find_encoder(AV_CODEC_ID_AAC))) {
|
||||
fprintf(stderr, "Could not find an AAC encoder.\n");
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Create a new audio stream in the output file container. */
|
||||
if (!(stream = avformat_new_stream(*output_format_context, output_codec))) {
|
||||
fprintf(stderr, "Could not create new stream\n");
|
||||
error = AVERROR(ENOMEM);
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
/** Save the encoder context for easier access later. */
|
||||
*output_codec_context = stream->codec;
|
||||
|
||||
/**
|
||||
* Set the basic encoder parameters.
|
||||
* The input file's sample rate is used to avoid a sample rate conversion.
|
||||
*/
|
||||
(*output_codec_context)->channels = OUTPUT_CHANNELS;
|
||||
(*output_codec_context)->channel_layout = av_get_default_channel_layout(OUTPUT_CHANNELS);
|
||||
(*output_codec_context)->sample_rate = input_codec_context->sample_rate;
|
||||
(*output_codec_context)->sample_fmt = output_codec->sample_fmts[0];
|
||||
(*output_codec_context)->bit_rate = OUTPUT_BIT_RATE;
|
||||
|
||||
/** Allow the use of the experimental AAC encoder */
|
||||
(*output_codec_context)->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
|
||||
|
||||
/** Set the sample rate for the container. */
|
||||
stream->time_base.den = input_codec_context->sample_rate;
|
||||
stream->time_base.num = 1;
|
||||
|
||||
/**
|
||||
* Some container formats (like MP4) require global headers to be present
|
||||
* Mark the encoder so that it behaves accordingly.
|
||||
*/
|
||||
if ((*output_format_context)->oformat->flags & AVFMT_GLOBALHEADER)
|
||||
(*output_codec_context)->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
|
||||
|
||||
/** Open the encoder for the audio stream to use it later. */
|
||||
if ((error = avcodec_open2(*output_codec_context, output_codec, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not open output codec (error '%s')\n",
|
||||
get_error_text(error));
|
||||
goto cleanup;
|
||||
}
|
||||
|
||||
return 0;
|
||||
|
||||
cleanup:
|
||||
avio_closep(&(*output_format_context)->pb);
|
||||
avformat_free_context(*output_format_context);
|
||||
*output_format_context = NULL;
|
||||
return error < 0 ? error : AVERROR_EXIT;
|
||||
}
|
||||
|
||||
/** Initialize one data packet for reading or writing. */
|
||||
static void init_packet(AVPacket *packet)
|
||||
{
|
||||
av_init_packet(packet);
|
||||
/** Set the packet data and size so that it is recognized as being empty. */
|
||||
packet->data = NULL;
|
||||
packet->size = 0;
|
||||
}
|
||||
|
||||
/** Initialize one audio frame for reading from the input file */
|
||||
static int init_input_frame(AVFrame **frame)
|
||||
{
|
||||
if (!(*frame = av_frame_alloc())) {
|
||||
fprintf(stderr, "Could not allocate input frame\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/**
|
||||
* Initialize the audio resampler based on the input and output codec settings.
|
||||
* If the input and output sample formats differ, a conversion is required;
|
||||
* libswresample takes care of this, but requires initialization.
|
||||
*/
|
||||
static int init_resampler(AVCodecContext *input_codec_context,
|
||||
AVCodecContext *output_codec_context,
|
||||
SwrContext **resample_context)
|
||||
{
|
||||
int error;
|
||||
|
||||
/**
|
||||
* Create a resampler context for the conversion.
|
||||
* Set the conversion parameters.
|
||||
* Default channel layouts based on the number of channels
|
||||
* are assumed for simplicity (they are sometimes not detected
|
||||
* properly by the demuxer and/or decoder).
|
||||
*/
|
||||
*resample_context = swr_alloc_set_opts(NULL,
|
||||
av_get_default_channel_layout(output_codec_context->channels),
|
||||
output_codec_context->sample_fmt,
|
||||
output_codec_context->sample_rate,
|
||||
av_get_default_channel_layout(input_codec_context->channels),
|
||||
input_codec_context->sample_fmt,
|
||||
input_codec_context->sample_rate,
|
||||
0, NULL);
|
||||
if (!*resample_context) {
|
||||
fprintf(stderr, "Could not allocate resample context\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
/**
|
||||
* Perform a sanity check so that the number of converted samples is
|
||||
* not greater than the number of samples to be converted.
|
||||
* If the sample rates differ, this case has to be handled differently
|
||||
*/
|
||||
av_assert0(output_codec_context->sample_rate == input_codec_context->sample_rate);
|
||||
|
||||
/** Open the resampler with the specified parameters. */
|
||||
if ((error = swr_init(*resample_context)) < 0) {
|
||||
fprintf(stderr, "Could not open resample context\n");
|
||||
swr_free(resample_context);
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
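
/* A note on the assertion above: because the output sample rate is copied
 * from the input in open_output_file(), the resampler should never have to
 * buffer samples internally and each call converts exactly frame_size
 * samples. If the rates could differ, the converted sample count would have
 * to be derived with av_rescale_rnd()/swr_get_delay() as in the
 * resampling_audio example, instead of assuming a 1:1 relationship. */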
|
||||
|
||||
/** Initialize a FIFO buffer for the audio samples to be encoded. */
|
||||
static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
|
||||
{
|
||||
/** Create the FIFO buffer based on the specified output sample format. */
|
||||
if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
|
||||
output_codec_context->channels, 1))) {
|
||||
fprintf(stderr, "Could not allocate FIFO\n");
|
||||
return AVERROR(ENOMEM);
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Write the header of the output file container. */
|
||||
static int write_output_file_header(AVFormatContext *output_format_context)
|
||||
{
|
||||
int error;
|
||||
if ((error = avformat_write_header(output_format_context, NULL)) < 0) {
|
||||
fprintf(stderr, "Could not write output file header (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
/** Decode one audio frame from the input file. */
|
||||
static int decode_audio_frame(AVFrame *frame,
|
||||
AVFormatContext *input_format_context,
|
||||
AVCodecContext *input_codec_context,
|
||||
int *data_present, int *finished)
|
||||
{
|
||||
/** Packet used for temporary storage. */
|
||||
AVPacket input_packet;
|
||||
int error;
|
||||
init_packet(&input_packet);
|
||||
|
||||
/** Read one audio frame from the input file into a temporary packet. */
|
||||
if ((error = av_read_frame(input_format_context, &input_packet)) < 0) {
|
||||
/** If we are at the end of the file, flush the decoder below. */
|
||||
if (error == AVERROR_EOF)
|
||||
*finished = 1;
|
||||
else {
|
||||
fprintf(stderr, "Could not read frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
return error;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Decode the audio frame stored in the temporary packet.
|
||||
* The input audio stream decoder is used to do this.
|
||||
* If we are at the end of the file, pass an empty packet to the decoder
|
||||
* to flush it.
|
||||
*/
|
||||
if ((error = avcodec_decode_audio4(input_codec_context, frame,
|
||||
data_present, &input_packet)) < 0) {
|
||||
fprintf(stderr, "Could not decode frame (error '%s')\n",
|
||||
get_error_text(error));
|
||||
av_free_packet(&input_packet);
|
||||
return error;
|
||||
}
|
||||
|
||||
/**
|
||||
* If the decoder has not been flushed completely, we are not finished,
|
||||
* so that this function has to be called again.
|
||||
*/
|
||||
if (*finished && *data_present)
|
||||
*finished = 0;
|
||||
av_free_packet(&input_packet);
|
||||
return 0;
|
||||
}
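
/* Flushing works as follows: once av_read_frame() returns AVERROR_EOF the
 * packet stays empty (data = NULL, size = 0 from init_packet()), and feeding
 * that empty packet to avcodec_decode_audio4() drains the frames the decoder
 * still has buffered. *finished is only left at 1 when such a call produces
 * no more data, which is the condition the caller uses to stop reading. */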

/**
 * Initialize a temporary storage for the specified number of audio samples.
 * The conversion requires temporary storage due to the different format.
 * The number of audio samples to be allocated is specified in frame_size.
 */
static int init_converted_samples(uint8_t ***converted_input_samples,
                                  AVCodecContext *output_codec_context,
                                  int frame_size)
{
    int error;

    /**
     * Allocate as many pointers as there are audio channels.
     * Each pointer will later point to the audio samples of the corresponding
     * channels (although it may be NULL for interleaved formats).
     */
    if (!(*converted_input_samples = calloc(output_codec_context->channels,
                                            sizeof(**converted_input_samples)))) {
        fprintf(stderr, "Could not allocate converted input sample pointers\n");
        return AVERROR(ENOMEM);
    }

    /**
     * Allocate memory for the samples of all channels in one consecutive
     * block for convenience.
     */
    if ((error = av_samples_alloc(*converted_input_samples, NULL,
                                  output_codec_context->channels,
                                  frame_size,
                                  output_codec_context->sample_fmt, 0)) < 0) {
        fprintf(stderr,
                "Could not allocate converted input samples (error '%s')\n",
                get_error_text(error));
        av_freep(&(*converted_input_samples)[0]);
        free(*converted_input_samples);
        return error;
    }
    return 0;
}
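
/*
 * Aside: a minimal, self-contained sketch (separate from this example) of the
 * same allocation pattern done with the av_samples_alloc_array_and_samples()
 * helper, which combines the calloc() of the channel pointer array and the
 * av_samples_alloc() call above into one step. The sketch_* name and the
 * stereo / float-planar / 1024-sample parameters are illustrative assumptions,
 * not values used by transcode_aac.c; the function is not called here.
 */
#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

static int sketch_alloc_planar_samples(void)
{
    uint8_t **samples = NULL;
    int linesize;
    /* 2 channels, 1024 samples per channel, planar float, default alignment */
    int error = av_samples_alloc_array_and_samples(&samples, &linesize,
                                                   2, 1024,
                                                   AV_SAMPLE_FMT_FLTP, 0);
    if (error < 0)
        return error;
    /* samples[0] and samples[1] now point into one contiguous block. */
    av_freep(&samples[0]); /* frees the sample data of all channels */
    av_freep(&samples);    /* frees the pointer array itself */
    return 0;
}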

/**
 * Convert the input audio samples into the output sample format.
 * The conversion happens on a per-frame basis, the size of which is specified
 * by frame_size.
 */
static int convert_samples(const uint8_t **input_data,
                           uint8_t **converted_data, const int frame_size,
                           SwrContext *resample_context)
{
    int error;

    /** Convert the samples using the resampler. */
    if ((error = swr_convert(resample_context,
                             converted_data, frame_size,
                             input_data    , frame_size)) < 0) {
        fprintf(stderr, "Could not convert input samples (error '%s')\n",
                get_error_text(error));
        return error;
    }

    return 0;
}
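
/*
 * Aside: a minimal sketch (separate from this example) of how a SwrContext
 * like the one used above can be configured and initialized. The concrete
 * layouts, formats and rates below (stereo, S16 -> FLTP, 44100 Hz) and the
 * sketch_* name are illustrative assumptions; transcode_aac.c derives the
 * real parameters from its codec contexts in init_resampler() instead.
 */
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
#include <libavutil/samplefmt.h>

static SwrContext *sketch_create_resampler(void)
{
    SwrContext *swr = swr_alloc_set_opts(NULL,
                                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLTP, 44100, /* output */
                                         AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  44100, /* input  */
                                         0, NULL);
    if (!swr)
        return NULL;
    if (swr_init(swr) < 0) {   /* must be called before swr_convert() */
        swr_free(&swr);
        return NULL;
    }
    return swr;
}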

/** Add converted input audio samples to the FIFO buffer for later processing. */
static int add_samples_to_fifo(AVAudioFifo *fifo,
                               uint8_t **converted_input_samples,
                               const int frame_size)
{
    int error;

    /**
     * Make the FIFO as large as it needs to be to hold both
     * the old and the new samples.
     */
    if ((error = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + frame_size)) < 0) {
        fprintf(stderr, "Could not reallocate FIFO\n");
        return error;
    }

    /** Store the new samples in the FIFO buffer. */
    if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
                            frame_size) < frame_size) {
        fprintf(stderr, "Could not write data to FIFO\n");
        return AVERROR_EXIT;
    }
    return 0;
}
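
/*
 * Aside: a minimal, self-contained sketch (separate from this example) of the
 * AVAudioFifo calls used above and in load_encode_and_write() below: allocate
 * a FIFO, write samples into it and read them back. The sketch_* name and the
 * packed-S16 / stereo parameters are illustrative assumptions; the function is
 * not called by this example.
 */
#include <libavutil/audio_fifo.h>
#include <libavutil/samplefmt.h>

static int sketch_audio_fifo_roundtrip(uint8_t **samples, int nb_samples)
{
    /* The FIFO grows on demand via av_audio_fifo_realloc(), so the initial
     * size of 1 is only a lower bound, exactly as in init_fifo() above. */
    AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1);
    int ret = 0;

    if (!fifo)
        return AVERROR(ENOMEM);
    if (av_audio_fifo_write(fifo, (void **)samples, nb_samples) < nb_samples ||
        av_audio_fifo_read(fifo, (void **)samples, nb_samples) < nb_samples)
        ret = AVERROR_EXIT;
    av_audio_fifo_free(fifo);
    return ret;
}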

/**
 * Read one audio frame from the input file, decode, convert and store
 * it in the FIFO buffer.
 */
static int read_decode_convert_and_store(AVAudioFifo *fifo,
                                         AVFormatContext *input_format_context,
                                         AVCodecContext *input_codec_context,
                                         AVCodecContext *output_codec_context,
                                         SwrContext *resampler_context,
                                         int *finished)
{
    /** Temporary storage of the input samples of the frame read from the file. */
    AVFrame *input_frame = NULL;
    /** Temporary storage for the converted input samples. */
    uint8_t **converted_input_samples = NULL;
    int data_present;
    int ret = AVERROR_EXIT;

    /** Initialize temporary storage for one input frame. */
    if (init_input_frame(&input_frame))
        goto cleanup;
    /** Decode one frame worth of audio samples. */
    if (decode_audio_frame(input_frame, input_format_context,
                           input_codec_context, &data_present, finished))
        goto cleanup;
    /**
     * If we are at the end of the file and there are no more samples
     * in the decoder which are delayed, we are actually finished.
     * This must not be treated as an error.
     */
    if (*finished && !data_present) {
        ret = 0;
        goto cleanup;
    }
    /** If there is decoded data, convert and store it. */
    if (data_present) {
        /** Initialize the temporary storage for the converted input samples. */
        if (init_converted_samples(&converted_input_samples, output_codec_context,
                                   input_frame->nb_samples))
            goto cleanup;

        /**
         * Convert the input samples to the desired output sample format.
         * This requires a temporary storage provided by converted_input_samples.
         */
        if (convert_samples((const uint8_t**)input_frame->extended_data, converted_input_samples,
                            input_frame->nb_samples, resampler_context))
            goto cleanup;

        /** Add the converted input samples to the FIFO buffer for later processing. */
        if (add_samples_to_fifo(fifo, converted_input_samples,
                                input_frame->nb_samples))
            goto cleanup;
        ret = 0;
    }
    ret = 0;

cleanup:
    if (converted_input_samples) {
        av_freep(&converted_input_samples[0]);
        free(converted_input_samples);
    }
    av_frame_free(&input_frame);

    return ret;
}

/**
 * Initialize one output frame for writing to the output file.
 * The frame will be exactly frame_size samples large.
 */
static int init_output_frame(AVFrame **frame,
                             AVCodecContext *output_codec_context,
                             int frame_size)
{
    int error;

    /** Create a new frame to store the audio samples. */
    if (!(*frame = av_frame_alloc())) {
        fprintf(stderr, "Could not allocate output frame\n");
        return AVERROR_EXIT;
    }

    /**
     * Set the frame's parameters, especially its size and format.
     * av_frame_get_buffer needs this to allocate memory for the
     * audio samples of the frame.
     * Default channel layouts based on the number of channels
     * are assumed for simplicity.
     */
    (*frame)->nb_samples     = frame_size;
    (*frame)->channel_layout = output_codec_context->channel_layout;
    (*frame)->format         = output_codec_context->sample_fmt;
    (*frame)->sample_rate    = output_codec_context->sample_rate;

    /**
     * Allocate the samples of the created frame. This call will make
     * sure that the audio frame can hold as many samples as specified.
     */
    if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
        fprintf(stderr, "Could not allocate output frame samples (error '%s')\n",
                get_error_text(error));
        av_frame_free(frame);
        return error;
    }

    return 0;
}
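
/*
 * Aside: the comment above mentions that default channel layouts, derived from
 * the channel count, are assumed. A minimal sketch (separate from this example)
 * of what that means in terms of the libavutil API: map a channel count to its
 * default layout and back. The sketch_* name and the channel count of 2 are
 * illustrative assumptions; the function is not called here.
 */
#include <libavutil/channel_layout.h>

static int sketch_default_channel_layout(void)
{
    uint64_t layout = av_get_default_channel_layout(2); /* AV_CH_LAYOUT_STEREO */
    return av_get_channel_layout_nb_channels(layout);   /* 2 again */
}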

/** Global timestamp for the audio frames */
static int64_t pts = 0;

/** Encode one frame worth of audio to the output file. */
static int encode_audio_frame(AVFrame *frame,
                              AVFormatContext *output_format_context,
                              AVCodecContext *output_codec_context,
                              int *data_present)
{
    /** Packet used for temporary storage. */
    AVPacket output_packet;
    int error;
    init_packet(&output_packet);

    /** Set a timestamp based on the sample rate for the container. */
    if (frame) {
        frame->pts = pts;
        pts += frame->nb_samples;
    }

    /**
     * Encode the audio frame and store it in the temporary packet.
     * The output audio stream encoder is used to do this.
     */
    if ((error = avcodec_encode_audio2(output_codec_context, &output_packet,
                                       frame, data_present)) < 0) {
        fprintf(stderr, "Could not encode frame (error '%s')\n",
                get_error_text(error));
        av_free_packet(&output_packet);
        return error;
    }

    /** Write one audio frame from the temporary packet to the output file. */
    if (*data_present) {
        if ((error = av_write_frame(output_format_context, &output_packet)) < 0) {
            fprintf(stderr, "Could not write frame (error '%s')\n",
                    get_error_text(error));
            av_free_packet(&output_packet);
            return error;
        }

        av_free_packet(&output_packet);
    }

    return 0;
}
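
/*
 * Aside: frame->pts above is counted in audio samples, which corresponds to a
 * time base of 1/sample_rate (the time base this example uses for its output
 * audio stream). A minimal sketch (separate from this example) of converting
 * such a sample-count timestamp into another time base with av_rescale_q();
 * the sketch_* name, the 44100 Hz rate and the 1/90000 target are illustrative
 * assumptions, and the function is not called here.
 */
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

static int64_t sketch_rescale_sample_pts(int64_t pts_in_samples)
{
    AVRational sample_tb = { 1, 44100 };   /* one tick per audio sample */
    AVRational target_tb = { 1, 90000 };   /* e.g. an MPEG-TS style time base */
    return av_rescale_q(pts_in_samples, sample_tb, target_tb);
}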

/**
 * Load one audio frame from the FIFO buffer, encode and write it to the
 * output file.
 */
static int load_encode_and_write(AVAudioFifo *fifo,
                                 AVFormatContext *output_format_context,
                                 AVCodecContext *output_codec_context)
{
    /** Temporary storage of the output samples of the frame written to the file. */
    AVFrame *output_frame;
    /**
     * Use the maximum number of possible samples per frame.
     * If there is less than the maximum possible frame size in the FIFO
     * buffer, use this number. Otherwise, use the maximum possible frame size.
     */
    const int frame_size = FFMIN(av_audio_fifo_size(fifo),
                                 output_codec_context->frame_size);
    int data_written;

    /** Initialize temporary storage for one output frame. */
    if (init_output_frame(&output_frame, output_codec_context, frame_size))
        return AVERROR_EXIT;

    /**
     * Read as many samples from the FIFO buffer as required to fill the frame.
     * The samples are stored in the frame temporarily.
     */
    if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
        fprintf(stderr, "Could not read data from FIFO\n");
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }

    /** Encode one frame worth of audio samples. */
    if (encode_audio_frame(output_frame, output_format_context,
                           output_codec_context, &data_written)) {
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }
    av_frame_free(&output_frame);
    return 0;
}

/** Write the trailer of the output file container. */
static int write_output_file_trailer(AVFormatContext *output_format_context)
{
    int error;
    if ((error = av_write_trailer(output_format_context)) < 0) {
        fprintf(stderr, "Could not write output file trailer (error '%s')\n",
                get_error_text(error));
        return error;
    }
    return 0;
}

/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    SwrContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(1);
    }

    /** Register all codecs and formats so that they can be used. */
    av_register_all();
    /** Open the input file for reading. */
    if (open_input_file(argv[1], &input_format_context,
                        &input_codec_context))
        goto cleanup;
    /** Open the output file for writing. */
    if (open_output_file(argv[2], input_codec_context,
                         &output_format_context, &output_codec_context))
        goto cleanup;
    /** Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context,
                       &resample_context))
        goto cleanup;
    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo, output_codec_context))
        goto cleanup;
    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /**
     * Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither.
     */
    while (1) {
        /** Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished                = 0;

        /**
         * Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, we
         * need the FIFO buffer to store as many frames worth of input samples
         * as are required to make up at least one frame worth of output samples.
         */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
             * Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer.
             */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /**
             * If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file.
             */
            if (finished)
                break;
        }

        /**
         * If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder.
         */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /**
             * Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file.
             */
            if (load_encode_and_write(fifo, output_format_context,
                                      output_codec_context))
                goto cleanup;

        /**
         * If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish.
         */
        if (finished) {
            int data_written;
            /** Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context,
                                       output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /** Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    swr_free(&resample_context);
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_closep(&output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_close(input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
583
contrib/sdk/sources/ffmpeg/ffmpeg-2.8/doc/examples/transcoding.c
Normal file
@@ -0,0 +1,583 @@
/*
 * Copyright (c) 2010 Nicolas George
 * Copyright (c) 2011 Stefano Sabatini
 * Copyright (c) 2014 Andrey Utkin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

/**
 * @file
 * API example for demuxing, decoding, filtering, encoding and muxing
 * @example transcoding.c
 */

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfiltergraph.h>
#include <libavfilter/avcodec.h>
#include <libavfilter/buffersink.h>
#include <libavfilter/buffersrc.h>
#include <libavutil/opt.h>
#include <libavutil/pixdesc.h>

static AVFormatContext *ifmt_ctx;
static AVFormatContext *ofmt_ctx;
typedef struct FilteringContext {
    AVFilterContext *buffersink_ctx;
    AVFilterContext *buffersrc_ctx;
    AVFilterGraph *filter_graph;
} FilteringContext;
static FilteringContext *filter_ctx;

static int open_input_file(const char *filename)
{
    int ret;
    unsigned int i;

    ifmt_ctx = NULL;
    if ((ret = avformat_open_input(&ifmt_ctx, filename, NULL, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open input file\n");
        return ret;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot find stream information\n");
        return ret;
    }

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *stream;
        AVCodecContext *codec_ctx;
        stream = ifmt_ctx->streams[i];
        codec_ctx = stream->codec;
        /* Reencode video & audio and remux subtitles etc. */
        if (codec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || codec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* Open decoder */
            ret = avcodec_open2(codec_ctx,
                    avcodec_find_decoder(codec_ctx->codec_id), NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Failed to open decoder for stream #%u\n", i);
                return ret;
            }
        }
    }

    av_dump_format(ifmt_ctx, 0, filename, 0);
    return 0;
}

static int open_output_file(const char *filename)
{
    AVStream *out_stream;
    AVStream *in_stream;
    AVCodecContext *dec_ctx, *enc_ctx;
    AVCodec *encoder;
    int ret;
    unsigned int i;

    ofmt_ctx = NULL;
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, filename);
    if (!ofmt_ctx) {
        av_log(NULL, AV_LOG_ERROR, "Could not create output context\n");
        return AVERROR_UNKNOWN;
    }


    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        out_stream = avformat_new_stream(ofmt_ctx, NULL);
        if (!out_stream) {
            av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
            return AVERROR_UNKNOWN;
        }

        in_stream = ifmt_ctx->streams[i];
        dec_ctx = in_stream->codec;
        enc_ctx = out_stream->codec;

        if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO
                || dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
            /* In this example, we choose transcoding to the same codec. */
            encoder = avcodec_find_encoder(dec_ctx->codec_id);
            if (!encoder) {
                av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
                return AVERROR_INVALIDDATA;
            }

            /* In this example, we transcode to the same properties (picture size,
             * sample rate etc.). These properties can be changed for output
             * streams easily using filters. */
            if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
                enc_ctx->height = dec_ctx->height;
                enc_ctx->width = dec_ctx->width;
                enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
                /* take first format from list of supported formats */
                enc_ctx->pix_fmt = encoder->pix_fmts[0];
                /* video time_base can be set to whatever is handy and supported by encoder */
                enc_ctx->time_base = dec_ctx->time_base;
            } else {
                enc_ctx->sample_rate = dec_ctx->sample_rate;
                enc_ctx->channel_layout = dec_ctx->channel_layout;
                enc_ctx->channels = av_get_channel_layout_nb_channels(enc_ctx->channel_layout);
                /* take first format from list of supported formats */
                enc_ctx->sample_fmt = encoder->sample_fmts[0];
                enc_ctx->time_base = (AVRational){1, enc_ctx->sample_rate};
            }

            /* Third parameter can be used to pass settings to encoder */
            ret = avcodec_open2(enc_ctx, encoder, NULL);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Cannot open encoder for stream #%u\n", i);
                return ret;
            }
        } else if (dec_ctx->codec_type == AVMEDIA_TYPE_UNKNOWN) {
            av_log(NULL, AV_LOG_FATAL, "Elementary stream #%u is of unknown type, cannot proceed\n", i);
            return AVERROR_INVALIDDATA;
        } else {
            /* if this stream must be remuxed */
            ret = avcodec_copy_context(ofmt_ctx->streams[i]->codec,
                    ifmt_ctx->streams[i]->codec);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Copying stream context failed\n");
                return ret;
            }
        }

        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            enc_ctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    }
    av_dump_format(ofmt_ctx, 0, filename, 1);

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Could not open output file '%s'", filename);
            return ret;
        }
    }

    /* init muxer, write output file header */
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return ret;
    }

    return 0;
}
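
/*
 * Aside: avformat_alloc_output_context2() above picks the muxer from the file
 * name when no format is given explicitly. A minimal, self-contained sketch
 * (separate from this example) that only performs that guess and reports the
 * chosen container; the sketch_* name and the "out.mp4" file name are
 * illustrative assumptions, and the function is not called here.
 */
static int sketch_guess_output_format(void)
{
    AVFormatContext *ctx = NULL;
    int ret;

    av_register_all(); /* needed before any muxer can be found */
    ret = avformat_alloc_output_context2(&ctx, NULL, NULL, "out.mp4");
    if (ret < 0 || !ctx)
        return ret < 0 ? ret : AVERROR_UNKNOWN;
    av_log(NULL, AV_LOG_INFO, "Guessed muxer: %s\n", ctx->oformat->name);
    avformat_free_context(ctx);
    return 0;
}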

static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
        AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc = NULL;
    AVFilter *buffersink = NULL;
    AVFilterContext *buffersrc_ctx = NULL;
    AVFilterContext *buffersink_ctx = NULL;
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVFilterGraph *filter_graph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        buffersrc = avfilter_get_by_name("buffer");
        buffersink = avfilter_get_by_name("buffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
                "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
                dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
                dec_ctx->time_base.num, dec_ctx->time_base.den,
                dec_ctx->sample_aspect_ratio.num,
                dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "pix_fmts",
                (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    } else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        buffersrc = avfilter_get_by_name("abuffer");
        buffersink = avfilter_get_by_name("abuffersink");
        if (!buffersrc || !buffersink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout)
            dec_ctx->channel_layout =
                av_get_default_channel_layout(dec_ctx->channels);
        snprintf(args, sizeof(args),
                "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
                dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
                av_get_sample_fmt_name(dec_ctx->sample_fmt),
                dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                args, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                NULL, NULL, filter_graph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_fmts",
                (uint8_t*)&enc_ctx->sample_fmt, sizeof(enc_ctx->sample_fmt),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "channel_layouts",
                (uint8_t*)&enc_ctx->channel_layout,
                sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(buffersink_ctx, "sample_rates",
                (uint8_t*)&enc_ctx->sample_rate, sizeof(enc_ctx->sample_rate),
                AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    } else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filter_spec,
                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->buffersrc_ctx = buffersrc_ctx;
    fctx->buffersink_ctx = buffersink_ctx;
    fctx->filter_graph = filter_graph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}

static int init_filters(void)
{
    const char *filter_spec;
    unsigned int i;
    int ret;
    filter_ctx = av_malloc_array(ifmt_ctx->nb_streams, sizeof(*filter_ctx));
    if (!filter_ctx)
        return AVERROR(ENOMEM);

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        filter_ctx[i].buffersrc_ctx  = NULL;
        filter_ctx[i].buffersink_ctx = NULL;
        filter_ctx[i].filter_graph   = NULL;
        if (!(ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO
                || ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
            continue;


        if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
            filter_spec = "null"; /* passthrough (dummy) filter for video */
        else
            filter_spec = "anull"; /* passthrough (dummy) filter for audio */
        ret = init_filter(&filter_ctx[i], ifmt_ctx->streams[i]->codec,
                ofmt_ctx->streams[i]->codec, filter_spec);
        if (ret)
            return ret;
    }
    return 0;
}
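
/*
 * Aside: the "null"/"anull" specs above are pass-through graphs; any other
 * libavfilter graph description (for instance "scale=640:360" for video or
 * "aresample=8000" for audio) could be handed to init_filter() the same way.
 * A minimal sketch (separate from this example) that only checks that the
 * filters named in such a spec are registered before using them; the
 * sketch_* name is illustrative and the function is not called here.
 */
static int sketch_check_filters_available(void)
{
    avfilter_register_all(); /* also done in main() below */
    if (!avfilter_get_by_name("scale") || !avfilter_get_by_name("aresample"))
        return AVERROR_FILTER_NOT_FOUND;
    return 0;
}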

static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
            filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         ofmt_ctx->streams[stream_index]->codec->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
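
/*
 * Aside: a minimal sketch (separate from this example) of what
 * av_packet_rescale_ts() above does: it converts pts, dts and duration of a
 * packet from one time base into another. The sketch_* name, the 1/25 codec
 * time base and the 1/90000 stream time base are illustrative assumptions;
 * the function is not called here.
 */
static void sketch_rescale_packet(void)
{
    AVPacket pkt;
    AVRational codec_tb  = { 1, 25 };      /* e.g. a 25 fps video encoder */
    AVRational stream_tb = { 1, 90000 };   /* e.g. an MPEG-TS muxer stream */

    pkt.data = NULL;
    pkt.size = 0;
    av_init_packet(&pkt);
    pkt.pts = pkt.dts = 25;                /* one second in codec_tb ticks */
    pkt.duration = 1;
    av_packet_rescale_ts(&pkt, codec_tb, stream_tb);
    /* pkt.pts and pkt.dts are now 90000, pkt.duration is 3600. */
}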

static int filter_encode_write_frame(AVFrame *frame, unsigned int stream_index)
{
    int ret;
    AVFrame *filt_frame;

    av_log(NULL, AV_LOG_INFO, "Pushing decoded frame to filters\n");
    /* push the decoded frame into the filtergraph */
    ret = av_buffersrc_add_frame_flags(filter_ctx[stream_index].buffersrc_ctx,
            frame, 0);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
        return ret;
    }

    /* pull filtered frames from the filtergraph */
    while (1) {
        filt_frame = av_frame_alloc();
        if (!filt_frame) {
            ret = AVERROR(ENOMEM);
            break;
        }
        av_log(NULL, AV_LOG_INFO, "Pulling filtered frame from filters\n");
        ret = av_buffersink_get_frame(filter_ctx[stream_index].buffersink_ctx,
                filt_frame);
        if (ret < 0) {
            /* if no more frames for output - returns AVERROR(EAGAIN)
             * if flushed and no more frames for output - returns AVERROR_EOF
             * rewrite retcode to 0 to show it as normal procedure completion
             */
            if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                ret = 0;
            av_frame_free(&filt_frame);
            break;
        }

        filt_frame->pict_type = AV_PICTURE_TYPE_NONE;
        ret = encode_write_frame(filt_frame, stream_index, NULL);
        if (ret < 0)
            break;
    }

    return ret;
}

static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities &
                AV_CODEC_CAP_DELAY))
        return 0;

    while (1) {
        av_log(NULL, AV_LOG_INFO, "Flushing stream #%u encoder\n", stream_index);
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}
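
/*
 * Aside: a minimal sketch (separate from this example) of the capability test
 * used above, applied to one specific encoder. The sketch_* name and the
 * choice of the native AAC encoder (AV_CODEC_ID_AAC) are illustrative
 * assumptions; any encoder returned by avcodec_find_encoder() can be checked
 * the same way. The function is not called here.
 */
static int sketch_encoder_needs_flush(void)
{
    AVCodec *codec;

    avcodec_register_all(); /* av_register_all() in main() below also does this */
    codec = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!codec)
        return 0;
    return !!(codec->capabilities & AV_CODEC_CAP_DELAY);
}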

int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ifmt_ctx->streams[stream_index]->codec->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_free_packet(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}