ffmpeg-2.8.5

git-svn-id: svn://kolibrios.org@6147 a494cfbc-eb01-0410-851d-a64ba20cac60
Author: Sergey Semyonov (Serge)
Date:   2016-02-05 22:08:02 +00:00
Parent: a08f61ddb9
Commit: a4b787f4b8
5429 changed files with 1356786 additions and 0 deletions


@@ -0,0 +1,73 @@
include $(SUBDIR)../config.mak
NAME = avdevice
FFLIBS = avformat avcodec avutil
HEADERS = avdevice.h \
version.h \
OBJS = alldevices.o \
avdevice.o \
utils.o \
# input/output devices
OBJS-$(CONFIG_ALSA_INDEV) += alsa_dec.o alsa.o timefilter.o
OBJS-$(CONFIG_ALSA_OUTDEV) += alsa_enc.o alsa.o
OBJS-$(CONFIG_AVFOUNDATION_INDEV) += avfoundation.o
OBJS-$(CONFIG_BKTR_INDEV) += bktr.o
OBJS-$(CONFIG_CACA_OUTDEV) += caca.o
OBJS-$(CONFIG_DECKLINK_OUTDEV) += decklink_enc.o decklink_enc_c.o decklink_common.o
OBJS-$(CONFIG_DECKLINK_INDEV) += decklink_dec.o decklink_dec_c.o decklink_common.o
OBJS-$(CONFIG_DSHOW_INDEV) += dshow_crossbar.o dshow.o dshow_enummediatypes.o \
dshow_enumpins.o dshow_filter.o \
dshow_pin.o dshow_common.o
OBJS-$(CONFIG_DV1394_INDEV) += dv1394.o
OBJS-$(CONFIG_FBDEV_INDEV) += fbdev_dec.o \
fbdev_common.o
OBJS-$(CONFIG_FBDEV_OUTDEV) += fbdev_enc.o \
fbdev_common.o
OBJS-$(CONFIG_GDIGRAB_INDEV) += gdigrab.o
OBJS-$(CONFIG_IEC61883_INDEV) += iec61883.o
OBJS-$(CONFIG_JACK_INDEV) += jack.o timefilter.o
OBJS-$(CONFIG_LAVFI_INDEV) += lavfi.o
OBJS-$(CONFIG_OPENAL_INDEV) += openal-dec.o
OBJS-$(CONFIG_OPENGL_OUTDEV) += opengl_enc.o
OBJS-$(CONFIG_OSS_INDEV) += oss_dec.o oss.o
OBJS-$(CONFIG_OSS_OUTDEV) += oss_enc.o oss.o
OBJS-$(CONFIG_PULSE_INDEV) += pulse_audio_dec.o \
pulse_audio_common.o timefilter.o
OBJS-$(CONFIG_PULSE_OUTDEV) += pulse_audio_enc.o \
pulse_audio_common.o
OBJS-$(CONFIG_QTKIT_INDEV) += qtkit.o
OBJS-$(CONFIG_SDL_OUTDEV) += sdl.o
OBJS-$(CONFIG_SNDIO_INDEV) += sndio_dec.o sndio.o
OBJS-$(CONFIG_SNDIO_OUTDEV) += sndio_enc.o sndio.o
OBJS-$(CONFIG_V4L2_INDEV) += v4l2.o v4l2-common.o timefilter.o
OBJS-$(CONFIG_V4L2_OUTDEV) += v4l2enc.o v4l2-common.o
OBJS-$(CONFIG_V4L_INDEV) += v4l.o
OBJS-$(CONFIG_VFWCAP_INDEV) += vfwcap.o
OBJS-$(CONFIG_X11GRAB_INDEV) += x11grab.o
OBJS-$(CONFIG_X11GRAB_XCB_INDEV) += xcbgrab.o
OBJS-$(CONFIG_XV_OUTDEV) += xv.o
# external libraries
OBJS-$(CONFIG_LIBCDIO_INDEV) += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV) += libdc1394.o
OBJS-$(HAVE_LIBC_MSVCRT) += file_open.o
# Windows resource file
SLIBOBJS-$(HAVE_GNU_WINDRES) += avdeviceres.o
SKIPHEADERS-$(CONFIG_DECKLINK) += decklink_enc.h decklink_dec.h \
decklink_common.h decklink_common_c.h
SKIPHEADERS-$(CONFIG_DSHOW_INDEV) += dshow_capture.h
SKIPHEADERS-$(CONFIG_FBDEV_INDEV) += fbdev_common.h
SKIPHEADERS-$(CONFIG_FBDEV_OUTDEV) += fbdev_common.h
SKIPHEADERS-$(CONFIG_LIBPULSE) += pulse_audio_common.h
SKIPHEADERS-$(CONFIG_V4L2_INDEV) += v4l2-common.h
SKIPHEADERS-$(CONFIG_V4L2_OUTDEV) += v4l2-common.h
SKIPHEADERS-$(HAVE_ALSA_ASOUNDLIB_H) += alsa.h
SKIPHEADERS-$(HAVE_SNDIO_H) += sndio.h
TESTPROGS = timefilter


@@ -0,0 +1,78 @@
/*
* Register all the grabbing devices.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "avdevice.h"
#define REGISTER_OUTDEV(X, x) \
{ \
extern AVOutputFormat ff_##x##_muxer; \
if (CONFIG_##X##_OUTDEV) \
av_register_output_format(&ff_##x##_muxer); \
}
#define REGISTER_INDEV(X, x) \
{ \
extern AVInputFormat ff_##x##_demuxer; \
if (CONFIG_##X##_INDEV) \
av_register_input_format(&ff_##x##_demuxer); \
}
#define REGISTER_INOUTDEV(X, x) REGISTER_OUTDEV(X, x); REGISTER_INDEV(X, x)
void avdevice_register_all(void)
{
static int initialized;
if (initialized)
return;
initialized = 1;
/* devices */
REGISTER_INOUTDEV(ALSA, alsa);
REGISTER_INDEV (AVFOUNDATION, avfoundation);
REGISTER_INDEV (BKTR, bktr);
REGISTER_OUTDEV (CACA, caca);
REGISTER_INOUTDEV(DECKLINK, decklink);
REGISTER_INDEV (DSHOW, dshow);
REGISTER_INDEV (DV1394, dv1394);
REGISTER_INOUTDEV(FBDEV, fbdev);
REGISTER_INDEV (GDIGRAB, gdigrab);
REGISTER_INDEV (IEC61883, iec61883);
REGISTER_INDEV (JACK, jack);
REGISTER_INDEV (LAVFI, lavfi);
REGISTER_INDEV (OPENAL, openal);
REGISTER_OUTDEV (OPENGL, opengl);
REGISTER_INOUTDEV(OSS, oss);
REGISTER_INOUTDEV(PULSE, pulse);
REGISTER_INDEV (QTKIT, qtkit);
REGISTER_OUTDEV (SDL, sdl);
REGISTER_INOUTDEV(SNDIO, sndio);
REGISTER_INOUTDEV(V4L2, v4l2);
// REGISTER_INDEV (V4L, v4l);
REGISTER_INDEV (VFWCAP, vfwcap);
REGISTER_INDEV (X11GRAB, x11grab);
REGISTER_INDEV (X11GRAB_XCB, x11grab_xcb);
REGISTER_OUTDEV (XV, xv);
/* external libraries */
REGISTER_INDEV (LIBCDIO, libcdio);
REGISTER_INDEV (LIBDC1394, libdc1394);
}
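/*
* Minimal usage sketch (an illustration, not part of the original file):
* register the devices once, then look a device format up through the
* regular libavformat API.
*
* @code
* avdevice_register_all();
* AVInputFormat *alsa = av_find_input_format("alsa"); // NULL if not compiled in
* @endcode
*/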


@@ -0,0 +1,399 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: common code
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*/
#include <alsa/asoundlib.h>
#include "avdevice.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "alsa.h"
static av_cold snd_pcm_format_t codec_id_to_pcm_format(int codec_id)
{
switch(codec_id) {
case AV_CODEC_ID_PCM_F64LE: return SND_PCM_FORMAT_FLOAT64_LE;
case AV_CODEC_ID_PCM_F64BE: return SND_PCM_FORMAT_FLOAT64_BE;
case AV_CODEC_ID_PCM_F32LE: return SND_PCM_FORMAT_FLOAT_LE;
case AV_CODEC_ID_PCM_F32BE: return SND_PCM_FORMAT_FLOAT_BE;
case AV_CODEC_ID_PCM_S32LE: return SND_PCM_FORMAT_S32_LE;
case AV_CODEC_ID_PCM_S32BE: return SND_PCM_FORMAT_S32_BE;
case AV_CODEC_ID_PCM_U32LE: return SND_PCM_FORMAT_U32_LE;
case AV_CODEC_ID_PCM_U32BE: return SND_PCM_FORMAT_U32_BE;
case AV_CODEC_ID_PCM_S24LE: return SND_PCM_FORMAT_S24_3LE;
case AV_CODEC_ID_PCM_S24BE: return SND_PCM_FORMAT_S24_3BE;
case AV_CODEC_ID_PCM_U24LE: return SND_PCM_FORMAT_U24_3LE;
case AV_CODEC_ID_PCM_U24BE: return SND_PCM_FORMAT_U24_3BE;
case AV_CODEC_ID_PCM_S16LE: return SND_PCM_FORMAT_S16_LE;
case AV_CODEC_ID_PCM_S16BE: return SND_PCM_FORMAT_S16_BE;
case AV_CODEC_ID_PCM_U16LE: return SND_PCM_FORMAT_U16_LE;
case AV_CODEC_ID_PCM_U16BE: return SND_PCM_FORMAT_U16_BE;
case AV_CODEC_ID_PCM_S8: return SND_PCM_FORMAT_S8;
case AV_CODEC_ID_PCM_U8: return SND_PCM_FORMAT_U8;
case AV_CODEC_ID_PCM_MULAW: return SND_PCM_FORMAT_MU_LAW;
case AV_CODEC_ID_PCM_ALAW: return SND_PCM_FORMAT_A_LAW;
default: return SND_PCM_FORMAT_UNKNOWN;
}
}
#define MAKE_REORDER_FUNC(NAME, TYPE, CHANNELS, LAYOUT, MAP) \
static void alsa_reorder_ ## NAME ## _ ## LAYOUT(const void *in_v, \
void *out_v, \
int n) \
{ \
const TYPE *in = in_v; \
TYPE *out = out_v; \
\
while (n-- > 0) { \
MAP \
in += CHANNELS; \
out += CHANNELS; \
} \
}
#define MAKE_REORDER_FUNCS(CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(int8, int8_t, CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(int16, int16_t, CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(int32, int32_t, CHANNELS, LAYOUT, MAP) \
MAKE_REORDER_FUNC(f32, float, CHANNELS, LAYOUT, MAP)
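/* The maps below convert between channel orders: in FFmpeg's default
* layouts the front-center (and LFE) channels precede the back pair,
* while ALSA expects them after it. Each index is a channel position
* within one interleaved frame. */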
MAKE_REORDER_FUNCS(5, out_50, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[3]; \
out[3] = in[4]; \
out[4] = in[2]; \
);
MAKE_REORDER_FUNCS(6, out_51, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
out[3] = in[5]; \
out[4] = in[2]; \
out[5] = in[3]; \
);
MAKE_REORDER_FUNCS(8, out_71, \
out[0] = in[0]; \
out[1] = in[1]; \
out[2] = in[4]; \
out[3] = in[5]; \
out[4] = in[2]; \
out[5] = in[3]; \
out[6] = in[6]; \
out[7] = in[7]; \
);
#define FORMAT_I8 0
#define FORMAT_I16 1
#define FORMAT_I32 2
#define FORMAT_F32 3
#define PICK_REORDER(layout)\
switch(format) {\
case FORMAT_I8: s->reorder_func = alsa_reorder_int8_out_ ##layout; break;\
case FORMAT_I16: s->reorder_func = alsa_reorder_int16_out_ ##layout; break;\
case FORMAT_I32: s->reorder_func = alsa_reorder_int32_out_ ##layout; break;\
case FORMAT_F32: s->reorder_func = alsa_reorder_f32_out_ ##layout; break;\
}
static av_cold int find_reorder_func(AlsaData *s, int codec_id, uint64_t layout, int out)
{
int format;
/* reordering input is not currently supported */
if (!out)
return AVERROR(ENOSYS);
/* reordering is not needed for QUAD or 2_2 layout */
if (layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2)
return 0;
switch (codec_id) {
case AV_CODEC_ID_PCM_S8:
case AV_CODEC_ID_PCM_U8:
case AV_CODEC_ID_PCM_ALAW:
case AV_CODEC_ID_PCM_MULAW: format = FORMAT_I8; break;
case AV_CODEC_ID_PCM_S16LE:
case AV_CODEC_ID_PCM_S16BE:
case AV_CODEC_ID_PCM_U16LE:
case AV_CODEC_ID_PCM_U16BE: format = FORMAT_I16; break;
case AV_CODEC_ID_PCM_S32LE:
case AV_CODEC_ID_PCM_S32BE:
case AV_CODEC_ID_PCM_U32LE:
case AV_CODEC_ID_PCM_U32BE: format = FORMAT_I32; break;
case AV_CODEC_ID_PCM_F32LE:
case AV_CODEC_ID_PCM_F32BE: format = FORMAT_F32; break;
default: return AVERROR(ENOSYS);
}
if (layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0)
PICK_REORDER(50)
else if (layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1)
PICK_REORDER(51)
else if (layout == AV_CH_LAYOUT_7POINT1)
PICK_REORDER(71)
return s->reorder_func ? 0 : AVERROR(ENOSYS);
}
av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode,
unsigned int *sample_rate,
int channels, enum AVCodecID *codec_id)
{
AlsaData *s = ctx->priv_data;
const char *audio_device;
int res, flags = 0;
snd_pcm_format_t format;
snd_pcm_t *h;
snd_pcm_hw_params_t *hw_params;
snd_pcm_uframes_t buffer_size, period_size;
uint64_t layout = ctx->streams[0]->codec->channel_layout;
if (ctx->filename[0] == 0) audio_device = "default";
else audio_device = ctx->filename;
if (*codec_id == AV_CODEC_ID_NONE)
*codec_id = DEFAULT_CODEC_ID;
format = codec_id_to_pcm_format(*codec_id);
if (format == SND_PCM_FORMAT_UNKNOWN) {
av_log(ctx, AV_LOG_ERROR, "sample format 0x%04x is not supported\n", *codec_id);
return AVERROR(ENOSYS);
}
s->frame_size = av_get_bits_per_sample(*codec_id) / 8 * channels;
if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
flags = SND_PCM_NONBLOCK;
}
res = snd_pcm_open(&h, audio_device, mode, flags);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot open audio device %s (%s)\n",
audio_device, snd_strerror(res));
return AVERROR(EIO);
}
res = snd_pcm_hw_params_malloc(&hw_params);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot allocate hardware parameter structure (%s)\n",
snd_strerror(res));
goto fail1;
}
res = snd_pcm_hw_params_any(h, hw_params);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot initialize hardware parameter structure (%s)\n",
snd_strerror(res));
goto fail;
}
res = snd_pcm_hw_params_set_access(h, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set access type (%s)\n",
snd_strerror(res));
goto fail;
}
res = snd_pcm_hw_params_set_format(h, hw_params, format);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set sample format 0x%04x %d (%s)\n",
*codec_id, format, snd_strerror(res));
goto fail;
}
res = snd_pcm_hw_params_set_rate_near(h, hw_params, sample_rate, 0);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set sample rate (%s)\n",
snd_strerror(res));
goto fail;
}
res = snd_pcm_hw_params_set_channels(h, hw_params, channels);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set channel count to %d (%s)\n",
channels, snd_strerror(res));
goto fail;
}
snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size);
buffer_size = FFMIN(buffer_size, ALSA_BUFFER_SIZE_MAX);
/* TODO: maybe use ctx->max_picture_buffer somehow */
res = snd_pcm_hw_params_set_buffer_size_near(h, hw_params, &buffer_size);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set ALSA buffer size (%s)\n",
snd_strerror(res));
goto fail;
}
snd_pcm_hw_params_get_period_size_min(hw_params, &period_size, NULL);
if (!period_size)
period_size = buffer_size / 4;
res = snd_pcm_hw_params_set_period_size_near(h, hw_params, &period_size, NULL);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set ALSA period size (%s)\n",
snd_strerror(res));
goto fail;
}
s->period_size = period_size;
res = snd_pcm_hw_params(h, hw_params);
if (res < 0) {
av_log(ctx, AV_LOG_ERROR, "cannot set parameters (%s)\n",
snd_strerror(res));
goto fail;
}
snd_pcm_hw_params_free(hw_params);
if (channels > 2 && layout) {
if (find_reorder_func(s, *codec_id, layout, mode == SND_PCM_STREAM_PLAYBACK) < 0) {
char name[128];
av_get_channel_layout_string(name, sizeof(name), channels, layout);
av_log(ctx, AV_LOG_WARNING, "ALSA channel layout unknown or unimplemented for %s %s.\n",
name, mode == SND_PCM_STREAM_PLAYBACK ? "playback" : "capture");
}
if (s->reorder_func) {
s->reorder_buf_size = buffer_size;
s->reorder_buf = av_malloc_array(s->reorder_buf_size, s->frame_size);
if (!s->reorder_buf)
goto fail1;
}
}
s->h = h;
return 0;
fail:
snd_pcm_hw_params_free(hw_params);
fail1:
snd_pcm_close(h);
return AVERROR(EIO);
}
av_cold int ff_alsa_close(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
av_freep(&s->reorder_buf);
if (CONFIG_ALSA_INDEV)
ff_timefilter_destroy(s->timefilter);
snd_pcm_close(s->h);
return 0;
}
int ff_alsa_xrun_recover(AVFormatContext *s1, int err)
{
AlsaData *s = s1->priv_data;
snd_pcm_t *handle = s->h;
av_log(s1, AV_LOG_WARNING, "ALSA buffer xrun.\n");
if (err == -EPIPE) {
err = snd_pcm_prepare(handle);
if (err < 0) {
av_log(s1, AV_LOG_ERROR, "cannot recover from underrun (snd_pcm_prepare failed: %s)\n", snd_strerror(err));
return AVERROR(EIO);
}
} else if (err == -ESTRPIPE) {
av_log(s1, AV_LOG_ERROR, "-ESTRPIPE... Unsupported!\n");
return -1;
}
return err;
}
int ff_alsa_extend_reorder_buf(AlsaData *s, int min_size)
{
int size = s->reorder_buf_size;
void *r;
av_assert0(size != 0);
while (size < min_size)
size *= 2;
r = av_realloc_array(s->reorder_buf, size, s->frame_size);
if (!r)
return AVERROR(ENOMEM);
s->reorder_buf = r;
s->reorder_buf_size = size;
return 0;
}
/* ported from alsa-utils/aplay.c */
int ff_alsa_get_device_list(AVDeviceInfoList *device_list, snd_pcm_stream_t stream_type)
{
int ret = 0;
void **hints, **n;
char *name = NULL, *descr = NULL, *io = NULL, *tmp;
AVDeviceInfo *new_device = NULL;
const char *filter = stream_type == SND_PCM_STREAM_PLAYBACK ? "Output" : "Input";
if (snd_device_name_hint(-1, "pcm", &hints) < 0)
return AVERROR_EXTERNAL;
n = hints;
while (*n && !ret) {
name = snd_device_name_get_hint(*n, "NAME");
descr = snd_device_name_get_hint(*n, "DESC");
io = snd_device_name_get_hint(*n, "IOID");
/* some hints may lack NAME or DESC; skip them to avoid dereferencing NULL below */
if (!name || !descr)
goto fail;
if (!io || !strcmp(io, filter)) {
new_device = av_mallocz(sizeof(AVDeviceInfo));
if (!new_device) {
ret = AVERROR(ENOMEM);
goto fail;
}
new_device->device_name = av_strdup(name);
if ((tmp = strrchr(descr, '\n')) && tmp[1])
new_device->device_description = av_strdup(&tmp[1]);
else
new_device->device_description = av_strdup(descr);
if (!new_device->device_description || !new_device->device_name) {
ret = AVERROR(ENOMEM);
goto fail;
}
if ((ret = av_dynarray_add_nofree(&device_list->devices,
&device_list->nb_devices, new_device)) < 0) {
goto fail;
}
if (!strcmp(new_device->device_name, "default"))
device_list->default_device = device_list->nb_devices - 1;
new_device = NULL;
}
fail:
free(io);
free(name);
free(descr);
n++;
}
if (new_device) {
av_free(new_device->device_description);
av_free(new_device->device_name);
av_free(new_device);
}
snd_device_name_free_hint(hints);
return ret;
}


@@ -0,0 +1,104 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: definitions and structures
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*/
#ifndef AVDEVICE_ALSA_H
#define AVDEVICE_ALSA_H
#include <alsa/asoundlib.h>
#include "config.h"
#include "libavutil/log.h"
#include "timefilter.h"
#include "avdevice.h"
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
other formats */
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
typedef void (*ff_reorder_func)(const void *, void *, int);
#define ALSA_BUFFER_SIZE_MAX 65536
typedef struct AlsaData {
AVClass *class;
snd_pcm_t *h;
int frame_size; ///< bytes per sample * channels
int period_size; ///< preferred size for reads and writes, in frames
int sample_rate; ///< sample rate set by user
int channels; ///< number of channels set by user
int last_period;
TimeFilter *timefilter;
void (*reorder_func)(const void *, void *, int);
void *reorder_buf;
int reorder_buf_size; ///< in frames
int64_t timestamp; ///< current timestamp, without latency applied.
} AlsaData;
/**
* Open an ALSA PCM.
*
* @param s media file handle
* @param mode either SND_PCM_STREAM_CAPTURE or SND_PCM_STREAM_PLAYBACK
* @param sample_rate in: requested sample rate;
* out: actually selected sample rate
* @param channels number of channels
* @param codec_id in: requested AVCodecID or AV_CODEC_ID_NONE;
* out: actually selected AVCodecID, changed only if
* AV_CODEC_ID_NONE was requested
*
* @return 0 if OK, AVERROR_xxx on error
*/
int ff_alsa_open(AVFormatContext *s, snd_pcm_stream_t mode,
unsigned int *sample_rate,
int channels, enum AVCodecID *codec_id);
/**
* Close the ALSA PCM.
*
* @param s1 media file handle
*
* @return 0
*/
int ff_alsa_close(AVFormatContext *s1);
/**
* Try to recover from ALSA buffer underrun.
*
* @param s1 media file handle
* @param err error code reported by the previous ALSA call
*
* @return 0 if OK, AVERROR_xxx on error
*/
int ff_alsa_xrun_recover(AVFormatContext *s1, int err);
int ff_alsa_extend_reorder_buf(AlsaData *s, int size);
int ff_alsa_get_device_list(AVDeviceInfoList *device_list, snd_pcm_stream_t stream_type);
#endif /* AVDEVICE_ALSA_H */


@@ -0,0 +1,168 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: input
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*
* This avdevice decoder can capture audio from an ALSA (Advanced
* Linux Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
* capture, for example "default" or "plughw:1"; see the ALSA documentation
* for naming conventions. The empty string is equivalent to "default".
*
* The capture period is set to the lowest value available for the device,
* which gives a low latency suitable for real-time capture.
*
* The PTS is the Unix time in microseconds.
*
* Due to a bug in the ALSA library
* (https://bugtrack.alsa-project.org/alsa-bug/view.php?id=4308), this
* decoder does not work with certain ALSA plugins, especially the dsnoop
* plugin.
*/
#include <alsa/asoundlib.h>
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "alsa.h"
static av_cold int audio_read_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
AVStream *st;
int ret;
enum AVCodecID codec_id;
st = avformat_new_stream(s1, NULL);
if (!st) {
av_log(s1, AV_LOG_ERROR, "Cannot add stream\n");
return AVERROR(ENOMEM);
}
codec_id = s1->audio_codec_id;
ret = ff_alsa_open(s1, SND_PCM_STREAM_CAPTURE, &s->sample_rate, s->channels,
&codec_id);
if (ret < 0) {
return AVERROR(EIO);
}
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = codec_id;
st->codec->sample_rate = s->sample_rate;
st->codec->channels = s->channels;
st->codec->frame_size = s->frame_size;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
/* microseconds instead of seconds, MHz instead of Hz */
s->timefilter = ff_timefilter_new(1000000.0 / s->sample_rate,
s->period_size, 1.5E-6);
if (!s->timefilter)
goto fail;
return 0;
fail:
snd_pcm_close(s->h);
return AVERROR(EIO);
}
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
AlsaData *s = s1->priv_data;
int res;
int64_t dts;
snd_pcm_sframes_t delay = 0;
if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) {
return AVERROR(EIO);
}
while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) {
if (res == -EAGAIN) {
av_free_packet(pkt);
return AVERROR(EAGAIN);
}
if (ff_alsa_xrun_recover(s1, res) < 0) {
av_log(s1, AV_LOG_ERROR, "ALSA read error: %s\n",
snd_strerror(res));
av_free_packet(pkt);
return AVERROR(EIO);
}
ff_timefilter_reset(s->timefilter);
}
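/* timestamp the first sample of this packet: take the current wallclock and
* subtract the duration of the samples still buffered in the device (delay)
* plus the samples just read (res) */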
dts = av_gettime();
snd_pcm_delay(s->h, &delay);
dts -= av_rescale(delay + res, 1000000, s->sample_rate);
pkt->pts = ff_timefilter_update(s->timefilter, dts, s->last_period);
s->last_period = res;
pkt->size = res * s->frame_size;
return 0;
}
static int audio_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
return ff_alsa_get_device_list(device_list, SND_PCM_STREAM_CAPTURE);
}
static const AVOption options[] = {
{ "sample_rate", "", offsetof(AlsaData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(AlsaData, channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass alsa_demuxer_class = {
.class_name = "ALSA demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_alsa_demuxer = {
.name = "alsa",
.long_name = NULL_IF_CONFIG_SMALL("ALSA audio input"),
.priv_data_size = sizeof(AlsaData),
.read_header = audio_read_header,
.read_packet = audio_read_packet,
.read_close = ff_alsa_close,
.get_device_list = audio_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &alsa_demuxer_class,
};
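/*
* Capture usage sketch (illustrative only; error handling omitted and the
* "default" device assumed):
*
* @code
* AVFormatContext *ic = NULL;
* AVPacket pkt;
*
* avdevice_register_all();
* avformat_open_input(&ic, "default", av_find_input_format("alsa"), NULL);
* while (av_read_frame(ic, &pkt) >= 0) {
*     // pkt.data holds interleaved PCM; pkt.pts is a Unix time in microseconds
*     av_free_packet(&pkt);
* }
* avformat_close_input(&ic);
* @endcode
*/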


@@ -0,0 +1,174 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: output
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*
* This avdevice encoder can play audio to an ALSA (Advanced Linux
* Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
* playback, for example "default" or "plughw:1"; see the ALSA documentation
* for naming conventions. The empty string is equivalent to "default".
*
* The playback period is set to the lowest value available for the device,
* which gives a low latency suitable for real-time playback.
*/
#include <alsa/asoundlib.h>
#include "libavutil/internal.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "alsa.h"
static av_cold int audio_write_header(AVFormatContext *s1)
{
AlsaData *s = s1->priv_data;
AVStream *st = NULL;
unsigned int sample_rate;
enum AVCodecID codec_id;
int res;
if (s1->nb_streams != 1 || s1->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
av_log(s1, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
return AVERROR(EINVAL);
}
st = s1->streams[0];
sample_rate = st->codec->sample_rate;
codec_id = st->codec->codec_id;
res = ff_alsa_open(s1, SND_PCM_STREAM_PLAYBACK, &sample_rate,
st->codec->channels, &codec_id);
if (sample_rate != st->codec->sample_rate) {
av_log(s1, AV_LOG_ERROR,
"sample rate %d not available, nearest is %d\n",
st->codec->sample_rate, sample_rate);
goto fail;
}
avpriv_set_pts_info(st, 64, 1, sample_rate);
return res;
fail:
snd_pcm_close(s->h);
return AVERROR(EIO);
}
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
AlsaData *s = s1->priv_data;
int res;
int size = pkt->size;
uint8_t *buf = pkt->data;
size /= s->frame_size;
if (pkt->dts != AV_NOPTS_VALUE)
s->timestamp = pkt->dts;
s->timestamp += pkt->duration ? pkt->duration : size;
if (s->reorder_func) {
if (size > s->reorder_buf_size)
if (ff_alsa_extend_reorder_buf(s, size))
return AVERROR(ENOMEM);
s->reorder_func(buf, s->reorder_buf, size);
buf = s->reorder_buf;
}
while ((res = snd_pcm_writei(s->h, buf, size)) < 0) {
if (res == -EAGAIN) {
return AVERROR(EAGAIN);
}
if (ff_alsa_xrun_recover(s1, res) < 0) {
av_log(s1, AV_LOG_ERROR, "ALSA write error: %s\n",
snd_strerror(res));
return AVERROR(EIO);
}
}
return 0;
}
static int audio_write_frame(AVFormatContext *s1, int stream_index,
AVFrame **frame, unsigned flags)
{
AlsaData *s = s1->priv_data;
AVPacket pkt;
/* ff_alsa_open() should have accepted only supported formats */
if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
return av_sample_fmt_is_planar(s1->streams[stream_index]->codec->sample_fmt) ?
AVERROR(EINVAL) : 0;
/* set only used fields */
pkt.data = (*frame)->data[0];
pkt.size = (*frame)->nb_samples * s->frame_size;
pkt.dts = (*frame)->pkt_dts;
pkt.duration = av_frame_get_pkt_duration(*frame);
return audio_write_packet(s1, &pkt);
}
static void
audio_get_output_timestamp(AVFormatContext *s1, int stream,
int64_t *dts, int64_t *wall)
{
AlsaData *s = s1->priv_data;
snd_pcm_sframes_t delay = 0;
*wall = av_gettime();
snd_pcm_delay(s->h, &delay);
*dts = s->timestamp - delay;
}
static int audio_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
return ff_alsa_get_device_list(device_list, SND_PCM_STREAM_PLAYBACK);
}
static const AVClass alsa_muxer_class = {
.class_name = "ALSA muxer",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};
AVOutputFormat ff_alsa_muxer = {
.name = "alsa",
.long_name = NULL_IF_CONFIG_SMALL("ALSA audio output"),
.priv_data_size = sizeof(AlsaData),
.audio_codec = DEFAULT_CODEC_ID,
.video_codec = AV_CODEC_ID_NONE,
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = ff_alsa_close,
.write_uncoded_frame = audio_write_frame,
.get_device_list = audio_get_device_list,
.get_output_timestamp = audio_get_output_timestamp,
.flags = AVFMT_NOFILE,
.priv_class = &alsa_muxer_class,
};
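/*
* Playback usage sketch (illustrative only; assumes the single audio stream
* has already been created and its codec parameters set to a PCM format that
* ff_alsa_open() accepts):
*
* @code
* AVFormatContext *oc = NULL;
*
* avdevice_register_all();
* avformat_alloc_output_context2(&oc, NULL, "alsa", "default");
* // add one audio stream, set codec_id/sample_rate/channels, then:
* avformat_write_header(oc, NULL);
* // feed PCM packets with av_write_frame(oc, &pkt) ...
* av_write_trailer(oc);
* avformat_free_context(oc);
* @endcode
*/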


@@ -0,0 +1,14 @@
EXPORTS
DllStartup
avdevice_app_to_dev_control_message
avdevice_capabilities_create
avdevice_capabilities_free
avdevice_configuration
avdevice_dev_to_app_control_message
avdevice_free_list_devices
avdevice_license
avdevice_list_devices
avdevice_list_input_sources
avdevice_list_output_sinks
avdevice_register_all
avdevice_version


@@ -0,0 +1,14 @@
EXPORTS
DllStartup @1
avdevice_app_to_dev_control_message @2
avdevice_capabilities_create @3
avdevice_capabilities_free @4
avdevice_configuration @5
avdevice_dev_to_app_control_message @6
avdevice_free_list_devices @7
avdevice_license @8
avdevice_list_devices @9
avdevice_list_input_sources @10
avdevice_list_output_sinks @11
avdevice_register_all @12
avdevice_version @13


@@ -0,0 +1,271 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "internal.h"
#include "config.h"
#include "libavutil/ffversion.h"
const char av_device_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define OFFSET(x) offsetof(AVDeviceCapabilitiesQuery, x)
const AVOption av_device_capabilities[] = {
{ "codec", "codec", OFFSET(codec), AV_OPT_TYPE_INT,
{.i64 = AV_CODEC_ID_NONE}, AV_CODEC_ID_NONE, INT_MAX, E|D|A|V },
{ "sample_format", "sample format", OFFSET(sample_format), AV_OPT_TYPE_SAMPLE_FMT,
{.i64 = AV_SAMPLE_FMT_NONE}, AV_SAMPLE_FMT_NONE, INT_MAX, E|D|A },
{ "sample_rate", "sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT,
{.i64 = -1}, -1, INT_MAX, E|D|A },
{ "channels", "channels", OFFSET(channels), AV_OPT_TYPE_INT,
{.i64 = -1}, -1, INT_MAX, E|D|A },
{ "channel_layout", "channel layout", OFFSET(channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT,
{.i64 = -1}, -1, INT_MAX, E|D|A },
{ "pixel_format", "pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT,
{.i64 = AV_PIX_FMT_NONE}, AV_PIX_FMT_NONE, INT_MAX, E|D|V },
{ "window_size", "window size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE,
{.str = NULL}, -1, INT_MAX, E|D|V },
{ "frame_size", "frame size", OFFSET(frame_width), AV_OPT_TYPE_IMAGE_SIZE,
{.str = NULL}, -1, INT_MAX, E|D|V },
{ "fps", "fps", OFFSET(fps), AV_OPT_TYPE_RATIONAL,
{.dbl = -1}, -1, INT_MAX, E|D|V },
{ NULL }
};
#undef E
#undef D
#undef A
#undef V
#undef OFFSET
unsigned avdevice_version(void)
{
av_assert0(LIBAVDEVICE_VERSION_MICRO >= 100);
return LIBAVDEVICE_VERSION_INT;
}
const char * avdevice_configuration(void)
{
return FFMPEG_CONFIGURATION;
}
const char * avdevice_license(void)
{
#define LICENSE_PREFIX "libavdevice license: "
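/* concatenating the prefix embeds the full "libavdevice license: ..." string
* in the binary; the pointer arithmetic then skips past the prefix so only
* the bare license name is returned */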
return LICENSE_PREFIX FFMPEG_LICENSE + sizeof(LICENSE_PREFIX) - 1;
}
static void *device_next(void *prev, int output,
AVClassCategory c1, AVClassCategory c2)
{
const AVClass *pc;
AVClassCategory category = AV_CLASS_CATEGORY_NA;
do {
if (output) {
if (!(prev = av_oformat_next(prev)))
break;
pc = ((AVOutputFormat *)prev)->priv_class;
} else {
if (!(prev = av_iformat_next(prev)))
break;
pc = ((AVInputFormat *)prev)->priv_class;
}
if (!pc)
continue;
category = pc->category;
} while (category != c1 && category != c2);
return prev;
}
AVInputFormat *av_input_audio_device_next(AVInputFormat *d)
{
return device_next(d, 0, AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
AV_CLASS_CATEGORY_DEVICE_INPUT);
}
AVInputFormat *av_input_video_device_next(AVInputFormat *d)
{
return device_next(d, 0, AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
AV_CLASS_CATEGORY_DEVICE_INPUT);
}
AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d)
{
return device_next(d, 1, AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
AV_CLASS_CATEGORY_DEVICE_OUTPUT);
}
AVOutputFormat *av_output_video_device_next(AVOutputFormat *d)
{
return device_next(d, 1, AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
AV_CLASS_CATEGORY_DEVICE_OUTPUT);
}
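/*
* Iteration sketch (illustrative only): after avdevice_register_all(), the
* iterators above enumerate the registered devices, e.g.
*
* @code
* AVInputFormat *fmt = NULL;
* while ((fmt = av_input_audio_device_next(fmt)))
*     printf("%s: %s\n", fmt->name, fmt->long_name);
* @endcode
*/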
int avdevice_app_to_dev_control_message(struct AVFormatContext *s, enum AVAppToDevMessageType type,
void *data, size_t data_size)
{
if (!s->oformat || !s->oformat->control_message)
return AVERROR(ENOSYS);
return s->oformat->control_message(s, type, data, data_size);
}
int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type,
void *data, size_t data_size)
{
if (!av_format_get_control_message_cb(s))
return AVERROR(ENOSYS);
return av_format_get_control_message_cb(s)(s, type, data, data_size);
}
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
AVDictionary **device_options)
{
int ret;
av_assert0(s && caps);
av_assert0(s->iformat || s->oformat);
if ((s->oformat && !s->oformat->create_device_capabilities) ||
(s->iformat && !s->iformat->create_device_capabilities))
return AVERROR(ENOSYS);
*caps = av_mallocz(sizeof(**caps));
if (!(*caps))
return AVERROR(ENOMEM);
(*caps)->device_context = s;
if (((ret = av_opt_set_dict(s->priv_data, device_options)) < 0))
goto fail;
if (s->iformat) {
if ((ret = s->iformat->create_device_capabilities(s, *caps)) < 0)
goto fail;
} else {
if ((ret = s->oformat->create_device_capabilities(s, *caps)) < 0)
goto fail;
}
av_opt_set_defaults(*caps);
return 0;
fail:
av_freep(caps);
return ret;
}
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s)
{
if (!s || !caps || !(*caps))
return;
av_assert0(s->iformat || s->oformat);
if (s->iformat) {
if (s->iformat->free_device_capabilities)
s->iformat->free_device_capabilities(s, *caps);
} else {
if (s->oformat->free_device_capabilities)
s->oformat->free_device_capabilities(s, *caps);
}
av_freep(caps);
}
int avdevice_list_devices(AVFormatContext *s, AVDeviceInfoList **device_list)
{
int ret;
av_assert0(s);
av_assert0(device_list);
av_assert0(s->oformat || s->iformat);
if ((s->oformat && !s->oformat->get_device_list) ||
(s->iformat && !s->iformat->get_device_list)) {
*device_list = NULL;
return AVERROR(ENOSYS);
}
*device_list = av_mallocz(sizeof(AVDeviceInfoList));
if (!(*device_list))
return AVERROR(ENOMEM);
/* no default device by default */
(*device_list)->default_device = -1;
if (s->oformat)
ret = s->oformat->get_device_list(s, *device_list);
else
ret = s->iformat->get_device_list(s, *device_list);
if (ret < 0)
avdevice_free_list_devices(device_list);
return ret;
}
static int list_devices_for_context(AVFormatContext *s, AVDictionary *options,
AVDeviceInfoList **device_list)
{
AVDictionary *tmp = NULL;
int ret;
av_dict_copy(&tmp, options, 0);
if ((ret = av_opt_set_dict2(s, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
goto fail;
ret = avdevice_list_devices(s, device_list);
fail:
av_dict_free(&tmp);
avformat_free_context(s);
return ret;
}
int avdevice_list_input_sources(AVInputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list)
{
AVFormatContext *s = NULL;
int ret;
if ((ret = ff_alloc_input_device_context(&s, device, device_name)) < 0)
return ret;
return list_devices_for_context(s, device_options, device_list);
}
int avdevice_list_output_sinks(AVOutputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list)
{
AVFormatContext *s = NULL;
int ret;
if ((ret = avformat_alloc_output_context2(&s, device, device_name, NULL)) < 0)
return ret;
return list_devices_for_context(s, device_options, device_list);
}
void avdevice_free_list_devices(AVDeviceInfoList **device_list)
{
AVDeviceInfoList *list;
AVDeviceInfo *dev;
int i;
av_assert0(device_list);
list = *device_list;
if (!list)
return;
for (i = 0; i < list->nb_devices; i++) {
dev = list->devices[i];
if (dev) {
av_freep(&dev->device_name);
av_freep(&dev->device_description);
av_free(dev);
}
}
av_freep(&list->devices);
av_freep(device_list);
}


@@ -0,0 +1,509 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_AVDEVICE_H
#define AVDEVICE_AVDEVICE_H
#include "version.h"
/**
* @file
* @ingroup lavd
* Main libavdevice API header
*/
/**
* @defgroup lavd Special devices muxing/demuxing library
* @{
* Libavdevice is a complementary library to @ref libavf "libavformat". It
* provides various "special" platform-specific muxers and demuxers, e.g. for
* grabbing devices, audio capture and playback etc. As a consequence, the
* (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
* I/O functions). The filename passed to avformat_open_input() often does not
* refer to an actually existing file, but has some special device-specific
* meaning - e.g. for x11grab it is the display name.
*
* To use libavdevice, simply call avdevice_register_all() to register all
* compiled muxers and demuxers. They all use standard libavformat API.
* @}
*/
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavformat/avformat.h"
/**
* Return the LIBAVDEVICE_VERSION_INT constant.
*/
unsigned avdevice_version(void);
/**
* Return the libavdevice build-time configuration.
*/
const char *avdevice_configuration(void);
/**
* Return the libavdevice license.
*/
const char *avdevice_license(void);
/**
* Initialize libavdevice and register all the input and output devices.
* @warning This function is not thread safe.
*/
void avdevice_register_all(void);
/**
* Audio input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*/
AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
/**
* Video input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*/
AVInputFormat *av_input_video_device_next(AVInputFormat *d);
/**
* Audio output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*/
AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
/**
* Video output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*/
AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
typedef struct AVDeviceRect {
int x; /**< x coordinate of top left corner */
int y; /**< y coordinate of top left corner */
int width; /**< width */
int height; /**< height */
} AVDeviceRect;
/**
* Message types used by avdevice_app_to_dev_control_message().
*/
enum AVAppToDevMessageType {
/**
* Dummy message.
*/
AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
/**
* Window size change message.
*
* Message is sent to the device every time the application changes the size
* of the window the device renders to.
* Message should also be sent right after the window is created.
*
* data: AVDeviceRect: new window size.
*/
AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
/**
* Repaint request message.
*
* Message is sent to the device when the window has to be repainted.
*
* data: AVDeviceRect: area required to be repainted.
* NULL: whole area is required to be repainted.
*/
AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
/**
* Request pause/play.
*
* Application requests pause/unpause playback.
* Mostly usable with devices that have an internal buffer.
* By default devices are not paused.
*
* data: NULL
*/
AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
/**
* Volume control message.
*
* Set volume level. It is device-dependent whether volume
* is changed per stream or system-wide. Per-stream volume
* change is expected when possible.
*
* data: double: new volume with range of 0.0 - 1.0.
*/
AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
/**
* Mute control messages.
*
* Change mute state. It is device-dependent whether mute status
* is changed per stream or system-wide. Per-stream mute status
* change is expected when possible.
*
* data: NULL.
*/
AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
/**
* Get volume/mute messages.
*
* Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
* AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
*
* data: NULL.
*/
AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
};
/**
* Message types used by avdevice_dev_to_app_control_message().
*/
enum AVDevToAppMessageType {
/**
* Dummy message.
*/
AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
/**
* Create window buffer message.
*
* Device requests to create a window buffer. Exact meaning is device-
* and application-dependent. Message is sent before rendering first
* frame and all one-shot initializations should be done here.
* Application is allowed to ignore preferred window buffer size.
*
* @note: Application is obligated to inform about window buffer size
* with AV_APP_TO_DEV_WINDOW_SIZE message.
*
* data: AVDeviceRect: preferred size of the window buffer.
* NULL: no preferred size of the window buffer.
*/
AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
/**
* Prepare window buffer message.
*
* Device requests to prepare a window buffer for rendering.
* Exact meaning is device- and application-dependent.
* Message is sent before rendering of each frame.
*
* data: NULL.
*/
AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
/**
* Display window buffer message.
*
* Device requests to display a window buffer.
* Message is sent when a new frame is ready to be displayed.
* Usually buffers need to be swapped in the handler of this message.
*
* data: NULL.
*/
AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
/**
* Destroy window buffer message.
*
* Device requests to destroy a window buffer.
* Message is sent when the device is about to be destroyed and the window
* buffer is no longer required.
*
* data: NULL.
*/
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
/**
* Buffer fullness status messages.
*
* Device signals buffer overflow/underflow.
*
* data: NULL.
*/
AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
/**
* Buffer readable/writable.
*
* Device informs that buffer is readable/writable.
* When possible, device informs how many bytes can be read/written.
*
* @warning Device may not inform when the number of bytes that can be
* read/written changes.
*
* data: int64_t: amount of bytes available to read/write.
* NULL: amount of bytes available to read/write is not known.
*/
AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
/**
* Mute state change message.
*
* Device informs that mute state has changed.
*
* data: int: 0 for not muted state, non-zero for muted state.
*/
AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
/**
* Volume level change message.
*
* Device informs that volume level has changed.
*
* data: double: new volume with range of 0.0 - 1.0.
*/
AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
};
/**
* Send control message from application to device.
*
* @param s device context.
* @param type message type.
* @param data message data. Exact type depends on message type.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when device doesn't implement handler of the message.
*/
int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
enum AVAppToDevMessageType type,
void *data, size_t data_size);
/**
* Send control message from device to application.
*
* @param s device context.
* @param type message type.
* @param data message data. Can be NULL.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when application doesn't implement handler of the message.
*/
int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
enum AVDevToAppMessageType type,
void *data, size_t data_size);
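/*
* Example (a sketch, not normative): notify an output device that the
* application window was resized; `oc` is assumed to be an opened output
* device context such as sdl or opengl.
*
* @code
* AVDeviceRect size = { .x = 0, .y = 0, .width = 1280, .height = 720 };
* avdevice_app_to_dev_control_message(oc, AV_APP_TO_DEV_WINDOW_SIZE,
*                                     &size, sizeof(size));
* @endcode
*/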
/**
* The following API allows the user to probe device capabilities (supported codecs,
* pixel formats, sample formats, resolutions, channel counts, etc).
* It is built on top of the AVOption API.
* Queried capabilities make it possible to set up converters of video or audio
* parameters that match the device.
*
* List of capabilities that can be queried:
* - Capabilities valid for both audio and video devices:
* - codec: supported audio/video codecs.
* type: AV_OPT_TYPE_INT (AVCodecID value)
* - Capabilities valid for audio devices:
* - sample_format: supported sample formats.
* type: AV_OPT_TYPE_INT (AVSampleFormat value)
* - sample_rate: supported sample rates.
* type: AV_OPT_TYPE_INT
* - channels: supported number of channels.
* type: AV_OPT_TYPE_INT
* - channel_layout: supported channel layouts.
* type: AV_OPT_TYPE_INT64
* - Capabilities valid for video devices:
* - pixel_format: supported pixel formats.
* type: AV_OPT_TYPE_INT (AVPixelFormat value)
* - window_size: supported window sizes (describes size of the window size presented to the user).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - frame_size: supported frame sizes (describes size of provided video frames).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - fps: supported fps values
* type: AV_OPT_TYPE_RATIONAL
*
* The value of a capability may be set by the user with the av_opt_set() function
* on the AVDeviceCapabilitiesQuery object. Subsequent queries will
* limit results to the values matching the already-set capabilities.
* For example, setting a codec may narrow the number of formats or fps values
* returned by the next query. Setting an invalid value may reduce the results to zero.
*
* Example of usage, based on the opengl output device:
*
* @code
* AVFormatContext *oc = NULL;
* AVDeviceCapabilitiesQuery *caps = NULL;
* AVOptionRanges *ranges;
* int ret;
*
* if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
* goto fail;
* if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
* goto fail;
*
* //query codecs
* if ((ret = av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
* goto fail;
* //pick codec here and set it
* av_opt_set_int(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
*
* //query format
* if ((ret = av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE)) < 0)
* goto fail;
* //pick format here and set it
* av_opt_set_pixel_fmt(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
*
* //query and set more capabilities
*
* fail:
* //clean up code
* avdevice_capabilities_free(&caps, oc);
* avformat_free_context(oc);
* @endcode
*/
/**
* Structure describing device capabilities.
*
* It is used by devices in conjunction with the av_device_capabilities AVOption table
* to implement the capabilities probing API on top of the AVOption API. Should not be used directly.
*/
typedef struct AVDeviceCapabilitiesQuery {
const AVClass *av_class;
AVFormatContext *device_context;
enum AVCodecID codec;
enum AVSampleFormat sample_format;
enum AVPixelFormat pixel_format;
int sample_rate;
int channels;
int64_t channel_layout;
int window_width;
int window_height;
int frame_width;
int frame_height;
AVRational fps;
} AVDeviceCapabilitiesQuery;
/**
* AVOption table used by devices to implement device capabilities API. Should not be used by a user.
*/
extern const AVOption av_device_capabilities[];
/**
* Initialize capabilities probing API based on AVOption API.
*
* avdevice_capabilities_free() must be called when query capabilities API is
* not used anymore.
*
* @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
* @param s Context of the device.
* @param device_options An AVDictionary filled with device-private options.
* On return this parameter will be destroyed and replaced with a dict
* containing options that were not found. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
*
* @return >= 0 on success, negative otherwise.
*/
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
AVDictionary **device_options);
/**
* Free resources created by avdevice_capabilities_create()
*
* @param caps Device capabilities data to be freed.
* @param s Context of the device.
*/
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
/**
* Structure describing the basic parameters of a device.
*/
typedef struct AVDeviceInfo {
char *device_name; /**< device name, format depends on device */
char *device_description; /**< human friendly name */
} AVDeviceInfo;
/**
* List of devices.
*/
typedef struct AVDeviceInfoList {
AVDeviceInfo **devices; /**< list of autodetected devices */
int nb_devices; /**< number of autodetected devices */
int default_device; /**< index of default device or -1 if no default */
} AVDeviceInfoList;
/**
* List devices.
*
* Returns available device names and their parameters.
*
* @note: Some devices may accept system-dependent device names that cannot be
* autodetected. The list returned by this function cannot be assumed to
* be always complete.
*
* @param s device context.
* @param[out] device_list list of autodetected devices.
* @return count of autodetected devices, negative on error.
*/
int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
/**
* Convenient function to free result of avdevice_list_devices().
*
* @param devices device list to be freed.
*/
void avdevice_free_list_devices(AVDeviceInfoList **device_list);
/**
* List devices.
*
* Returns available device names and their parameters.
* These are convenient wrappers for avdevice_list_devices().
* Device context is allocated and deallocated internally.
*
* @param device device format. May be NULL if device name is set.
* @param device_name device name. May be NULL if device format is set.
* @param device_options An AVDictionary filled with device-private options. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
* @param[out] device_list list of autodetected devices
* @return count of autodetected devices, negative on error.
* @note device argument takes precedence over device_name when both are set.
*/
int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list);
int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list);
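/*
* Listing sketch (illustrative only): enumerate the capture devices offered
* by the "alsa" input format.
*
* @code
* AVDeviceInfoList *list = NULL;
* int i, n = avdevice_list_input_sources(av_find_input_format("alsa"), NULL,
*                                        NULL, &list);
* for (i = 0; i < n; i++)
*     printf("%s (%s)\n", list->devices[i]->device_name,
*            list->devices[i]->device_description);
* avdevice_free_list_devices(&list);
* @endcode
*/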
#endif /* AVDEVICE_AVDEVICE_H */


@@ -0,0 +1,55 @@
/*
* Windows resource file for libavdevice
*
* Copyright (C) 2012 James Almer
* Copyright (C) 2013 Tiancheng "Timothy" Gu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <windows.h>
#include "libavdevice/version.h"
#include "libavutil/ffversion.h"
#include "config.h"
1 VERSIONINFO
FILEVERSION LIBAVDEVICE_VERSION_MAJOR, LIBAVDEVICE_VERSION_MINOR, LIBAVDEVICE_VERSION_MICRO, 0
PRODUCTVERSION LIBAVDEVICE_VERSION_MAJOR, LIBAVDEVICE_VERSION_MINOR, LIBAVDEVICE_VERSION_MICRO, 0
FILEFLAGSMASK VS_FFI_FILEFLAGSMASK
FILEOS VOS_NT_WINDOWS32
FILETYPE VFT_DLL
{
BLOCK "StringFileInfo"
{
BLOCK "040904B0"
{
VALUE "CompanyName", "FFmpeg Project"
VALUE "FileDescription", "FFmpeg device handling library"
VALUE "FileVersion", AV_STRINGIFY(LIBAVDEVICE_VERSION)
VALUE "InternalName", "libavdevice"
VALUE "LegalCopyright", "Copyright (C) 2000-" AV_STRINGIFY(CONFIG_THIS_YEAR) " FFmpeg Project"
VALUE "OriginalFilename", "avdevice" BUILDSUF "-" AV_STRINGIFY(LIBAVDEVICE_VERSION_MAJOR) SLIBSUF
VALUE "ProductName", "FFmpeg"
VALUE "ProductVersion", FFMPEG_VERSION
}
}
BLOCK "VarFileInfo"
{
VALUE "Translation", 0x0409, 0x04B0
}
}

File diff suppressed because it is too large


@@ -0,0 +1,361 @@
/*
* *BSD video grab interface
* Copyright (c) 2002 Steve O'Hara-Smith
* based on
* Linux video grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
* and
* simple_grab.c Copyright (c) 1999 Roger Hardiman
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
# include <dev/bktr/ioctl_meteor.h>
# include <dev/bktr/ioctl_bt848.h>
#elif HAVE_MACHINE_IOCTL_METEOR_H && HAVE_MACHINE_IOCTL_BT848_H
# include <machine/ioctl_meteor.h>
# include <machine/ioctl_bt848.h>
#elif HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H && HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H
# include <dev/video/meteor/ioctl_meteor.h>
# include <dev/video/bktr/ioctl_bt848.h>
#elif HAVE_DEV_IC_BT8XX_H
# include <dev/ic/bt8xx.h>
#endif
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include "avdevice.h"
typedef struct VideoData {
AVClass *class;
int video_fd;
int tuner_fd;
int width, height;
uint64_t per_frame;
int standard;
char *framerate; /**< Set by a private option. */
} VideoData;
#define PAL 1
#define PALBDGHI 1
#define NTSC 2
#define NTSCM 2
#define SECAM 3
#define PALN 4
#define PALM 5
#define NTSCJ 6
/* PAL is 768 x 576. NTSC is 640 x 480 */
#define PAL_HEIGHT 576
#define SECAM_HEIGHT 576
#define NTSC_HEIGHT 480
#ifndef VIDEO_FORMAT
#define VIDEO_FORMAT NTSC
#endif
static const int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
METEOR_DEV3, METEOR_DEV_SVIDEO };
uint8_t *video_buf;
size_t video_buf_size;
uint64_t last_frame_time;
volatile sig_atomic_t nsignals;
static void catchsignal(int signal)
{
nsignals++;
}
static av_cold int bktr_init(const char *video_device, int width, int height,
int format, int *video_fd, int *tuner_fd, int idev, double frequency)
{
struct meteor_geomet geo;
int h_max;
long ioctl_frequency;
char *arg;
int c;
struct sigaction act = { {0} }, old;
int ret;
char errbuf[128];
if (idev < 0 || idev > 4)
{
arg = getenv ("BKTR_DEV");
if (arg)
idev = atoi (arg);
if (idev < 0 || idev > 4)
idev = 1;
}
if (format < 1 || format > 6)
{
arg = getenv ("BKTR_FORMAT");
if (arg)
format = atoi (arg);
if (format < 1 || format > 6)
format = VIDEO_FORMAT;
}
if (frequency <= 0)
{
arg = getenv ("BKTR_FREQUENCY");
if (arg)
frequency = atof (arg);
if (frequency <= 0)
frequency = 0.0;
}
sigemptyset(&act.sa_mask);
act.sa_handler = catchsignal;
sigaction(SIGUSR1, &act, &old);
*tuner_fd = avpriv_open("/dev/tuner0", O_RDONLY);
if (*tuner_fd < 0)
av_log(NULL, AV_LOG_ERROR, "Warning. Tuner not opened, continuing: %s\n", strerror(errno));
*video_fd = avpriv_open(video_device, O_RDONLY);
if (*video_fd < 0) {
ret = AVERROR(errno);
av_strerror(ret, errbuf, sizeof(errbuf));
av_log(NULL, AV_LOG_ERROR, "%s: %s\n", video_device, errbuf);
return ret;
}
geo.rows = height;
geo.columns = width;
geo.frames = 1;
geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;
switch (format) {
case PAL: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
case PALN: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALN; break;
case PALM: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALM; break;
case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM; break;
case NTSC: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCM; break;
case NTSCJ: h_max = NTSC_HEIGHT; c = BT848_IFORM_F_NTSCJ; break;
default: h_max = PAL_HEIGHT; c = BT848_IFORM_F_PALBDGHI; break;
}
if (height <= h_max / 2)
geo.oformat |= METEOR_GEO_EVEN_ONLY;
if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
ret = AVERROR(errno);
av_strerror(ret, errbuf, sizeof(errbuf));
av_log(NULL, AV_LOG_ERROR, "METEORSETGEO: %s\n", errbuf);
return ret;
}
if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
ret = AVERROR(errno);
av_strerror(ret, errbuf, sizeof(errbuf));
av_log(NULL, AV_LOG_ERROR, "BT848SFMT: %s\n", errbuf);
return ret;
}
c = bktr_dev[idev];
if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
ret = AVERROR(errno);
av_strerror(ret, errbuf, sizeof(errbuf));
av_log(NULL, AV_LOG_ERROR, "METEORSINPUT: %s\n", errbuf);
return ret;
}
video_buf_size = width * height * 12 / 8;
video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
if (video_buf == MAP_FAILED) {
ret = AVERROR(errno);
av_strerror(ret, errbuf, sizeof(errbuf));
av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", errbuf);
return ret;
}
if (frequency != 0.0) {
ioctl_frequency = (unsigned long)(frequency*16);
if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
av_log(NULL, AV_LOG_ERROR, "TVTUNER_SETFREQ: %s\n", strerror(errno));
}
c = AUDIO_UNMUTE;
if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
av_log(NULL, AV_LOG_ERROR, "TVTUNER_SAUDIO: %s\n", strerror(errno));
c = METEOR_CAP_CONTINOUS;
ioctl(*video_fd, METEORCAPTUR, &c);
c = SIGUSR1;
ioctl(*video_fd, METEORSSIGNAL, &c);
return 0;
}
static void bktr_getframe(uint64_t per_frame)
{
uint64_t curtime;
curtime = av_gettime();
if (!last_frame_time
|| ((last_frame_time + per_frame) > curtime)) {
if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
if (!nsignals)
av_log(NULL, AV_LOG_INFO,
"SLEPT NO signals - %d microseconds late\n",
(int)(av_gettime() - last_frame_time - per_frame));
}
}
nsignals = 0;
last_frame_time = curtime;
}
/* note: we support only one picture read at a time */
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
VideoData *s = s1->priv_data;
if (av_new_packet(pkt, video_buf_size) < 0)
return AVERROR(EIO);
bktr_getframe(s->per_frame);
pkt->pts = av_gettime();
memcpy(pkt->data, video_buf, video_buf_size);
return video_buf_size;
}
static int grab_read_header(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
AVStream *st;
AVRational framerate;
int ret = 0;
if (!s->framerate)
switch (s->standard) {
case PAL: s->framerate = av_strdup("pal"); break;
case NTSC: s->framerate = av_strdup("ntsc"); break;
case SECAM: s->framerate = av_strdup("25"); break;
default:
av_log(s1, AV_LOG_ERROR, "Unknown standard.\n");
ret = AVERROR(EINVAL);
goto out;
}
if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) {
av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", s->framerate);
goto out;
}
st = avformat_new_stream(s1, NULL);
if (!st) {
ret = AVERROR(ENOMEM);
goto out;
}
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */
s->per_frame = ((uint64_t)1000000 * framerate.den) / framerate.num;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->pix_fmt = AV_PIX_FMT_YUV420P;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->width = s->width;
st->codec->height = s->height;
st->codec->time_base.den = framerate.num;
st->codec->time_base.num = framerate.den;
if (bktr_init(s1->filename, s->width, s->height, s->standard,
&s->video_fd, &s->tuner_fd, -1, 0.0) < 0) {
ret = AVERROR(EIO);
goto out;
}
nsignals = 0;
last_frame_time = 0;
out:
return ret;
}
static int grab_read_close(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
int c;
c = METEOR_CAP_STOP_CONT;
ioctl(s->video_fd, METEORCAPTUR, &c);
close(s->video_fd);
c = AUDIO_MUTE;
ioctl(s->tuner_fd, BT848_SAUDIO, &c);
close(s->tuner_fd);
munmap((caddr_t)video_buf, video_buf_size);
return 0;
}
#define OFFSET(x) offsetof(VideoData, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_FORMAT}, PAL, NTSCJ, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PALN", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALN}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PALM", "", 0, AV_OPT_TYPE_CONST, {.i64 = PALM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSCJ", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSCJ}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ NULL },
};
static const AVClass bktr_class = {
.class_name = "BKTR grab interface",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_bktr_demuxer = {
.name = "bktr",
.long_name = NULL_IF_CONFIG_SMALL("video grab"),
.priv_data_size = sizeof(VideoData),
.read_header = grab_read_header,
.read_packet = grab_read_packet,
.read_close = grab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &bktr_class,
};
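Usage sketch (not part of this commit): the bktr demuxer registered above is opened through the public libavdevice/libavformat API. The device path /dev/bktr0 and the option values below are illustrative assumptions; error handling is abbreviated.

/* Open the bktr grab device via libavformat (sketch). */
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int open_bktr_example(AVFormatContext **fmt_ctx)
{
    AVInputFormat *ifmt;
    AVDictionary *opts = NULL;
    int ret;

    avdevice_register_all();                  /* registers ff_bktr_demuxer */
    ifmt = av_find_input_format("bktr");
    if (!ifmt)
        return AVERROR_DEMUXER_NOT_FOUND;

    /* The option names map onto the AVOption table above. */
    av_dict_set(&opts, "standard", "PAL", 0);
    av_dict_set(&opts, "framerate", "25", 0);

    ret = avformat_open_input(fmt_ctx, "/dev/bktr0", ifmt, &opts);
    av_dict_free(&opts);
    return ret;                               /* then read with av_read_frame() */
}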


@@ -0,0 +1,241 @@
/*
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <caca.h>
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avdevice.h"
typedef struct CACAContext {
AVClass *class;
AVFormatContext *ctx;
char *window_title;
int window_width, window_height;
caca_canvas_t *canvas;
caca_display_t *display;
caca_dither_t *dither;
char *algorithm, *antialias;
char *charset, *color;
char *driver;
char *list_dither;
int list_drivers;
} CACAContext;
static int caca_write_trailer(AVFormatContext *s)
{
CACAContext *c = s->priv_data;
av_freep(&c->window_title);
if (c->display) {
caca_free_display(c->display);
c->display = NULL;
}
if (c->dither) {
caca_free_dither(c->dither);
c->dither = NULL;
}
if (c->canvas) {
caca_free_canvas(c->canvas);
c->canvas = NULL;
}
return 0;
}
static void list_drivers(CACAContext *c)
{
const char *const *drivers = caca_get_display_driver_list();
int i;
av_log(c->ctx, AV_LOG_INFO, "Available drivers:\n");
for (i = 0; drivers[i]; i += 2)
av_log(c->ctx, AV_LOG_INFO, "%s : %s\n", drivers[i], drivers[i + 1]);
}
#define DEFINE_LIST_DITHER(thing, thing_str) \
static void list_dither_## thing(CACAContext *c) \
{ \
const char *const *thing = caca_get_dither_## thing ##_list(c->dither); \
int i; \
\
av_log(c->ctx, AV_LOG_INFO, "Available %s:\n", thing_str); \
for (i = 0; thing[i]; i += 2) \
av_log(c->ctx, AV_LOG_INFO, "%s : %s\n", thing[i], thing[i + 1]); \
}
DEFINE_LIST_DITHER(color, "colors");
DEFINE_LIST_DITHER(charset, "charsets");
DEFINE_LIST_DITHER(algorithm, "algorithms");
DEFINE_LIST_DITHER(antialias, "antialiases");
static int caca_write_header(AVFormatContext *s)
{
CACAContext *c = s->priv_data;
AVStream *st = s->streams[0];
AVCodecContext *encctx = st->codec;
int ret, bpp;
c->ctx = s;
if (c->list_drivers) {
list_drivers(c);
return AVERROR_EXIT;
}
if (c->list_dither) {
if (!strcmp(c->list_dither, "colors")) {
list_dither_color(c);
} else if (!strcmp(c->list_dither, "charsets")) {
list_dither_charset(c);
} else if (!strcmp(c->list_dither, "algorithms")) {
list_dither_algorithm(c);
} else if (!strcmp(c->list_dither, "antialiases")) {
list_dither_antialias(c);
} else {
av_log(s, AV_LOG_ERROR,
"Invalid argument '%s', for 'list_dither' option\n"
"Argument must be one of 'algorithms, 'antialiases', 'charsets', 'colors'\n",
c->list_dither);
return AVERROR(EINVAL);
}
return AVERROR_EXIT;
}
if ( s->nb_streams > 1
|| encctx->codec_type != AVMEDIA_TYPE_VIDEO
|| encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
return AVERROR(EINVAL);
}
if (encctx->pix_fmt != AV_PIX_FMT_RGB24) {
av_log(s, AV_LOG_ERROR,
"Unsupported pixel format '%s', choose rgb24\n",
av_get_pix_fmt_name(encctx->pix_fmt));
return AVERROR(EINVAL);
}
c->canvas = caca_create_canvas(c->window_width, c->window_height);
if (!c->canvas) {
ret = AVERROR(errno);
av_log(s, AV_LOG_ERROR, "Failed to create canvas\n");
goto fail;
}
bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(encctx->pix_fmt));
c->dither = caca_create_dither(bpp, encctx->width, encctx->height,
bpp / 8 * encctx->width,
0x0000ff, 0x00ff00, 0xff0000, 0);
if (!c->dither) {
ret = AVERROR(errno);
av_log(s, AV_LOG_ERROR, "Failed to create dither\n");
goto fail;
}
#define CHECK_DITHER_OPT(opt) \
if (caca_set_dither_##opt(c->dither, c->opt) < 0) { \
ret = AVERROR(errno); \
av_log(s, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n", \
c->opt, #opt); \
goto fail; \
}
CHECK_DITHER_OPT(algorithm);
CHECK_DITHER_OPT(antialias);
CHECK_DITHER_OPT(charset);
CHECK_DITHER_OPT(color);
c->display = caca_create_display_with_driver(c->canvas, c->driver);
if (!c->display) {
ret = AVERROR(errno);
av_log(s, AV_LOG_ERROR, "Failed to create display\n");
list_drivers(c);
goto fail;
}
if (!c->window_width || !c->window_height) {
c->window_width = caca_get_canvas_width(c->canvas);
c->window_height = caca_get_canvas_height(c->canvas);
}
if (!c->window_title)
c->window_title = av_strdup(s->filename);
caca_set_display_title(c->display, c->window_title);
caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, AV_TIME_BASE_Q));
return 0;
fail:
caca_write_trailer(s);
return ret;
}
static int caca_write_packet(AVFormatContext *s, AVPacket *pkt)
{
CACAContext *c = s->priv_data;
caca_dither_bitmap(c->canvas, 0, 0, c->window_width, c->window_height, c->dither, pkt->data);
caca_refresh_display(c->display);
return 0;
}
#define OFFSET(x) offsetof(CACAContext,x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, ENC},
{ "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
{ "driver", "set display driver", OFFSET(driver), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
{ "algorithm", "set dithering algorithm", OFFSET(algorithm), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
{ "antialias", "set antialias method", OFFSET(antialias), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
{ "charset", "set charset used to render output", OFFSET(charset), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
{ "color", "set color used to render output", OFFSET(color), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
{ "list_drivers", "list available drivers", OFFSET(list_drivers), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, ENC, "list_drivers" },
{ "true", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 1}, 0, 0, ENC, "list_drivers" },
{ "false", NULL, 0, AV_OPT_TYPE_CONST, {.i64 = 0}, 0, 0, ENC, "list_drivers" },
{ "list_dither", "list available dither options", OFFSET(list_dither), AV_OPT_TYPE_STRING, {.dbl=0}, 0, 1, ENC, "list_dither" },
{ "algorithms", NULL, 0, AV_OPT_TYPE_CONST, {.str = "algorithms"}, 0, 0, ENC, "list_dither" },
{ "antialiases", NULL, 0, AV_OPT_TYPE_CONST, {.str = "antialiases"},0, 0, ENC, "list_dither" },
{ "charsets", NULL, 0, AV_OPT_TYPE_CONST, {.str = "charsets"}, 0, 0, ENC, "list_dither" },
{ "colors", NULL, 0, AV_OPT_TYPE_CONST, {.str = "colors"}, 0, 0, ENC, "list_dither" },
{ NULL },
};
static const AVClass caca_class = {
.class_name = "caca_outdev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
AVOutputFormat ff_caca_muxer = {
.name = "caca",
.long_name = NULL_IF_CONFIG_SMALL("caca (color ASCII art) output device"),
.priv_data_size = sizeof(CACAContext),
.audio_codec = AV_CODEC_ID_NONE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = caca_write_header,
.write_packet = caca_write_packet,
.write_trailer = caca_write_trailer,
.flags = AVFMT_NOFILE,
.priv_class = &caca_class,
};
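Usage sketch (illustrative, not from this commit): the caca muxer above behaves like any other output device; only one AV_PIX_FMT_RGB24 rawvideo stream is accepted, as enforced in caca_write_header(). The window title value is an arbitrary example.

/* Allocate an output context bound to the caca device (sketch). */
#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/opt.h>

int open_caca_example(AVFormatContext **octx)
{
    int ret;

    avdevice_register_all();                  /* registers ff_caca_muxer */
    ret = avformat_alloc_output_context2(octx, NULL, "caca", NULL);
    if (ret < 0)
        return ret;

    /* Private options from the table above can be set before the header. */
    av_opt_set((*octx)->priv_data, "window_title", "preview", 0);
    return 0;   /* add one rgb24 stream, then avformat_write_header() + packets */
}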


@@ -0,0 +1,241 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla, Luca Barbato, Deti Fliegl
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <DeckLinkAPI.h>
#ifdef _WIN32
#include <DeckLinkAPI_i.c>
#else
#include <DeckLinkAPIDispatch.cpp>
#endif
#include <pthread.h>
#include <semaphore.h>
extern "C" {
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/imgutils.h"
}
#include "decklink_common.h"
#ifdef _WIN32
IDeckLinkIterator *CreateDeckLinkIteratorInstance(void)
{
IDeckLinkIterator *iter;
if (CoInitialize(NULL) < 0) {
av_log(NULL, AV_LOG_ERROR, "COM initialization failed.\n");
return NULL;
}
if (CoCreateInstance(CLSID_CDeckLinkIterator, NULL, CLSCTX_ALL,
IID_IDeckLinkIterator, (void**) &iter) != S_OK) {
av_log(NULL, AV_LOG_ERROR, "DeckLink drivers not installed.\n");
return NULL;
}
return iter;
}
#endif
#ifdef _WIN32
static char *dup_wchar_to_utf8(wchar_t *w)
{
char *s = NULL;
int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
s = (char *) av_malloc(l);
if (s)
WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
return s;
}
#define DECKLINK_STR OLECHAR *
#define DECKLINK_STRDUP dup_wchar_to_utf8
#define DECKLINK_FREE(s) SysFreeString(s)
#elif defined(__APPLE__)
static char *dup_cfstring_to_utf8(CFStringRef w)
{
char s[256];
CFStringGetCString(w, s, 255, kCFStringEncodingUTF8);
return av_strdup(s);
}
#define DECKLINK_STR const __CFString *
#define DECKLINK_STRDUP dup_cfstring_to_utf8
#define DECKLINK_FREE(s) free((void *) s)
#else
#define DECKLINK_STR const char *
#define DECKLINK_STRDUP av_strdup
/* free() is needed for a string returned by the DeckLink SDK. */
#define DECKLINK_FREE(s) free((void *) s)
#endif
HRESULT ff_decklink_get_display_name(IDeckLink *This, const char **displayName)
{
DECKLINK_STR tmpDisplayName;
HRESULT hr = This->GetDisplayName(&tmpDisplayName);
if (hr != S_OK)
return hr;
*displayName = DECKLINK_STRDUP(tmpDisplayName);
DECKLINK_FREE(tmpDisplayName);
return hr;
}
int ff_decklink_set_format(AVFormatContext *avctx,
int width, int height,
int tb_num, int tb_den,
decklink_direction_t direction, int num)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
BMDDisplayModeSupport support;
IDeckLinkDisplayModeIterator *itermode;
IDeckLinkDisplayMode *mode;
int i = 1;
HRESULT res;
if (direction == DIRECTION_IN) {
res = ctx->dli->GetDisplayModeIterator (&itermode);
} else {
res = ctx->dlo->GetDisplayModeIterator (&itermode);
}
if (res != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
return AVERROR(EIO);
}
if (tb_num == 1) {
tb_num *= 1000;
tb_den *= 1000;
}
ctx->bmd_mode = bmdModeUnknown;
while ((ctx->bmd_mode == bmdModeUnknown) && itermode->Next(&mode) == S_OK) {
BMDTimeValue bmd_tb_num, bmd_tb_den;
int bmd_width = mode->GetWidth();
int bmd_height = mode->GetHeight();
mode->GetFrameRate(&bmd_tb_num, &bmd_tb_den);
if ((bmd_width == width && bmd_height == height &&
bmd_tb_num == tb_num && bmd_tb_den == tb_den) || i == num) {
ctx->bmd_mode = mode->GetDisplayMode();
ctx->bmd_width = bmd_width;
ctx->bmd_height = bmd_height;
ctx->bmd_tb_den = bmd_tb_den;
ctx->bmd_tb_num = bmd_tb_num;
ctx->bmd_field_dominance = mode->GetFieldDominance();
av_log(avctx, AV_LOG_INFO, "Found Decklink mode %d x %d with rate %.2f%s\n",
bmd_width, bmd_height, (float)bmd_tb_den/(float)bmd_tb_num,
(ctx->bmd_field_dominance==bmdLowerFieldFirst || ctx->bmd_field_dominance==bmdUpperFieldFirst)?"(i)":"");
}
mode->Release();
i++;
}
itermode->Release();
if (ctx->bmd_mode == bmdModeUnknown)
return -1;
if (direction == DIRECTION_IN) {
if (ctx->dli->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
bmdVideoOutputFlagDefault,
&support, NULL) != S_OK)
return -1;
} else {
if (ctx->dlo->DoesSupportVideoMode(ctx->bmd_mode, bmdFormat8BitYUV,
bmdVideoOutputFlagDefault,
&support, NULL) != S_OK)
return -1;
}
if (support == bmdDisplayModeSupported)
return 0;
return -1;
}
int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction, int num) {
return ff_decklink_set_format(avctx, 0, 0, 0, 0, direction, num);
}
int ff_decklink_list_devices(AVFormatContext *avctx)
{
IDeckLink *dl = NULL;
IDeckLinkIterator *iter = CreateDeckLinkIteratorInstance();
if (!iter) {
av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator\n");
return AVERROR(EIO);
}
av_log(avctx, AV_LOG_INFO, "Blackmagic DeckLink devices:\n");
while (iter->Next(&dl) == S_OK) {
const char *displayName;
ff_decklink_get_display_name(dl, &displayName);
av_log(avctx, AV_LOG_INFO, "\t'%s'\n", displayName);
av_free((void *) displayName);
dl->Release();
}
iter->Release();
return 0;
}
int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
IDeckLinkDisplayModeIterator *itermode;
IDeckLinkDisplayMode *mode;
int i=0;
HRESULT res;
if (direction == DIRECTION_IN) {
res = ctx->dli->GetDisplayModeIterator (&itermode);
} else {
res = ctx->dlo->GetDisplayModeIterator (&itermode);
}
if (res != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
return AVERROR(EIO);
}
av_log(avctx, AV_LOG_INFO, "Supported formats for '%s':\n",
avctx->filename);
while (itermode->Next(&mode) == S_OK) {
BMDTimeValue tb_num, tb_den;
mode->GetFrameRate(&tb_num, &tb_den);
av_log(avctx, AV_LOG_INFO, "\t%d\t%ldx%ld at %d/%d fps",
++i, mode->GetWidth(), mode->GetHeight(),
(int) tb_den, (int) tb_num);
switch (mode->GetFieldDominance()) {
case bmdLowerFieldFirst:
av_log(avctx, AV_LOG_INFO, " (interlaced, lower field first)"); break;
case bmdUpperFieldFirst:
av_log(avctx, AV_LOG_INFO, " (interlaced, upper field first)"); break;
}
av_log(avctx, AV_LOG_INFO, "\n");
mode->Release();
}
itermode->Release();
return 0;
}
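A worked example (illustrative only) of the time-base normalization at the top of ff_decklink_set_format(): DeckLink display modes commonly report frame durations as scaled pairs such as 1000/25000, so a 1/25 stream time base is scaled by 1000 before the comparison loop.

/* Same rule as the tb_num == 1 branch above: 1/25 -> 1000/25000. */
static void normalize_tb(int *tb_num, int *tb_den)
{
    if (*tb_num == 1) {
        *tb_num *= 1000;
        *tb_den *= 1000;
    }
}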


@@ -0,0 +1,103 @@
/*
* Blackmagic DeckLink common code
* Copyright (c) 2013-2014 Ramiro Polla, Luca Barbato, Deti Fliegl
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <DeckLinkAPIVersion.h>
#include "decklink_common_c.h"
class decklink_output_callback;
class decklink_input_callback;
typedef struct AVPacketQueue {
AVPacketList *first_pkt, *last_pkt;
int nb_packets;
unsigned long long size;
int abort_request;
pthread_mutex_t mutex;
pthread_cond_t cond;
AVFormatContext *avctx;
} AVPacketQueue;
struct decklink_ctx {
/* DeckLink SDK interfaces */
IDeckLink *dl;
IDeckLinkOutput *dlo;
IDeckLinkInput *dli;
decklink_output_callback *output_callback;
decklink_input_callback *input_callback;
/* DeckLink mode information */
BMDTimeValue bmd_tb_den;
BMDTimeValue bmd_tb_num;
BMDDisplayMode bmd_mode;
int bmd_width;
int bmd_height;
int bmd_field_dominance;
/* Capture buffer queue */
AVPacketQueue queue;
/* Streams present */
int audio;
int video;
/* Status */
int playback_started;
int capture_started;
int64_t last_pts;
unsigned long frameCount;
unsigned int dropped;
AVStream *audio_st;
AVStream *video_st;
/* Options */
int list_devices;
int list_formats;
double preroll;
int frames_preroll;
int frames_buffer;
sem_t semaphore;
int channels;
};
typedef enum { DIRECTION_IN, DIRECTION_OUT } decklink_direction_t;
#ifdef _WIN32
#if BLACKMAGIC_DECKLINK_API_VERSION < 0x0a040000
typedef unsigned long buffercount_type;
#else
typedef unsigned int buffercount_type;
#endif
IDeckLinkIterator *CreateDeckLinkIteratorInstance(void);
#else
typedef uint32_t buffercount_type;
#endif
HRESULT ff_decklink_get_display_name(IDeckLink *This, const char **displayName);
int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, decklink_direction_t direction = DIRECTION_OUT, int num = 0);
int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction, int num);
int ff_decklink_list_devices(AVFormatContext *avctx);
int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction = DIRECTION_OUT);


@@ -0,0 +1,33 @@
/*
* Blackmagic DeckLink common code
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
struct decklink_cctx {
const AVClass *cclass;
void *ctx;
/* Options */
int list_devices;
int list_formats;
double preroll;
int v210;
};


@@ -0,0 +1,541 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Luca Barbato, Deti Fliegl
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <DeckLinkAPI.h>
#include <pthread.h>
#include <semaphore.h>
extern "C" {
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/imgutils.h"
}
#include "decklink_common.h"
#include "decklink_dec.h"
static void avpacket_queue_init(AVFormatContext *avctx, AVPacketQueue *q)
{
memset(q, 0, sizeof(AVPacketQueue));
pthread_mutex_init(&q->mutex, NULL);
pthread_cond_init(&q->cond, NULL);
q->avctx = avctx;
}
static void avpacket_queue_flush(AVPacketQueue *q)
{
AVPacketList *pkt, *pkt1;
pthread_mutex_lock(&q->mutex);
for (pkt = q->first_pkt; pkt != NULL; pkt = pkt1) {
pkt1 = pkt->next;
av_free_packet(&pkt->pkt);
av_freep(&pkt);
}
q->last_pkt = NULL;
q->first_pkt = NULL;
q->nb_packets = 0;
q->size = 0;
pthread_mutex_unlock(&q->mutex);
}
static void avpacket_queue_end(AVPacketQueue *q)
{
avpacket_queue_flush(q);
pthread_mutex_destroy(&q->mutex);
pthread_cond_destroy(&q->cond);
}
static unsigned long long avpacket_queue_size(AVPacketQueue *q)
{
unsigned long long size;
pthread_mutex_lock(&q->mutex);
size = q->size;
pthread_mutex_unlock(&q->mutex);
return size;
}
static int avpacket_queue_put(AVPacketQueue *q, AVPacket *pkt)
{
AVPacketList *pkt1;
// Drop Packet if queue size is > 1GB
if (avpacket_queue_size(q) > 1024 * 1024 * 1024) {
av_log(q->avctx, AV_LOG_WARNING, "Decklink input buffer overrun!\n");
return -1;
}
/* duplicate the packet */
if (av_dup_packet(pkt) < 0) {
return -1;
}
pkt1 = (AVPacketList *)av_malloc(sizeof(AVPacketList));
if (!pkt1) {
return -1;
}
pkt1->pkt = *pkt;
pkt1->next = NULL;
pthread_mutex_lock(&q->mutex);
if (!q->last_pkt) {
q->first_pkt = pkt1;
} else {
q->last_pkt->next = pkt1;
}
q->last_pkt = pkt1;
q->nb_packets++;
q->size += pkt1->pkt.size + sizeof(*pkt1);
pthread_cond_signal(&q->cond);
pthread_mutex_unlock(&q->mutex);
return 0;
}
static int avpacket_queue_get(AVPacketQueue *q, AVPacket *pkt, int block)
{
AVPacketList *pkt1;
int ret;
pthread_mutex_lock(&q->mutex);
for (;;) {
pkt1 = q->first_pkt;
if (pkt1) {
q->first_pkt = pkt1->next;
if (!q->first_pkt) {
q->last_pkt = NULL;
}
q->nb_packets--;
q->size -= pkt1->pkt.size + sizeof(*pkt1);
*pkt = pkt1->pkt;
av_free(pkt1);
ret = 1;
break;
} else if (!block) {
ret = 0;
break;
} else {
pthread_cond_wait(&q->cond, &q->mutex);
}
}
pthread_mutex_unlock(&q->mutex);
return ret;
}
class decklink_input_callback : public IDeckLinkInputCallback
{
public:
decklink_input_callback(AVFormatContext *_avctx);
~decklink_input_callback();
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
virtual ULONG STDMETHODCALLTYPE AddRef(void);
virtual ULONG STDMETHODCALLTYPE Release(void);
virtual HRESULT STDMETHODCALLTYPE VideoInputFormatChanged(BMDVideoInputFormatChangedEvents, IDeckLinkDisplayMode*, BMDDetectedVideoInputFormatFlags);
virtual HRESULT STDMETHODCALLTYPE VideoInputFrameArrived(IDeckLinkVideoInputFrame*, IDeckLinkAudioInputPacket*);
private:
ULONG m_refCount;
pthread_mutex_t m_mutex;
AVFormatContext *avctx;
decklink_ctx *ctx;
int no_video;
int64_t initial_video_pts;
int64_t initial_audio_pts;
};
decklink_input_callback::decklink_input_callback(AVFormatContext *_avctx) : m_refCount(0)
{
avctx = _avctx;
decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
ctx = (struct decklink_ctx *) cctx->ctx;
initial_audio_pts = initial_video_pts = AV_NOPTS_VALUE;
pthread_mutex_init(&m_mutex, NULL);
}
decklink_input_callback::~decklink_input_callback()
{
pthread_mutex_destroy(&m_mutex);
}
ULONG decklink_input_callback::AddRef(void)
{
pthread_mutex_lock(&m_mutex);
m_refCount++;
pthread_mutex_unlock(&m_mutex);
return (ULONG)m_refCount;
}
ULONG decklink_input_callback::Release(void)
{
pthread_mutex_lock(&m_mutex);
m_refCount--;
pthread_mutex_unlock(&m_mutex);
if (m_refCount == 0) {
delete this;
return 0;
}
return (ULONG)m_refCount;
}
HRESULT decklink_input_callback::VideoInputFrameArrived(
IDeckLinkVideoInputFrame *videoFrame, IDeckLinkAudioInputPacket *audioFrame)
{
void *frameBytes;
void *audioFrameBytes;
BMDTimeValue frameTime;
BMDTimeValue frameDuration;
ctx->frameCount++;
// Handle Video Frame
if (videoFrame) {
AVPacket pkt;
AVCodecContext *c;
av_init_packet(&pkt);
c = ctx->video_st->codec;
if (ctx->frameCount % 25 == 0) {
unsigned long long qsize = avpacket_queue_size(&ctx->queue);
av_log(avctx, AV_LOG_DEBUG,
"Frame received (#%lu) - Valid (%liB) - QSize %fMB\n",
ctx->frameCount,
videoFrame->GetRowBytes() * videoFrame->GetHeight(),
(double)qsize / 1024 / 1024);
}
videoFrame->GetBytes(&frameBytes);
videoFrame->GetStreamTime(&frameTime, &frameDuration,
ctx->video_st->time_base.den);
if (videoFrame->GetFlags() & bmdFrameHasNoInputSource) {
if (videoFrame->GetPixelFormat() == bmdFormat8BitYUV) {
unsigned bars[8] = {
0xEA80EA80, 0xD292D210, 0xA910A9A5, 0x90229035,
0x6ADD6ACA, 0x51EF515A, 0x286D28EF, 0x10801080 };
int width = videoFrame->GetWidth();
int height = videoFrame->GetHeight();
unsigned *p = (unsigned *)frameBytes;
for (int y = 0; y < height; y++) {
for (int x = 0; x < width; x += 2)
*p++ = bars[(x * 8) / width];
}
}
if (!no_video) {
av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - No input signal detected "
"- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
}
no_video = 1;
} else {
if (no_video) {
av_log(avctx, AV_LOG_WARNING, "Frame received (#%lu) - Input returned "
"- Frames dropped %u\n", ctx->frameCount, ++ctx->dropped);
}
no_video = 0;
}
pkt.pts = frameTime / ctx->video_st->time_base.num;
if (initial_video_pts == AV_NOPTS_VALUE) {
initial_video_pts = pkt.pts;
}
pkt.pts -= initial_video_pts;
pkt.dts = pkt.pts;
pkt.duration = frameDuration;
// TODO: confirm the keyframe flag still applies here
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = ctx->video_st->index;
pkt.data = (uint8_t *)frameBytes;
pkt.size = videoFrame->GetRowBytes() *
videoFrame->GetHeight();
//fprintf(stderr,"Video Frame size %d ts %d\n", pkt.size, pkt.pts);
c->frame_number++;
if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
++ctx->dropped;
}
}
// Handle Audio Frame
if (audioFrame) {
AVCodecContext *c;
AVPacket pkt;
BMDTimeValue audio_pts;
av_init_packet(&pkt);
c = ctx->audio_st->codec;
//hack among hacks
pkt.size = audioFrame->GetSampleFrameCount() * ctx->audio_st->codec->channels * (16 / 8);
audioFrame->GetBytes(&audioFrameBytes);
audioFrame->GetPacketTime(&audio_pts, ctx->audio_st->time_base.den);
pkt.pts = audio_pts / ctx->audio_st->time_base.num;
if (initial_audio_pts == AV_NOPTS_VALUE) {
initial_audio_pts = pkt.pts;
}
pkt.pts -= initial_audio_pts;
pkt.dts = pkt.pts;
//fprintf(stderr,"Audio Frame size %d ts %d\n", pkt.size, pkt.pts);
pkt.flags |= AV_PKT_FLAG_KEY;
pkt.stream_index = ctx->audio_st->index;
pkt.data = (uint8_t *)audioFrameBytes;
c->frame_number++;
if (avpacket_queue_put(&ctx->queue, &pkt) < 0) {
++ctx->dropped;
}
}
return S_OK;
}
HRESULT decklink_input_callback::VideoInputFormatChanged(
BMDVideoInputFormatChangedEvents events, IDeckLinkDisplayMode *mode,
BMDDetectedVideoInputFormatFlags)
{
return S_OK;
}
static HRESULT decklink_start_input(AVFormatContext *avctx)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
ctx->input_callback = new decklink_input_callback(avctx);
ctx->dli->SetCallback(ctx->input_callback);
return ctx->dli->StartStreams();
}
extern "C" {
av_cold int ff_decklink_read_close(AVFormatContext *avctx)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
if (ctx->capture_started) {
ctx->dli->StopStreams();
ctx->dli->DisableVideoInput();
ctx->dli->DisableAudioInput();
}
if (ctx->dli)
ctx->dli->Release();
if (ctx->dl)
ctx->dl->Release();
avpacket_queue_end(&ctx->queue);
av_freep(&cctx->ctx);
return 0;
}
av_cold int ff_decklink_read_header(AVFormatContext *avctx)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx;
IDeckLinkDisplayModeIterator *itermode;
IDeckLinkIterator *iter;
IDeckLink *dl = NULL;
AVStream *st;
HRESULT result;
char fname[1024];
char *tmp;
int mode_num = 0;
ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
if (!ctx)
return AVERROR(ENOMEM);
ctx->list_devices = cctx->list_devices;
ctx->list_formats = cctx->list_formats;
ctx->preroll = cctx->preroll;
cctx->ctx = ctx;
iter = CreateDeckLinkIteratorInstance();
if (!iter) {
av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator\n");
return AVERROR(EIO);
}
/* List available devices. */
if (ctx->list_devices) {
ff_decklink_list_devices(avctx);
iter->Release(); /* the listing creates its own iterator; drop ours before exiting */
return AVERROR_EXIT;
}
/* Copy the device name; an optional "@N" suffix selects display mode N. */
snprintf(fname, sizeof(fname), "%s", avctx->filename);
tmp = strchr(fname, '@');
if (tmp != NULL) {
mode_num = atoi(tmp + 1);
*tmp = 0;
}
/* Open device. */
while (iter->Next(&dl) == S_OK) {
const char *displayName;
ff_decklink_get_display_name(dl, &displayName);
if (!strcmp(fname, displayName)) {
av_free((void *) displayName);
ctx->dl = dl;
break;
}
av_free((void *) displayName);
dl->Release();
}
iter->Release();
if (!ctx->dl) {
av_log(avctx, AV_LOG_ERROR, "Could not open '%s'\n", fname);
return AVERROR(EIO);
}
/* Get input device. */
if (ctx->dl->QueryInterface(IID_IDeckLinkInput, (void **) &ctx->dli) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not open output device from '%s'\n",
avctx->filename);
ctx->dl->Release();
return AVERROR(EIO);
}
/* List supported formats. */
if (ctx->list_formats) {
ff_decklink_list_formats(avctx, DIRECTION_IN);
ctx->dli->Release();
ctx->dl->Release();
return AVERROR_EXIT;
}
if (ctx->dli->GetDisplayModeIterator(&itermode) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
ctx->dl->Release();
return AVERROR(EIO);
}
if (mode_num > 0) {
if (ff_decklink_set_format(avctx, DIRECTION_IN, mode_num) < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not set mode %d for %s\n", mode_num, fname);
itermode->Release(); /* release the iterator before bailing out */
goto error;
}
}
itermode->Release();
/* Setup streams. */
st = avformat_new_stream(avctx, NULL);
if (!st) {
av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
goto error;
}
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
st->codec->sample_rate = bmdAudioSampleRate48kHz;
st->codec->channels = 2;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
ctx->audio_st=st;
st = avformat_new_stream(avctx, NULL);
if (!st) {
av_log(avctx, AV_LOG_ERROR, "Cannot add stream\n");
goto error;
}
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->width = ctx->bmd_width;
st->codec->height = ctx->bmd_height;
st->codec->time_base.den = ctx->bmd_tb_den;
st->codec->time_base.num = ctx->bmd_tb_num;
if (cctx->v210) {
st->codec->codec_id = AV_CODEC_ID_V210;
st->codec->codec_tag = MKTAG('V', '2', '1', '0');
} else {
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->pix_fmt = AV_PIX_FMT_UYVY422;
st->codec->codec_tag = MKTAG('U', 'Y', 'V', 'Y');
}
/* Compute bit_rate only after pix_fmt is known; the previous ordering called
avpicture_get_size() while pix_fmt was still unset. */
st->codec->bit_rate = avpicture_get_size(st->codec->pix_fmt, ctx->bmd_width, ctx->bmd_height) * 1/av_q2d(st->codec->time_base) * 8;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
ctx->video_st=st;
result = ctx->dli->EnableAudioInput(bmdAudioSampleRate48kHz, bmdAudioSampleType16bitInteger, 2);
if (result != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Cannot enable audio input\n");
goto error;
}
result = ctx->dli->EnableVideoInput(ctx->bmd_mode,
cctx->v210 ? bmdFormat10BitYUV : bmdFormat8BitYUV,
bmdVideoInputFlagDefault);
if (result != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Cannot enable video input\n");
goto error;
}
avpacket_queue_init (avctx, &ctx->queue);
if (decklink_start_input (avctx) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Cannot start input stream\n");
goto error;
}
return 0;
error:
ctx->dli->Release();
ctx->dl->Release();
return AVERROR(EIO);
}
int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
AVFrame *frame = ctx->video_st->codec->coded_frame;
avpacket_queue_get(&ctx->queue, pkt, 1);
if (frame && (ctx->bmd_field_dominance == bmdUpperFieldFirst || ctx->bmd_field_dominance == bmdLowerFieldFirst)) {
frame->interlaced_frame = 1;
if (ctx->bmd_field_dominance == bmdUpperFieldFirst) {
frame->top_field_first = 1;
}
}
return 0;
}
} /* extern "C" */
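The AVPacketQueue used above is a classic blocking producer/consumer queue. Below is a self-contained C sketch of the same mutex plus condition-variable pattern with the FFmpeg types stripped out (illustrative, not part of the commit): the capture callback plays the producer, ff_decklink_read_packet() the consumer.

#include <pthread.h>
#include <stdlib.h>

typedef struct Node { int payload; struct Node *next; } Node;
typedef struct Queue {
    Node *first, *last;
    pthread_mutex_t mutex;
    pthread_cond_t  cond;
} Queue;

static void queue_put(Queue *q, int payload)   /* producer side */
{
    Node *n = malloc(sizeof(*n));
    n->payload = payload;
    n->next = NULL;
    pthread_mutex_lock(&q->mutex);
    if (q->last) q->last->next = n; else q->first = n;
    q->last = n;
    pthread_cond_signal(&q->cond);             /* wake one blocked consumer */
    pthread_mutex_unlock(&q->mutex);
}

static int queue_get(Queue *q)                 /* blocks until data arrives */
{
    pthread_mutex_lock(&q->mutex);
    while (!q->first)
        pthread_cond_wait(&q->cond, &q->mutex);
    Node *n = q->first;
    q->first = n->next;
    if (!q->first)
        q->last = NULL;
    int payload = n->payload;
    free(n);
    pthread_mutex_unlock(&q->mutex);
    return payload;
}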


@@ -0,0 +1,32 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef __cplusplus
extern "C" {
#endif
int ff_decklink_read_header(AVFormatContext *avctx);
int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt);
int ff_decklink_read_close(AVFormatContext *avctx);
#ifdef __cplusplus
} /* extern "C" */
#endif


@@ -0,0 +1,55 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2014 Deti Fliegl
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "decklink_common_c.h"
#include "decklink_dec.h"
#define OFFSET(x) offsetof(struct decklink_cctx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "list_devices", "list available devices" , OFFSET(list_devices), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC },
{ "list_formats", "list supported formats" , OFFSET(list_formats), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC },
{ "bm_v210", "v210 10 bit per channel" , OFFSET(v210), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, DEC },
{ NULL },
};
static const AVClass decklink_demuxer_class = {
.class_name = "Blackmagic DeckLink demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_decklink_demuxer = {
.name = "decklink",
.long_name = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink input"),
.flags = AVFMT_NOFILE | AVFMT_RAWPICTURE,
.priv_class = &decklink_demuxer_class,
.priv_data_size = sizeof(struct decklink_cctx),
.read_header = ff_decklink_read_header,
.read_packet = ff_decklink_read_packet,
.read_close = ff_decklink_read_close,
};
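A hedged sketch of how the list_devices option above is typically exercised from the API; the input name "dummy" is a placeholder, since with list_devices=1 the open call prints the device list and exits with AVERROR_EXIT.

#include <libavdevice/avdevice.h>
#include <libavformat/avformat.h>
#include <libavutil/dict.h>

int list_decklink_devices(void)
{
    AVFormatContext *ctx = NULL;
    AVDictionary *opts = NULL;
    int ret;

    avdevice_register_all();
    av_dict_set(&opts, "list_devices", "1", 0);
    ret = avformat_open_input(&ctx, "dummy",
                              av_find_input_format("decklink"), &opts);
    av_dict_free(&opts);
    avformat_close_input(&ctx);   /* safe no-op when the open failed */
    return ret;                   /* AVERROR_EXIT after the listing */
}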


@@ -0,0 +1,426 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <DeckLinkAPI.h>
#include <pthread.h>
#include <semaphore.h>
extern "C" {
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/imgutils.h"
}
#include "decklink_common.h"
#include "decklink_enc.h"
/* DeckLink callback class declaration */
class decklink_frame : public IDeckLinkVideoFrame
{
public:
decklink_frame(struct decklink_ctx *ctx, AVFrame *avframe, long width,
long height, void *buffer) :
_ctx(ctx), _avframe(avframe), _width(width),
_height(height), _buffer(buffer), _refs(0) { }
virtual long STDMETHODCALLTYPE GetWidth (void) { return _width; }
virtual long STDMETHODCALLTYPE GetHeight (void) { return _height; }
virtual long STDMETHODCALLTYPE GetRowBytes (void) { return _width<<1; }
virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void) { return bmdFormat8BitYUV; }
virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags (void) { return bmdVideoOutputFlagDefault; }
virtual HRESULT STDMETHODCALLTYPE GetBytes (void **buffer) { *buffer = _buffer; return S_OK; }
virtual HRESULT STDMETHODCALLTYPE GetTimecode (BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }
virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary) { return S_FALSE; }
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
virtual ULONG STDMETHODCALLTYPE Release(void) { int ret = --_refs; if (!ret) delete this; return ret; /* do not read _refs after delete */ }
struct decklink_ctx *_ctx;
AVFrame *_avframe;
private:
long _width;
long _height;
void *_buffer;
int _refs;
};
class decklink_output_callback : public IDeckLinkVideoOutputCallback
{
public:
virtual HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame *_frame, BMDOutputFrameCompletionResult result)
{
decklink_frame *frame = static_cast<decklink_frame *>(_frame);
struct decklink_ctx *ctx = frame->_ctx;
AVFrame *avframe = frame->_avframe;
av_frame_free(&avframe);
sem_post(&ctx->semaphore);
return S_OK;
}
virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped(void) { return S_OK; }
virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
virtual ULONG STDMETHODCALLTYPE AddRef(void) { return 1; }
virtual ULONG STDMETHODCALLTYPE Release(void) { return 1; }
};
static int decklink_setup_video(AVFormatContext *avctx, AVStream *st)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
AVCodecContext *c = st->codec;
if (ctx->video) {
av_log(avctx, AV_LOG_ERROR, "Only one video stream is supported!\n");
return -1;
}
if (c->pix_fmt != AV_PIX_FMT_UYVY422) {
av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format!"
" Only AV_PIX_FMT_UYVY422 is supported.\n");
return -1;
}
if (ff_decklink_set_format(avctx, c->width, c->height,
c->time_base.num, c->time_base.den)) {
av_log(avctx, AV_LOG_ERROR, "Unsupported video size or framerate!"
" Check available formats with -list_formats 1.\n");
return -1;
}
if (ctx->dlo->EnableVideoOutput(ctx->bmd_mode,
bmdVideoOutputFlagDefault) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not enable video output!\n");
return -1;
}
/* Set callback. */
ctx->output_callback = new decklink_output_callback();
ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);
/* Start video semaphore. */
ctx->frames_preroll = c->time_base.den * ctx->preroll;
if (c->time_base.den > 1000)
ctx->frames_preroll /= 1000;
/* Buffer twice as many frames as the preroll. */
ctx->frames_buffer = ctx->frames_preroll * 2;
ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);
sem_init(&ctx->semaphore, 0, ctx->frames_buffer);
/* The device expects the framerate to be fixed. */
avpriv_set_pts_info(st, 64, c->time_base.num, c->time_base.den);
ctx->video = 1;
return 0;
}
static int decklink_setup_audio(AVFormatContext *avctx, AVStream *st)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
AVCodecContext *c = st->codec;
if (ctx->audio) {
av_log(avctx, AV_LOG_ERROR, "Only one audio stream is supported!\n");
return -1;
}
if (c->sample_rate != 48000) {
av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate!"
" Only 48kHz is supported.\n");
return -1;
}
if (c->channels != 2 && c->channels != 8) {
av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels!"
" Only stereo and 7.1 are supported.\n");
return -1;
}
if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
bmdAudioSampleType16bitInteger,
c->channels,
bmdAudioOutputStreamTimestamped) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not enable audio output!\n");
return -1;
}
if (ctx->dlo->BeginAudioPreroll() != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not begin audio preroll!\n");
return -1;
}
/* The device expects the sample rate to be fixed. */
avpriv_set_pts_info(st, 64, 1, c->sample_rate);
ctx->channels = c->channels;
ctx->audio = 1;
return 0;
}
av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
if (ctx->playback_started) {
BMDTimeValue actual;
ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
&actual, ctx->bmd_tb_den);
ctx->dlo->DisableVideoOutput();
if (ctx->audio)
ctx->dlo->DisableAudioOutput();
}
if (ctx->dlo)
ctx->dlo->Release();
if (ctx->dl)
ctx->dl->Release();
if (ctx->output_callback)
delete ctx->output_callback;
sem_destroy(&ctx->semaphore);
av_freep(&cctx->ctx);
return 0;
}
static int decklink_write_video_packet(AVFormatContext *avctx, AVPacket *pkt)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
AVPicture *avpicture = (AVPicture *) pkt->data;
AVFrame *avframe, *tmp;
decklink_frame *frame;
buffercount_type buffered;
HRESULT hr;
/* HACK while av_uncoded_frame() isn't implemented */
int ret;
tmp = av_frame_alloc();
if (!tmp)
return AVERROR(ENOMEM);
tmp->format = AV_PIX_FMT_UYVY422;
tmp->width = ctx->bmd_width;
tmp->height = ctx->bmd_height;
ret = av_frame_get_buffer(tmp, 32);
if (ret < 0) {
av_frame_free(&tmp);
return ret;
}
av_image_copy(tmp->data, tmp->linesize, (const uint8_t **) avpicture->data,
avpicture->linesize, (AVPixelFormat) tmp->format, tmp->width,
tmp->height);
avframe = av_frame_clone(tmp);
av_frame_free(&tmp);
if (!avframe) {
av_log(avctx, AV_LOG_ERROR, "Could not clone video frame.\n");
return AVERROR(EIO);
}
/* end HACK */
frame = new decklink_frame(ctx, avframe, ctx->bmd_width, ctx->bmd_height,
(void *) avframe->data[0]);
if (!frame) {
av_log(avctx, AV_LOG_ERROR, "Could not create new frame.\n");
return AVERROR(EIO);
}
/* Always keep at most one second of frames buffered. */
sem_wait(&ctx->semaphore);
/* Schedule frame for playback. */
hr = ctx->dlo->ScheduleVideoFrame((struct IDeckLinkVideoFrame *) frame,
pkt->pts * ctx->bmd_tb_num,
ctx->bmd_tb_num, ctx->bmd_tb_den);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not schedule video frame."
" error %08x.\n", (uint32_t) hr);
frame->Release();
return AVERROR(EIO);
}
ctx->dlo->GetBufferedVideoFrameCount(&buffered);
av_log(avctx, AV_LOG_DEBUG, "Buffered video frames: %d.\n", (int) buffered);
if (pkt->pts > 2 && buffered <= 2)
av_log(avctx, AV_LOG_WARNING, "There are not enough buffered video frames."
" Video may misbehave!\n");
/* Preroll video frames. */
if (!ctx->playback_started && pkt->pts > ctx->frames_preroll) {
av_log(avctx, AV_LOG_DEBUG, "Ending audio preroll.\n");
if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not end audio preroll!\n");
return AVERROR(EIO);
}
av_log(avctx, AV_LOG_DEBUG, "Starting scheduled playback.\n");
if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not start scheduled playback!\n");
return AVERROR(EIO);
}
ctx->playback_started = 1;
}
return 0;
}
static int decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
int sample_count = pkt->size / (ctx->channels << 1);
buffercount_type buffered;
ctx->dlo->GetBufferedAudioSampleFrameCount(&buffered);
if (pkt->pts > 1 && !buffered)
av_log(avctx, AV_LOG_WARNING, "There's no buffered audio."
" Audio will misbehave!\n");
if (ctx->dlo->ScheduleAudioSamples(pkt->data, sample_count, pkt->pts,
bmdAudioSampleRate48kHz, NULL) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not schedule audio samples.\n");
return AVERROR(EIO);
}
return 0;
}
extern "C" {
av_cold int ff_decklink_write_header(AVFormatContext *avctx)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx;
IDeckLinkDisplayModeIterator *itermode;
IDeckLinkIterator *iter;
IDeckLink *dl = NULL;
unsigned int n;
ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
if (!ctx)
return AVERROR(ENOMEM);
ctx->list_devices = cctx->list_devices;
ctx->list_formats = cctx->list_formats;
ctx->preroll = cctx->preroll;
cctx->ctx = ctx;
iter = CreateDeckLinkIteratorInstance();
if (!iter) {
av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator\n");
return AVERROR(EIO);
}
/* List available devices. */
if (ctx->list_devices) {
ff_decklink_list_devices(avctx);
iter->Release(); /* release our iterator before the early exit */
return AVERROR_EXIT;
}
/* Open device. */
while (iter->Next(&dl) == S_OK) {
const char *displayName;
ff_decklink_get_display_name(dl, &displayName);
if (!strcmp(avctx->filename, displayName)) {
av_free((void *) displayName);
ctx->dl = dl;
break;
}
av_free((void *) displayName);
dl->Release();
}
iter->Release();
if (!ctx->dl) {
av_log(avctx, AV_LOG_ERROR, "Could not open '%s'\n", avctx->filename);
return AVERROR(EIO);
}
/* Get output device. */
if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not open output device from '%s'\n",
avctx->filename);
ctx->dl->Release();
return AVERROR(EIO);
}
/* List supported formats. */
if (ctx->list_formats) {
ff_decklink_list_formats(avctx);
ctx->dlo->Release();
ctx->dl->Release();
return AVERROR_EXIT;
}
if (ctx->dlo->GetDisplayModeIterator(&itermode) != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
ctx->dl->Release();
return AVERROR(EIO);
}
/* Setup streams. */
for (n = 0; n < avctx->nb_streams; n++) {
AVStream *st = avctx->streams[n];
AVCodecContext *c = st->codec;
if (c->codec_type == AVMEDIA_TYPE_AUDIO) {
if (decklink_setup_audio(avctx, st))
goto error;
} else if (c->codec_type == AVMEDIA_TYPE_VIDEO) {
if (decklink_setup_video(avctx, st))
goto error;
} else {
av_log(avctx, AV_LOG_ERROR, "Unsupported stream type.\n");
goto error;
}
}
itermode->Release();
return 0;
error:
ctx->dlo->Release();
ctx->dl->Release();
return AVERROR(EIO);
}
int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt)
{
struct decklink_cctx *cctx = (struct decklink_cctx *) avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *) cctx->ctx;
AVStream *st = avctx->streams[pkt->stream_index];
ctx->last_pts = FFMAX(ctx->last_pts, pkt->pts);
if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO)
return decklink_write_video_packet(avctx, pkt);
else if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO)
return decklink_write_audio_packet(avctx, pkt);
return AVERROR(EIO);
}
} /* extern "C" */
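For reference, a worked example of the preroll sizing done in decklink_setup_video() above, assuming the default preroll of 0.5 s: at 25 fps (time_base 1/25) frames_preroll = 25 * 0.5 = 12 and frames_buffer = min(24, 60) = 24 semaphore slots; at 29.97 fps (time_base 1001/30000) the den > 1000 branch divides by 1000, giving 15 and 30.

/* Mirrors the arithmetic in decklink_setup_video() (sketch). */
static int preroll_frames(int tb_den, double preroll_seconds)
{
    int frames = tb_den * preroll_seconds;
    if (tb_den > 1000)      /* rational time bases such as 1001/30000 */
        frames /= 1000;
    return frames;
}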


@@ -0,0 +1,32 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef __cplusplus
extern "C" {
#endif
int ff_decklink_write_header(AVFormatContext *avctx);
int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt);
int ff_decklink_write_trailer(AVFormatContext *avctx);
#ifdef __cplusplus
} /* extern "C" */
#endif


@@ -0,0 +1,57 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "decklink_common_c.h"
#include "decklink_enc.h"
#define OFFSET(x) offsetof(struct decklink_cctx, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "list_devices", "list available devices" , OFFSET(list_devices), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, ENC },
{ "list_formats", "list supported formats" , OFFSET(list_formats), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, ENC },
{ "preroll" , "video preroll in seconds", OFFSET(preroll ), AV_OPT_TYPE_DOUBLE, { .dbl = 0.5 }, 0, 5, ENC },
{ NULL },
};
static const AVClass decklink_muxer_class = {
.class_name = "Blackmagic DeckLink muxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
AVOutputFormat ff_decklink_muxer = {
.name = "decklink",
.long_name = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink output"),
.audio_codec = AV_CODEC_ID_PCM_S16LE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.subtitle_codec = AV_CODEC_ID_NONE,
.flags = AVFMT_NOFILE | AVFMT_RAWPICTURE,
.priv_class = &decklink_muxer_class,
.priv_data_size = sizeof(struct decklink_cctx),
.write_header = ff_decklink_write_header,
.write_packet = ff_decklink_write_packet,
.write_trailer = ff_decklink_write_trailer,
};
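/*
 * A minimal sketch of driving this muxer through the libavformat API,
 * e.g. to exercise the "list_devices" option declared above.  Everything
 * below is illustrative: the output name "dummy" is a placeholder, not a
 * real device, and error handling is abbreviated.
 */
#include "libavformat/avformat.h"
#include "libavutil/opt.h"

static int decklink_list_devices_sketch(void)
{
    AVFormatContext *oc = NULL;
    int ret;

    av_register_all();
    ret = avformat_alloc_output_context2(&oc, NULL, "decklink", "dummy");
    if (ret < 0)
        return ret;
    av_opt_set_int(oc->priv_data, "list_devices", 1, 0);
    ret = avformat_write_header(oc, NULL); /* expected to print the device list and bail out */
    avformat_free_context(oc);
    return ret;
}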

File diff suppressed because it is too large

View File

@@ -0,0 +1,352 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DSHOW_H
#define AVDEVICE_DSHOW_H
#define DSHOWDEBUG 0
#include "avdevice.h"
#define COBJMACROS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define NO_DSHOW_STRSAFE
#include <dshow.h>
#include <dvdmedia.h>
#include "libavcodec/internal.h"
/* EC_DEVICE_LOST is not defined in MinGW dshow headers. */
#ifndef EC_DEVICE_LOST
#define EC_DEVICE_LOST 0x1f
#endif
long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src);
void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps);
void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps);
void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type);
void ff_printGUID(const GUID *g);
extern const AVClass *ff_dshow_context_class_ptr;
#define dshowdebug(...) ff_dlog(&ff_dshow_context_class_ptr, __VA_ARGS__)
static inline void nothing(void *foo)
{
}
struct GUIDoffset {
const GUID *iid;
int offset;
};
enum dshowDeviceType {
VideoDevice = 0,
AudioDevice = 1,
};
enum dshowSourceFilterType {
VideoSourceDevice = 0,
AudioSourceDevice = 1,
};
#define DECLARE_QUERYINTERFACE(class, ...) \
long WINAPI \
class##_QueryInterface(class *this, const GUID *riid, void **ppvObject) \
{ \
struct GUIDoffset ifaces[] = __VA_ARGS__; \
int i; \
dshowdebug(AV_STRINGIFY(class)"_QueryInterface(%p, %p, %p)\n", this, riid, ppvObject); \
ff_printGUID(riid); \
if (!ppvObject) \
return E_POINTER; \
for (i = 0; i < sizeof(ifaces)/sizeof(ifaces[0]); i++) { \
if (IsEqualGUID(riid, ifaces[i].iid)) { \
void *obj = (void *) ((uint8_t *) this + ifaces[i].offset); \
class##_AddRef(this); \
dshowdebug("\tfound %d with offset %d\n", i, ifaces[i].offset); \
*ppvObject = (void *) obj; \
return S_OK; \
} \
} \
dshowdebug("\tE_NOINTERFACE\n"); \
*ppvObject = NULL; \
return E_NOINTERFACE; \
}
#define DECLARE_ADDREF(class) \
unsigned long WINAPI \
class##_AddRef(class *this) \
{ \
dshowdebug(AV_STRINGIFY(class)"_AddRef(%p)\t%ld\n", this, this->ref+1); \
return InterlockedIncrement(&this->ref); \
}
#define DECLARE_RELEASE(class) \
unsigned long WINAPI \
class##_Release(class *this) \
{ \
long ref = InterlockedDecrement(&this->ref); \
dshowdebug(AV_STRINGIFY(class)"_Release(%p)\t%ld\n", this, ref); \
if (!ref) \
class##_Destroy(this); \
return ref; \
}
#define DECLARE_DESTROY(class, func) \
void class##_Destroy(class *this) \
{ \
dshowdebug(AV_STRINGIFY(class)"_Destroy(%p)\n", this); \
if (this) { \
/* run the cleanup hook only on a real object, never on NULL */ \
func(this); \
if (this->vtbl) \
CoTaskMemFree(this->vtbl); \
CoTaskMemFree(this); \
} \
}
#define DECLARE_CREATE(class, setup, ...) \
class *class##_Create(__VA_ARGS__) \
{ \
class *this = CoTaskMemAlloc(sizeof(class)); \
void *vtbl = CoTaskMemAlloc(sizeof(*this->vtbl)); \
dshowdebug(AV_STRINGIFY(class)"_Create(%p)\n", this); \
if (!this || !vtbl) { \
/* bail out directly so Destroy() never sees an uninitialized object */ \
CoTaskMemFree(this); \
CoTaskMemFree(vtbl); \
dshowdebug("could not create "AV_STRINGIFY(class)"\n"); \
return NULL; \
} \
ZeroMemory(this, sizeof(class)); \
ZeroMemory(vtbl, sizeof(*this->vtbl)); \
this->ref = 1; \
this->vtbl = vtbl; \
if (!setup) \
goto fail; \
dshowdebug("created "AV_STRINGIFY(class)" %p\n", this); \
return this; \
fail: \
class##_Destroy(this); \
dshowdebug("could not create "AV_STRINGIFY(class)"\n"); \
return NULL; \
}
#define SETVTBL(vtbl, class, fn) \
do { (vtbl)->fn = (void *) class##_##fn; } while(0)
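/*
 * Illustration (hypothetical class name "Foo", not part of this header):
 * DECLARE_ADDREF(Foo) expands mechanically to
 *
 *     unsigned long WINAPI
 *     Foo_AddRef(Foo *this)
 *     {
 *         dshowdebug("Foo_AddRef(%p)\t%ld\n", this, this->ref+1);
 *         return InterlockedIncrement(&this->ref);
 *     }
 *
 * so each DECLARE_* macro stamps out one member of a COM interface in
 * plain C, and SETVTBL() then wires the generated function into the
 * object's vtable.
 */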
/*****************************************************************************
* Forward Declarations
****************************************************************************/
typedef struct libAVPin libAVPin;
typedef struct libAVMemInputPin libAVMemInputPin;
typedef struct libAVEnumPins libAVEnumPins;
typedef struct libAVEnumMediaTypes libAVEnumMediaTypes;
typedef struct libAVFilter libAVFilter;
/*****************************************************************************
* libAVPin
****************************************************************************/
struct libAVPin {
IPinVtbl *vtbl;
long ref;
libAVFilter *filter;
IPin *connectedto;
AM_MEDIA_TYPE type;
IMemInputPinVtbl *imemvtbl;
};
long WINAPI libAVPin_QueryInterface (libAVPin *, const GUID *, void **);
unsigned long WINAPI libAVPin_AddRef (libAVPin *);
unsigned long WINAPI libAVPin_Release (libAVPin *);
long WINAPI libAVPin_Connect (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_ReceiveConnection (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_Disconnect (libAVPin *);
long WINAPI libAVPin_ConnectedTo (libAVPin *, IPin **);
long WINAPI libAVPin_ConnectionMediaType (libAVPin *, AM_MEDIA_TYPE *);
long WINAPI libAVPin_QueryPinInfo (libAVPin *, PIN_INFO *);
long WINAPI libAVPin_QueryDirection (libAVPin *, PIN_DIRECTION *);
long WINAPI libAVPin_QueryId (libAVPin *, wchar_t **);
long WINAPI libAVPin_QueryAccept (libAVPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_EnumMediaTypes (libAVPin *, IEnumMediaTypes **);
long WINAPI libAVPin_QueryInternalConnections(libAVPin *, IPin **, unsigned long *);
long WINAPI libAVPin_EndOfStream (libAVPin *);
long WINAPI libAVPin_BeginFlush (libAVPin *);
long WINAPI libAVPin_EndFlush (libAVPin *);
long WINAPI libAVPin_NewSegment (libAVPin *, REFERENCE_TIME, REFERENCE_TIME, double);
long WINAPI libAVMemInputPin_QueryInterface (libAVMemInputPin *, const GUID *, void **);
unsigned long WINAPI libAVMemInputPin_AddRef (libAVMemInputPin *);
unsigned long WINAPI libAVMemInputPin_Release (libAVMemInputPin *);
long WINAPI libAVMemInputPin_GetAllocator (libAVMemInputPin *, IMemAllocator **);
long WINAPI libAVMemInputPin_NotifyAllocator (libAVMemInputPin *, IMemAllocator *, BOOL);
long WINAPI libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *, ALLOCATOR_PROPERTIES *);
long WINAPI libAVMemInputPin_Receive (libAVMemInputPin *, IMediaSample *);
long WINAPI libAVMemInputPin_ReceiveMultiple (libAVMemInputPin *, IMediaSample **, long, long *);
long WINAPI libAVMemInputPin_ReceiveCanBlock (libAVMemInputPin *);
void libAVPin_Destroy(libAVPin *);
libAVPin *libAVPin_Create (libAVFilter *filter);
void libAVMemInputPin_Destroy(libAVMemInputPin *);
/*****************************************************************************
* libAVEnumPins
****************************************************************************/
struct libAVEnumPins {
IEnumPinsVtbl *vtbl;
long ref;
int pos;
libAVPin *pin;
libAVFilter *filter;
};
long WINAPI libAVEnumPins_QueryInterface(libAVEnumPins *, const GUID *, void **);
unsigned long WINAPI libAVEnumPins_AddRef (libAVEnumPins *);
unsigned long WINAPI libAVEnumPins_Release (libAVEnumPins *);
long WINAPI libAVEnumPins_Next (libAVEnumPins *, unsigned long, IPin **, unsigned long *);
long WINAPI libAVEnumPins_Skip (libAVEnumPins *, unsigned long);
long WINAPI libAVEnumPins_Reset (libAVEnumPins *);
long WINAPI libAVEnumPins_Clone (libAVEnumPins *, libAVEnumPins **);
void libAVEnumPins_Destroy(libAVEnumPins *);
libAVEnumPins *libAVEnumPins_Create (libAVPin *pin, libAVFilter *filter);
/*****************************************************************************
* libAVEnumMediaTypes
****************************************************************************/
struct libAVEnumMediaTypes {
IEnumMediaTypesVtbl *vtbl;
long ref;
int pos;
AM_MEDIA_TYPE type;
};
long WINAPI libAVEnumMediaTypes_QueryInterface(libAVEnumMediaTypes *, const GUID *, void **);
unsigned long WINAPI libAVEnumMediaTypes_AddRef (libAVEnumMediaTypes *);
unsigned long WINAPI libAVEnumMediaTypes_Release (libAVEnumMediaTypes *);
long WINAPI libAVEnumMediaTypes_Next (libAVEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
long WINAPI libAVEnumMediaTypes_Skip (libAVEnumMediaTypes *, unsigned long);
long WINAPI libAVEnumMediaTypes_Reset (libAVEnumMediaTypes *);
long WINAPI libAVEnumMediaTypes_Clone (libAVEnumMediaTypes *, libAVEnumMediaTypes **);
void libAVEnumMediaTypes_Destroy(libAVEnumMediaTypes *);
libAVEnumMediaTypes *libAVEnumMediaTypes_Create(const AM_MEDIA_TYPE *type);
/*****************************************************************************
* libAVFilter
****************************************************************************/
struct libAVFilter {
IBaseFilterVtbl *vtbl;
long ref;
const wchar_t *name;
libAVPin *pin;
FILTER_INFO info;
FILTER_STATE state;
IReferenceClock *clock;
enum dshowDeviceType type;
void *priv_data;
int stream_index;
int64_t start_time;
void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, enum dshowDeviceType type);
};
long WINAPI libAVFilter_QueryInterface (libAVFilter *, const GUID *, void **);
unsigned long WINAPI libAVFilter_AddRef (libAVFilter *);
unsigned long WINAPI libAVFilter_Release (libAVFilter *);
long WINAPI libAVFilter_GetClassID (libAVFilter *, CLSID *);
long WINAPI libAVFilter_Stop (libAVFilter *);
long WINAPI libAVFilter_Pause (libAVFilter *);
long WINAPI libAVFilter_Run (libAVFilter *, REFERENCE_TIME);
long WINAPI libAVFilter_GetState (libAVFilter *, DWORD, FILTER_STATE *);
long WINAPI libAVFilter_SetSyncSource (libAVFilter *, IReferenceClock *);
long WINAPI libAVFilter_GetSyncSource (libAVFilter *, IReferenceClock **);
long WINAPI libAVFilter_EnumPins (libAVFilter *, IEnumPins **);
long WINAPI libAVFilter_FindPin (libAVFilter *, const wchar_t *, IPin **);
long WINAPI libAVFilter_QueryFilterInfo(libAVFilter *, FILTER_INFO *);
long WINAPI libAVFilter_JoinFilterGraph(libAVFilter *, IFilterGraph *, const wchar_t *);
long WINAPI libAVFilter_QueryVendorInfo(libAVFilter *, wchar_t **);
void libAVFilter_Destroy(libAVFilter *);
libAVFilter *libAVFilter_Create (void *, void *, enum dshowDeviceType);
/*****************************************************************************
* dshow_ctx
****************************************************************************/
struct dshow_ctx {
const AVClass *class;
IGraphBuilder *graph;
char *device_name[2];
int video_device_number;
int audio_device_number;
int list_options;
int list_devices;
int audio_buffer_size;
int crossbar_video_input_pin_number;
int crossbar_audio_input_pin_number;
char *video_pin_name;
char *audio_pin_name;
int show_video_device_dialog;
int show_audio_device_dialog;
int show_video_crossbar_connection_dialog;
int show_audio_crossbar_connection_dialog;
int show_analog_tv_tuner_dialog;
int show_analog_tv_tuner_audio_dialog;
char *audio_filter_load_file;
char *audio_filter_save_file;
char *video_filter_load_file;
char *video_filter_save_file;
IBaseFilter *device_filter[2];
IPin *device_pin[2];
libAVFilter *capture_filter[2];
libAVPin *capture_pin[2];
HANDLE mutex;
HANDLE event[2]; /* event[0] is set by DirectShow
* event[1] is set by callback() */
AVPacketList *pktl;
int eof;
int64_t curbufsize[2];
unsigned int video_frame_num;
IMediaControl *control;
IMediaEvent *media_event;
enum AVPixelFormat pixel_format;
enum AVCodecID video_codec_id;
char *framerate;
int requested_width;
int requested_height;
AVRational requested_framerate;
int sample_rate;
int sample_size;
int channels;
};
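/*
 * These fields back the dshow demuxer's AVOptions; typical invocations
 * look like the following (the device name "USB Camera" is illustrative):
 *
 *     ffmpeg -list_devices true -f dshow -i dummy
 *     ffmpeg -f dshow -list_options true -i video="USB Camera"
 *     ffmpeg -f dshow -video_device_number 0 -i video="USB Camera" out.mkv
 */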
/*****************************************************************************
* CrossBar
****************************************************************************/
HRESULT dshow_try_setup_crossbar_options(ICaptureGraphBuilder2 *graph_builder2,
IBaseFilter *device_filter, enum dshowDeviceType devtype, AVFormatContext *avctx);
void dshow_show_filter_properties(IBaseFilter *pFilter, AVFormatContext *avctx);
#endif /* AVDEVICE_DSHOW_H */

View File

@@ -0,0 +1,190 @@
/*
* Directshow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src)
{
uint8_t *pbFormat = NULL;
if (src->cbFormat) {
pbFormat = CoTaskMemAlloc(src->cbFormat);
if (!pbFormat)
return E_OUTOFMEMORY;
memcpy(pbFormat, src->pbFormat, src->cbFormat);
}
*dst = *src;
dst->pUnk = NULL;
dst->pbFormat = pbFormat;
return S_OK;
}
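/*
 * Sketch of the matching cleanup for a type filled in by
 * ff_copy_dshow_media_type(); this helper is illustrative and not part of
 * the original file.  pbFormat was allocated with CoTaskMemAlloc above, so
 * it must be released with CoTaskMemFree.
 */
static inline void example_reset_dshow_media_type(AM_MEDIA_TYPE *type)
{
    if (type->pbFormat)
        CoTaskMemFree(type->pbFormat);
    type->pbFormat = NULL;
    type->cbFormat = 0;
}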
void ff_printGUID(const GUID *g)
{
#if DSHOWDEBUG
const uint32_t *d = (const uint32_t *) &g->Data1;
const uint16_t *w = (const uint16_t *) &g->Data2;
const uint8_t *c = (const uint8_t *) &g->Data4;
dshowdebug("0x%08x 0x%04x 0x%04x %02x%02x%02x%02x%02x%02x%02x%02x",
d[0], w[0], w[1],
c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
#endif
}
static const char *dshow_context_to_name(void *ptr)
{
return "dshow";
}
static const AVClass ff_dshow_context_class = { "DirectShow", dshow_context_to_name };
const AVClass *ff_dshow_context_class_ptr = &ff_dshow_context_class;
#define dstruct(pctx, sname, var, type) \
dshowdebug(" "#var":\t%"type"\n", sname->var)
#if DSHOWDEBUG
static void dump_bih(void *s, BITMAPINFOHEADER *bih)
{
dshowdebug(" BITMAPINFOHEADER\n");
dstruct(s, bih, biSize, "lu");
dstruct(s, bih, biWidth, "ld");
dstruct(s, bih, biHeight, "ld");
dstruct(s, bih, biPlanes, "d");
dstruct(s, bih, biBitCount, "d");
dstruct(s, bih, biCompression, "lu");
dshowdebug(" biCompression:\t\"%.4s\"\n",
(char*) &bih->biCompression);
dstruct(s, bih, biSizeImage, "lu");
dstruct(s, bih, biXPelsPerMeter, "lu");
dstruct(s, bih, biYPelsPerMeter, "lu");
dstruct(s, bih, biClrUsed, "lu");
dstruct(s, bih, biClrImportant, "lu");
}
#endif
void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps)
{
#if DSHOWDEBUG
dshowdebug(" VIDEO_STREAM_CONFIG_CAPS\n");
dshowdebug(" guid\t");
ff_printGUID(&caps->guid);
dshowdebug("\n");
dshowdebug(" VideoStandard\t%lu\n", caps->VideoStandard);
dshowdebug(" InputSize %ld\t%ld\n", caps->InputSize.cx, caps->InputSize.cy);
dshowdebug(" MinCroppingSize %ld\t%ld\n", caps->MinCroppingSize.cx, caps->MinCroppingSize.cy);
dshowdebug(" MaxCroppingSize %ld\t%ld\n", caps->MaxCroppingSize.cx, caps->MaxCroppingSize.cy);
dshowdebug(" CropGranularityX\t%d\n", caps->CropGranularityX);
dshowdebug(" CropGranularityY\t%d\n", caps->CropGranularityY);
dshowdebug(" CropAlignX\t%d\n", caps->CropAlignX);
dshowdebug(" CropAlignY\t%d\n", caps->CropAlignY);
dshowdebug(" MinOutputSize %ld\t%ld\n", caps->MinOutputSize.cx, caps->MinOutputSize.cy);
dshowdebug(" MaxOutputSize %ld\t%ld\n", caps->MaxOutputSize.cx, caps->MaxOutputSize.cy);
dshowdebug(" OutputGranularityX\t%d\n", caps->OutputGranularityX);
dshowdebug(" OutputGranularityY\t%d\n", caps->OutputGranularityY);
dshowdebug(" StretchTapsX\t%d\n", caps->StretchTapsX);
dshowdebug(" StretchTapsY\t%d\n", caps->StretchTapsY);
dshowdebug(" ShrinkTapsX\t%d\n", caps->ShrinkTapsX);
dshowdebug(" ShrinkTapsY\t%d\n", caps->ShrinkTapsY);
dshowdebug(" MinFrameInterval\t%"PRId64"\n", caps->MinFrameInterval);
dshowdebug(" MaxFrameInterval\t%"PRId64"\n", caps->MaxFrameInterval);
dshowdebug(" MinBitsPerSecond\t%ld\n", caps->MinBitsPerSecond);
dshowdebug(" MaxBitsPerSecond\t%ld\n", caps->MaxBitsPerSecond);
#endif
}
void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps)
{
#if DSHOWDEBUG
dshowdebug(" AUDIO_STREAM_CONFIG_CAPS\n");
dshowdebug(" guid\t");
ff_printGUID(&caps->guid);
dshowdebug("\n");
dshowdebug(" MinimumChannels\t%lu\n", caps->MinimumChannels);
dshowdebug(" MaximumChannels\t%lu\n", caps->MaximumChannels);
dshowdebug(" ChannelsGranularity\t%lu\n", caps->ChannelsGranularity);
dshowdebug(" MinimumBitsPerSample\t%lu\n", caps->MinimumBitsPerSample);
dshowdebug(" MaximumBitsPerSample\t%lu\n", caps->MaximumBitsPerSample);
dshowdebug(" BitsPerSampleGranularity\t%lu\n", caps->BitsPerSampleGranularity);
dshowdebug(" MinimumSampleFrequency\t%lu\n", caps->MinimumSampleFrequency);
dshowdebug(" MaximumSampleFrequency\t%lu\n", caps->MaximumSampleFrequency);
dshowdebug(" SampleFrequencyGranularity\t%lu\n", caps->SampleFrequencyGranularity);
#endif
}
void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type)
{
#if DSHOWDEBUG
dshowdebug(" majortype\t");
ff_printGUID(&type->majortype);
dshowdebug("\n");
dshowdebug(" subtype\t");
ff_printGUID(&type->subtype);
dshowdebug("\n");
dshowdebug(" bFixedSizeSamples\t%d\n", type->bFixedSizeSamples);
dshowdebug(" bTemporalCompression\t%d\n", type->bTemporalCompression);
dshowdebug(" lSampleSize\t%lu\n", type->lSampleSize);
dshowdebug(" formattype\t");
ff_printGUID(&type->formattype);
dshowdebug("\n");
dshowdebug(" pUnk\t%p\n", type->pUnk);
dshowdebug(" cbFormat\t%lu\n", type->cbFormat);
dshowdebug(" pbFormat\t%p\n", type->pbFormat);
if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
VIDEOINFOHEADER *v = (void *) type->pbFormat;
dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
dump_bih(NULL, &v->bmiHeader);
} else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
dshowdebug(" dwInterlaceFlags: %lu\n", v->dwInterlaceFlags);
dshowdebug(" dwCopyProtectFlags: %lu\n", v->dwCopyProtectFlags);
dshowdebug(" dwPictAspectRatioX: %lu\n", v->dwPictAspectRatioX);
dshowdebug(" dwPictAspectRatioY: %lu\n", v->dwPictAspectRatioY);
// dshowdebug(" dwReserved1: %lu\n", v->u.dwReserved1); /* mingw-w64 is buggy and doesn't name unnamed unions */
dshowdebug(" dwReserved2: %lu\n", v->dwReserved2);
dump_bih(NULL, &v->bmiHeader);
} else if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
WAVEFORMATEX *fx = (void *) type->pbFormat;
dshowdebug(" wFormatTag: %u\n", fx->wFormatTag);
dshowdebug(" nChannels: %u\n", fx->nChannels);
dshowdebug(" nSamplesPerSec: %lu\n", fx->nSamplesPerSec);
dshowdebug(" nAvgBytesPerSec: %lu\n", fx->nAvgBytesPerSec);
dshowdebug(" nBlockAlign: %u\n", fx->nBlockAlign);
dshowdebug(" wBitsPerSample: %u\n", fx->wBitsPerSample);
dshowdebug(" cbSize: %u\n", fx->cbSize);
}
#endif
}

View File

@@ -0,0 +1,208 @@
/*
* DirectShow capture interface
* Copyright (c) 2015 Roger Pack
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
static const char *
GetPhysicalPinName(long pin_type)
{
switch (pin_type)
{
case PhysConn_Video_Tuner: return "Video Tuner";
case PhysConn_Video_Composite: return "Video Composite";
case PhysConn_Video_SVideo: return "S-Video";
case PhysConn_Video_RGB: return "Video RGB";
case PhysConn_Video_YRYBY: return "Video YRYBY";
case PhysConn_Video_SerialDigital: return "Video Serial Digital";
case PhysConn_Video_ParallelDigital: return "Video Parallel Digital";
case PhysConn_Video_SCSI: return "Video SCSI";
case PhysConn_Video_AUX: return "Video AUX";
case PhysConn_Video_1394: return "Video 1394";
case PhysConn_Video_USB: return "Video USB";
case PhysConn_Video_VideoDecoder: return "Video Decoder";
case PhysConn_Video_VideoEncoder: return "Video Encoder";
case PhysConn_Audio_Tuner: return "Audio Tuner";
case PhysConn_Audio_Line: return "Audio Line";
case PhysConn_Audio_Mic: return "Audio Microphone";
case PhysConn_Audio_AESDigital: return "Audio AES/EBU Digital";
case PhysConn_Audio_SPDIFDigital: return "Audio S/PDIF";
case PhysConn_Audio_SCSI: return "Audio SCSI";
case PhysConn_Audio_AUX: return "Audio AUX";
case PhysConn_Audio_1394: return "Audio 1394";
case PhysConn_Audio_USB: return "Audio USB";
case PhysConn_Audio_AudioDecoder: return "Audio Decoder";
default: return "Unknown Crossbar Pin Type—Please report!";
}
}
static HRESULT
setup_crossbar_options(IAMCrossbar *cross_bar, enum dshowDeviceType devtype, AVFormatContext *avctx)
{
struct dshow_ctx *ctx = avctx->priv_data;
long count_output_pins, count_input_pins;
int i;
int log_level = ctx->list_options ? AV_LOG_INFO : AV_LOG_DEBUG;
int video_input_pin = ctx->crossbar_video_input_pin_number;
int audio_input_pin = ctx->crossbar_audio_input_pin_number;
const char *device_name = ctx->device_name[devtype];
HRESULT hr;
av_log(avctx, log_level, "Crossbar Switching Information for %s:\n", device_name);
hr = IAMCrossbar_get_PinCounts(cross_bar, &count_output_pins, &count_input_pins);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar pin counts\n");
return hr;
}
for (i = 0; i < count_output_pins; i++)
{
int j;
long related_pin, pin_type, route_to_pin;
hr = IAMCrossbar_get_CrossbarPinInfo(cross_bar, FALSE, i, &related_pin, &pin_type);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar info for output pin %d\n", i);
return hr;
}
if (pin_type == PhysConn_Video_VideoDecoder) {
/* Assume there is only one "Video Decoder" (and one "Audio Decoder") output pin; for now, that is all we care about routing to. */
if (video_input_pin != -1) {
av_log(avctx, log_level, "Routing video input from pin %d\n", video_input_pin);
hr = IAMCrossbar_Route(cross_bar, i, video_input_pin);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to route video input from pin %d\n", video_input_pin);
return AVERROR(EIO);
}
}
} else if (pin_type == PhysConn_Audio_AudioDecoder) {
if (audio_input_pin != -1) {
av_log(avctx, log_level, "Routing audio input from pin %d\n", audio_input_pin);
hr = IAMCrossbar_Route(cross_bar, i, audio_input_pin);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to route audio input from pin %d\n", audio_input_pin);
return hr;
}
}
} else {
av_log(avctx, AV_LOG_WARNING, "Unexpected output pin type, please report the type if you want to use this (%s)", GetPhysicalPinName(pin_type));
}
hr = IAMCrossbar_get_IsRoutedTo(cross_bar, i, &route_to_pin);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar is routed to from pin %d\n", i);
return hr;
}
av_log(avctx, log_level, " Crossbar Output pin %d: \"%s\" related output pin: %ld ", i, GetPhysicalPinName(pin_type), related_pin);
av_log(avctx, log_level, "current input pin: %ld ", route_to_pin);
av_log(avctx, log_level, "compatible input pins: ");
for (j = 0; j < count_input_pins; j++)
{
hr = IAMCrossbar_CanRoute(cross_bar, i, j);
if (hr == S_OK)
av_log(avctx, log_level, "%d ", j);
}
av_log(avctx, log_level, "\n");
}
for (i = 0; i < count_input_pins; i++)
{
long related_pin, pin_type;
hr = IAMCrossbar_get_CrossbarPinInfo(cross_bar, TRUE, i, &related_pin, &pin_type);
if (hr != S_OK) {
av_log(avctx, AV_LOG_ERROR, "unable to get crossbar info audio input from pin %d\n", i);
return hr;
}
av_log(avctx, log_level, " Crossbar Input pin %d - \"%s\" ", i, GetPhysicalPinName(pin_type));
av_log(avctx, log_level, "related input pin: %ld\n", related_pin);
}
return S_OK;
}
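/*
 * The pin numbers consumed above come straight from the demuxer's
 * "crossbar_video_input_pin_number" / "crossbar_audio_input_pin_number"
 * options, e.g. (the device name "TV Tuner" is illustrative):
 *
 *     ffmpeg -f dshow -crossbar_video_input_pin_number 2 \
 *            -crossbar_audio_input_pin_number 1 -i video="TV Tuner" out.mkv
 */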
/**
* Given a fully constructed graph, check if there is a cross bar filter, and configure its pins if so.
*/
HRESULT
dshow_try_setup_crossbar_options(ICaptureGraphBuilder2 *graph_builder2,
IBaseFilter *device_filter, enum dshowDeviceType devtype, AVFormatContext *avctx)
{
struct dshow_ctx *ctx = avctx->priv_data;
IAMCrossbar *cross_bar = NULL;
IBaseFilter *cross_bar_base_filter = NULL;
IAMTVTuner *tv_tuner_filter = NULL;
IBaseFilter *tv_tuner_base_filter = NULL;
IAMAudioInputMixer *tv_audio_filter = NULL;
IBaseFilter *tv_audio_base_filter = NULL;
HRESULT hr;
hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, (const GUID *) NULL,
device_filter, &IID_IAMCrossbar, (void**) &cross_bar);
if (hr != S_OK) {
/* no crossbar found */
hr = S_OK;
goto end;
}
/* TODO some TV tuners apparently have multiple crossbars? */
if (devtype == VideoDevice && ctx->show_video_crossbar_connection_dialog ||
devtype == AudioDevice && ctx->show_audio_crossbar_connection_dialog) {
hr = IAMCrossbar_QueryInterface(cross_bar, &IID_IBaseFilter, (void **) &cross_bar_base_filter);
if (hr != S_OK)
goto end;
dshow_show_filter_properties(cross_bar_base_filter, avctx);
}
if (devtype == VideoDevice && ctx->show_analog_tv_tuner_dialog) {
hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, NULL,
device_filter, &IID_IAMTVTuner, (void**) &tv_tuner_filter);
if (hr == S_OK) {
hr = IAMCrossbar_QueryInterface(tv_tuner_filter, &IID_IBaseFilter, (void **) &tv_tuner_base_filter);
if (hr != S_OK)
goto end;
dshow_show_filter_properties(tv_tuner_base_filter, avctx);
} else {
av_log(avctx, AV_LOG_WARNING, "unable to find a tv tuner to display dialog for!");
}
}
if (devtype == AudioDevice && ctx->show_analog_tv_tuner_audio_dialog) {
hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, NULL,
device_filter, &IID_IAMTVAudio, (void**) &tv_audio_filter);
if (hr == S_OK) {
hr = IAMCrossbar_QueryInterface(tv_audio_filter, &IID_IBaseFilter, (void **) &tv_audio_base_filter);
if (hr != S_OK)
goto end;
dshow_show_filter_properties(tv_audio_base_filter, avctx);
} else {
av_log(avctx, AV_LOG_WARNING, "unable to find a tv audio tuner to display dialog for!");
}
}
hr = setup_crossbar_options(cross_bar, devtype, avctx);
if (hr != S_OK)
goto end;
end:
if (cross_bar)
IAMCrossbar_Release(cross_bar);
if (cross_bar_base_filter)
IBaseFilter_Release(cross_bar_base_filter);
if (tv_tuner_filter)
IAMTVTuner_Release(tv_tuner_filter);
if (tv_tuner_base_filter)
IBaseFilter_Release(tv_tuner_base_filter);
return hr;
}

View File

@@ -0,0 +1,103 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
DECLARE_QUERYINTERFACE(libAVEnumMediaTypes,
{ {&IID_IUnknown,0}, {&IID_IEnumMediaTypes,0} })
DECLARE_ADDREF(libAVEnumMediaTypes)
DECLARE_RELEASE(libAVEnumMediaTypes)
long WINAPI
libAVEnumMediaTypes_Next(libAVEnumMediaTypes *this, unsigned long n,
AM_MEDIA_TYPE **types, unsigned long *fetched)
{
int count = 0;
dshowdebug("libAVEnumMediaTypes_Next(%p)\n", this);
if (!types)
return E_POINTER;
if (!this->pos && n == 1) {
if (!IsEqualGUID(&this->type.majortype, &GUID_NULL)) {
AM_MEDIA_TYPE *type = av_malloc(sizeof(AM_MEDIA_TYPE));
if (!type)
return E_OUTOFMEMORY;
ff_copy_dshow_media_type(type, &this->type);
*types = type;
count = 1;
}
this->pos = 1;
}
if (fetched)
*fetched = count;
if (!count)
return S_FALSE;
return S_OK;
}
long WINAPI
libAVEnumMediaTypes_Skip(libAVEnumMediaTypes *this, unsigned long n)
{
dshowdebug("libAVEnumMediaTypes_Skip(%p)\n", this);
if (n) /* Any skip will always fall outside of the only valid type. */
return S_FALSE;
return S_OK;
}
long WINAPI
libAVEnumMediaTypes_Reset(libAVEnumMediaTypes *this)
{
dshowdebug("libAVEnumMediaTypes_Reset(%p)\n", this);
this->pos = 0;
return S_OK;
}
long WINAPI
libAVEnumMediaTypes_Clone(libAVEnumMediaTypes *this, libAVEnumMediaTypes **enums)
{
libAVEnumMediaTypes *new;
dshowdebug("libAVEnumMediaTypes_Clone(%p)\n", this);
if (!enums)
return E_POINTER;
new = libAVEnumMediaTypes_Create(&this->type);
if (!new)
return E_OUTOFMEMORY;
new->pos = this->pos;
*enums = new;
return S_OK;
}
static int
libAVEnumMediaTypes_Setup(libAVEnumMediaTypes *this, const AM_MEDIA_TYPE *type)
{
IEnumMediaTypesVtbl *vtbl = this->vtbl;
SETVTBL(vtbl, libAVEnumMediaTypes, QueryInterface);
SETVTBL(vtbl, libAVEnumMediaTypes, AddRef);
SETVTBL(vtbl, libAVEnumMediaTypes, Release);
SETVTBL(vtbl, libAVEnumMediaTypes, Next);
SETVTBL(vtbl, libAVEnumMediaTypes, Skip);
SETVTBL(vtbl, libAVEnumMediaTypes, Reset);
SETVTBL(vtbl, libAVEnumMediaTypes, Clone);
if (!type) {
this->type.majortype = GUID_NULL;
} else {
ff_copy_dshow_media_type(&this->type, type);
}
return 1;
}
DECLARE_CREATE(libAVEnumMediaTypes, libAVEnumMediaTypes_Setup(this, type), const AM_MEDIA_TYPE *type)
DECLARE_DESTROY(libAVEnumMediaTypes, nothing)
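/*
 * For context, a consumer in the graph would walk this single-entry
 * enumerator roughly as follows (a sketch; "pin" stands for whichever
 * IPin handed the enumerator out, and the caller owns each returned type
 * and releases it afterwards per the usual DirectShow rules):
 *
 *     IEnumMediaTypes *et;
 *     AM_MEDIA_TYPE *type;
 *     unsigned long fetched;
 *     if (IPin_EnumMediaTypes(pin, &et) == S_OK) {
 *         while (IEnumMediaTypes_Next(et, 1, &type, &fetched) == S_OK)
 *             ff_print_AM_MEDIA_TYPE(type);   (then free the type)
 *         IEnumMediaTypes_Release(et);
 *     }
 */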

View File

@@ -0,0 +1,105 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
DECLARE_QUERYINTERFACE(libAVEnumPins,
{ {&IID_IUnknown,0}, {&IID_IEnumPins,0} })
DECLARE_ADDREF(libAVEnumPins)
DECLARE_RELEASE(libAVEnumPins)
long WINAPI
libAVEnumPins_Next(libAVEnumPins *this, unsigned long n, IPin **pins,
unsigned long *fetched)
{
int count = 0;
dshowdebug("libAVEnumPins_Next(%p)\n", this);
if (!pins)
return E_POINTER;
if (!this->pos && n == 1) {
libAVPin_AddRef(this->pin);
*pins = (IPin *) this->pin;
count = 1;
this->pos = 1;
}
if (fetched)
*fetched = count;
if (!count)
return S_FALSE;
return S_OK;
}
long WINAPI
libAVEnumPins_Skip(libAVEnumPins *this, unsigned long n)
{
dshowdebug("libAVEnumPins_Skip(%p)\n", this);
if (n) /* Any skip will always fall outside of the only valid pin. */
return S_FALSE;
return S_OK;
}
long WINAPI
libAVEnumPins_Reset(libAVEnumPins *this)
{
dshowdebug("libAVEnumPins_Reset(%p)\n", this);
this->pos = 0;
return S_OK;
}
long WINAPI
libAVEnumPins_Clone(libAVEnumPins *this, libAVEnumPins **pins)
{
libAVEnumPins *new;
dshowdebug("libAVEnumPins_Clone(%p)\n", this);
if (!pins)
return E_POINTER;
new = libAVEnumPins_Create(this->pin, this->filter);
if (!new)
return E_OUTOFMEMORY;
new->pos = this->pos;
*pins = new;
return S_OK;
}
static int
libAVEnumPins_Setup(libAVEnumPins *this, libAVPin *pin, libAVFilter *filter)
{
IEnumPinsVtbl *vtbl = this->vtbl;
SETVTBL(vtbl, libAVEnumPins, QueryInterface);
SETVTBL(vtbl, libAVEnumPins, AddRef);
SETVTBL(vtbl, libAVEnumPins, Release);
SETVTBL(vtbl, libAVEnumPins, Next);
SETVTBL(vtbl, libAVEnumPins, Skip);
SETVTBL(vtbl, libAVEnumPins, Reset);
SETVTBL(vtbl, libAVEnumPins, Clone);
this->pin = pin;
this->filter = filter;
libAVFilter_AddRef(this->filter);
return 1;
}
static int
libAVEnumPins_Cleanup(libAVEnumPins *this)
{
libAVFilter_Release(this->filter);
return 1;
}
DECLARE_CREATE(libAVEnumPins, libAVEnumPins_Setup(this, pin, filter),
libAVPin *pin, libAVFilter *filter)
DECLARE_DESTROY(libAVEnumPins, libAVEnumPins_Cleanup)

View File

@@ -0,0 +1,202 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
DECLARE_QUERYINTERFACE(libAVFilter,
{ {&IID_IUnknown,0}, {&IID_IBaseFilter,0} })
DECLARE_ADDREF(libAVFilter)
DECLARE_RELEASE(libAVFilter)
long WINAPI
libAVFilter_GetClassID(libAVFilter *this, CLSID *id)
{
dshowdebug("libAVFilter_GetClassID(%p)\n", this);
/* I'm not creating a ClassID just for this. */
return E_FAIL;
}
long WINAPI
libAVFilter_Stop(libAVFilter *this)
{
dshowdebug("libAVFilter_Stop(%p)\n", this);
this->state = State_Stopped;
return S_OK;
}
long WINAPI
libAVFilter_Pause(libAVFilter *this)
{
dshowdebug("libAVFilter_Pause(%p)\n", this);
this->state = State_Paused;
return S_OK;
}
long WINAPI
libAVFilter_Run(libAVFilter *this, REFERENCE_TIME start)
{
dshowdebug("libAVFilter_Run(%p) %"PRId64"\n", this, start);
this->state = State_Running;
this->start_time = start;
return S_OK;
}
long WINAPI
libAVFilter_GetState(libAVFilter *this, DWORD ms, FILTER_STATE *state)
{
dshowdebug("libAVFilter_GetState(%p)\n", this);
if (!state)
return E_POINTER;
*state = this->state;
return S_OK;
}
long WINAPI
libAVFilter_SetSyncSource(libAVFilter *this, IReferenceClock *clock)
{
dshowdebug("libAVFilter_SetSyncSource(%p)\n", this);
if (this->clock != clock) {
if (this->clock)
IReferenceClock_Release(this->clock);
this->clock = clock;
if (clock)
IReferenceClock_AddRef(clock);
}
return S_OK;
}
long WINAPI
libAVFilter_GetSyncSource(libAVFilter *this, IReferenceClock **clock)
{
dshowdebug("libAVFilter_GetSyncSource(%p)\n", this);
if (!clock)
return E_POINTER;
if (this->clock)
IReferenceClock_AddRef(this->clock);
*clock = this->clock;
return S_OK;
}
long WINAPI
libAVFilter_EnumPins(libAVFilter *this, IEnumPins **enumpin)
{
libAVEnumPins *new;
dshowdebug("libAVFilter_EnumPins(%p)\n", this);
if (!enumpin)
return E_POINTER;
new = libAVEnumPins_Create(this->pin, this);
if (!new)
return E_OUTOFMEMORY;
*enumpin = (IEnumPins *) new;
return S_OK;
}
long WINAPI
libAVFilter_FindPin(libAVFilter *this, const wchar_t *id, IPin **pin)
{
libAVPin *found = NULL;
dshowdebug("libAVFilter_FindPin(%p)\n", this);
if (!id || !pin)
return E_POINTER;
if (!wcscmp(id, L"In")) {
found = this->pin;
libAVPin_AddRef(found);
}
*pin = (IPin *) found;
if (!found)
return VFW_E_NOT_FOUND;
return S_OK;
}
long WINAPI
libAVFilter_QueryFilterInfo(libAVFilter *this, FILTER_INFO *info)
{
dshowdebug("libAVFilter_QueryFilterInfo(%p)\n", this);
if (!info)
return E_POINTER;
if (this->info.pGraph)
IFilterGraph_AddRef(this->info.pGraph);
*info = this->info;
return S_OK;
}
long WINAPI
libAVFilter_JoinFilterGraph(libAVFilter *this, IFilterGraph *graph,
const wchar_t *name)
{
dshowdebug("libAVFilter_JoinFilterGraph(%p)\n", this);
this->info.pGraph = graph;
if (name)
wcscpy(this->info.achName, name);
return S_OK;
}
long WINAPI
libAVFilter_QueryVendorInfo(libAVFilter *this, wchar_t **info)
{
dshowdebug("libAVFilter_QueryVendorInfo(%p)\n", this);
if (!info)
return E_POINTER;
*info = wcsdup(L"libAV");
return S_OK;
}
static int
libAVFilter_Setup(libAVFilter *this, void *priv_data, void *callback,
enum dshowDeviceType type)
{
IBaseFilterVtbl *vtbl = this->vtbl;
SETVTBL(vtbl, libAVFilter, QueryInterface);
SETVTBL(vtbl, libAVFilter, AddRef);
SETVTBL(vtbl, libAVFilter, Release);
SETVTBL(vtbl, libAVFilter, GetClassID);
SETVTBL(vtbl, libAVFilter, Stop);
SETVTBL(vtbl, libAVFilter, Pause);
SETVTBL(vtbl, libAVFilter, Run);
SETVTBL(vtbl, libAVFilter, GetState);
SETVTBL(vtbl, libAVFilter, SetSyncSource);
SETVTBL(vtbl, libAVFilter, GetSyncSource);
SETVTBL(vtbl, libAVFilter, EnumPins);
SETVTBL(vtbl, libAVFilter, FindPin);
SETVTBL(vtbl, libAVFilter, QueryFilterInfo);
SETVTBL(vtbl, libAVFilter, JoinFilterGraph);
SETVTBL(vtbl, libAVFilter, QueryVendorInfo);
this->pin = libAVPin_Create(this);
this->priv_data = priv_data;
this->callback = callback;
this->type = type;
return 1;
}
static int
libAVFilter_Cleanup(libAVFilter *this)
{
libAVPin_Release(this->pin);
return 1;
}
DECLARE_CREATE(libAVFilter, libAVFilter_Setup(this, priv_data, callback, type),
void *priv_data, void *callback, enum dshowDeviceType type)
DECLARE_DESTROY(libAVFilter, libAVFilter_Cleanup)
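/*
 * Sketch of how the capture side is expected to instantiate this filter;
 * the callback signature mirrors the pointer declared in struct
 * libAVFilter, and the names below ("frame_cb", "avctx") are illustrative:
 *
 *     static void frame_cb(void *priv_data, int index, uint8_t *buf,
 *                          int buf_size, int64_t time,
 *                          enum dshowDeviceType type)
 *     {
 *         (queue buf/buf_size/time as a packet for stream "index")
 *     }
 *
 *     libAVFilter *f = libAVFilter_Create(avctx, frame_cb, VideoDevice);
 *     if (!f)
 *         return AVERROR(ENOMEM);
 */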

View File

@@ -0,0 +1,384 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
#include <stddef.h>
#define imemoffset offsetof(libAVPin, imemvtbl)
DECLARE_QUERYINTERFACE(libAVPin,
{ {&IID_IUnknown,0}, {&IID_IPin,0}, {&IID_IMemInputPin,imemoffset} })
DECLARE_ADDREF(libAVPin)
DECLARE_RELEASE(libAVPin)
long WINAPI
libAVPin_Connect(libAVPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
{
dshowdebug("libAVPin_Connect(%p, %p, %p)\n", this, pin, type);
/* Input pins receive connections. */
return S_FALSE;
}
long WINAPI
libAVPin_ReceiveConnection(libAVPin *this, IPin *pin,
const AM_MEDIA_TYPE *type)
{
enum dshowDeviceType devtype = this->filter->type;
dshowdebug("libAVPin_ReceiveConnection(%p)\n", this);
if (!pin)
return E_POINTER;
if (this->connectedto)
return VFW_E_ALREADY_CONNECTED;
ff_print_AM_MEDIA_TYPE(type);
if (devtype == VideoDevice) {
if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Video))
return VFW_E_TYPE_NOT_ACCEPTED;
} else {
if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Audio))
return VFW_E_TYPE_NOT_ACCEPTED;
}
IPin_AddRef(pin);
this->connectedto = pin;
ff_copy_dshow_media_type(&this->type, type);
return S_OK;
}
long WINAPI
libAVPin_Disconnect(libAVPin *this)
{
dshowdebug("libAVPin_Disconnect(%p)\n", this);
if (this->filter->state != State_Stopped)
return VFW_E_NOT_STOPPED;
if (!this->connectedto)
return S_FALSE;
IPin_Release(this->connectedto);
this->connectedto = NULL;
return S_OK;
}
long WINAPI
libAVPin_ConnectedTo(libAVPin *this, IPin **pin)
{
dshowdebug("libAVPin_ConnectedTo(%p)\n", this);
if (!pin)
return E_POINTER;
if (!this->connectedto)
return VFW_E_NOT_CONNECTED;
IPin_AddRef(this->connectedto);
*pin = this->connectedto;
return S_OK;
}
long WINAPI
libAVPin_ConnectionMediaType(libAVPin *this, AM_MEDIA_TYPE *type)
{
dshowdebug("libAVPin_ConnectionMediaType(%p)\n", this);
if (!type)
return E_POINTER;
if (!this->connectedto)
return VFW_E_NOT_CONNECTED;
return ff_copy_dshow_media_type(type, &this->type);
}
long WINAPI
libAVPin_QueryPinInfo(libAVPin *this, PIN_INFO *info)
{
dshowdebug("libAVPin_QueryPinInfo(%p)\n", this);
if (!info)
return E_POINTER;
if (this->filter)
libAVFilter_AddRef(this->filter);
info->pFilter = (IBaseFilter *) this->filter;
info->dir = PINDIR_INPUT;
wcscpy(info->achName, L"Capture");
return S_OK;
}
long WINAPI
libAVPin_QueryDirection(libAVPin *this, PIN_DIRECTION *dir)
{
dshowdebug("libAVPin_QueryDirection(%p)\n", this);
if (!dir)
return E_POINTER;
*dir = PINDIR_INPUT;
return S_OK;
}
long WINAPI
libAVPin_QueryId(libAVPin *this, wchar_t **id)
{
dshowdebug("libAVPin_QueryId(%p)\n", this);
if (!id)
return E_POINTER;
*id = wcsdup(L"libAV Pin");
return S_OK;
}
long WINAPI
libAVPin_QueryAccept(libAVPin *this, const AM_MEDIA_TYPE *type)
{
dshowdebug("libAVPin_QueryAccept(%p)\n", this);
return S_FALSE;
}
long WINAPI
libAVPin_EnumMediaTypes(libAVPin *this, IEnumMediaTypes **enumtypes)
{
const AM_MEDIA_TYPE *type = NULL;
libAVEnumMediaTypes *new;
dshowdebug("libAVPin_EnumMediaTypes(%p)\n", this);
if (!enumtypes)
return E_POINTER;
new = libAVEnumMediaTypes_Create(type);
if (!new)
return E_OUTOFMEMORY;
*enumtypes = (IEnumMediaTypes *) new;
return S_OK;
}
long WINAPI
libAVPin_QueryInternalConnections(libAVPin *this, IPin **pin,
unsigned long *npin)
{
dshowdebug("libAVPin_QueryInternalConnections(%p)\n", this);
return E_NOTIMPL;
}
long WINAPI
libAVPin_EndOfStream(libAVPin *this)
{
dshowdebug("libAVPin_EndOfStream(%p)\n", this);
/* I don't care. */
return S_OK;
}
long WINAPI
libAVPin_BeginFlush(libAVPin *this)
{
dshowdebug("libAVPin_BeginFlush(%p)\n", this);
/* I don't care. */
return S_OK;
}
long WINAPI
libAVPin_EndFlush(libAVPin *this)
{
dshowdebug("libAVPin_EndFlush(%p)\n", this);
/* I don't care. */
return S_OK;
}
long WINAPI
libAVPin_NewSegment(libAVPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
double rate)
{
dshowdebug("libAVPin_NewSegment(%p)\n", this);
/* I don't care. */
return S_OK;
}
static int
libAVPin_Setup(libAVPin *this, libAVFilter *filter)
{
IPinVtbl *vtbl = this->vtbl;
IMemInputPinVtbl *imemvtbl;
if (!filter)
return 0;
imemvtbl = av_malloc(sizeof(IMemInputPinVtbl));
if (!imemvtbl)
return 0;
SETVTBL(imemvtbl, libAVMemInputPin, QueryInterface);
SETVTBL(imemvtbl, libAVMemInputPin, AddRef);
SETVTBL(imemvtbl, libAVMemInputPin, Release);
SETVTBL(imemvtbl, libAVMemInputPin, GetAllocator);
SETVTBL(imemvtbl, libAVMemInputPin, NotifyAllocator);
SETVTBL(imemvtbl, libAVMemInputPin, GetAllocatorRequirements);
SETVTBL(imemvtbl, libAVMemInputPin, Receive);
SETVTBL(imemvtbl, libAVMemInputPin, ReceiveMultiple);
SETVTBL(imemvtbl, libAVMemInputPin, ReceiveCanBlock);
this->imemvtbl = imemvtbl;
SETVTBL(vtbl, libAVPin, QueryInterface);
SETVTBL(vtbl, libAVPin, AddRef);
SETVTBL(vtbl, libAVPin, Release);
SETVTBL(vtbl, libAVPin, Connect);
SETVTBL(vtbl, libAVPin, ReceiveConnection);
SETVTBL(vtbl, libAVPin, Disconnect);
SETVTBL(vtbl, libAVPin, ConnectedTo);
SETVTBL(vtbl, libAVPin, ConnectionMediaType);
SETVTBL(vtbl, libAVPin, QueryPinInfo);
SETVTBL(vtbl, libAVPin, QueryDirection);
SETVTBL(vtbl, libAVPin, QueryId);
SETVTBL(vtbl, libAVPin, QueryAccept);
SETVTBL(vtbl, libAVPin, EnumMediaTypes);
SETVTBL(vtbl, libAVPin, QueryInternalConnections);
SETVTBL(vtbl, libAVPin, EndOfStream);
SETVTBL(vtbl, libAVPin, BeginFlush);
SETVTBL(vtbl, libAVPin, EndFlush);
SETVTBL(vtbl, libAVPin, NewSegment);
this->filter = filter;
return 1;
}
DECLARE_CREATE(libAVPin, libAVPin_Setup(this, filter), libAVFilter *filter)
DECLARE_DESTROY(libAVPin, nothing)
/*****************************************************************************
* libAVMemInputPin
****************************************************************************/
long WINAPI
libAVMemInputPin_QueryInterface(libAVMemInputPin *this, const GUID *riid,
void **ppvObject)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_QueryInterface(%p)\n", this);
return libAVPin_QueryInterface(pin, riid, ppvObject);
}
unsigned long WINAPI
libAVMemInputPin_AddRef(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_AddRef(%p)\n", this);
return libAVPin_AddRef(pin);
}
unsigned long WINAPI
libAVMemInputPin_Release(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_Release(%p)\n", this);
return libAVPin_Release(pin);
}
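/*
 * The pointer arithmetic above is the usual container-of idiom: the
 * IMemInputPin vtable pointer lives at offset "imemoffset" inside
 * libAVPin, so stepping back by that offset recovers the owning pin.
 * Written out as a helper (illustrative, not part of the original file):
 *
 *     static inline libAVPin *pin_from_imem(libAVMemInputPin *imem)
 *     {
 *         return (libAVPin *) ((uint8_t *) imem
 *                              - offsetof(libAVPin, imemvtbl));
 *     }
 */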
long WINAPI
libAVMemInputPin_GetAllocator(libAVMemInputPin *this, IMemAllocator **alloc)
{
dshowdebug("libAVMemInputPin_GetAllocator(%p)\n", this);
return VFW_E_NO_ALLOCATOR;
}
long WINAPI
libAVMemInputPin_NotifyAllocator(libAVMemInputPin *this, IMemAllocator *alloc,
BOOL rdwr)
{
dshowdebug("libAVMemInputPin_NotifyAllocator(%p)\n", this);
return S_OK;
}
long WINAPI
libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *this,
ALLOCATOR_PROPERTIES *props)
{
dshowdebug("libAVMemInputPin_GetAllocatorRequirements(%p)\n", this);
return E_NOTIMPL;
}
long WINAPI
libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
enum dshowDeviceType devtype = pin->filter->type;
void *priv_data;
AVFormatContext *s;
uint8_t *buf;
int buf_size; /* todo should be a long? */
int index;
int64_t curtime;
int64_t orig_curtime;
int64_t graphtime;
const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
IReferenceClock *clock = pin->filter->clock;
int64_t dummy;
struct dshow_ctx *ctx;
dshowdebug("libAVMemInputPin_Receive(%p)\n", this);
if (!sample)
return E_POINTER;
IMediaSample_GetTime(sample, &orig_curtime, &dummy);
orig_curtime += pin->filter->start_time;
IReferenceClock_GetTime(clock, &graphtime);
if (devtype == VideoDevice) {
/* PTS from video devices is unreliable. */
IReferenceClock_GetTime(clock, &curtime);
} else {
IMediaSample_GetTime(sample, &curtime, &dummy);
if (curtime > 400000000000000000LL) {
/* Initial frames sometimes start < 0 (shown as a very large number here,
like 437650244077016960, which FFmpeg doesn't like).
TODO: figure out the math. For now just drop them. */
av_log(NULL, AV_LOG_DEBUG,
"dshow dropping initial (or ending) audio frame with odd PTS too high %"PRId64"\n", curtime);
return S_OK;
}
curtime += pin->filter->start_time;
}
buf_size = IMediaSample_GetActualDataLength(sample);
IMediaSample_GetPointer(sample, &buf);
priv_data = pin->filter->priv_data;
s = priv_data;
ctx = s->priv_data;
index = pin->filter->stream_index;
av_log(NULL, AV_LOG_VERBOSE, "dshow passing through packet of type %s size %8d "
"timestamp %"PRId64" orig timestamp %"PRId64" graph timestamp %"PRId64" diff %"PRId64" %s\n",
devtypename, buf_size, curtime, orig_curtime, graphtime, graphtime - orig_curtime, ctx->device_name[devtype]);
pin->filter->callback(priv_data, index, buf, buf_size, curtime, devtype);
return S_OK;
}
long WINAPI
libAVMemInputPin_ReceiveMultiple(libAVMemInputPin *this,
IMediaSample **samples, long n, long *nproc)
{
int i;
dshowdebug("libAVMemInputPin_ReceiveMultiple(%p)\n", this);
for (i = 0; i < n; i++)
libAVMemInputPin_Receive(this, samples[i]);
*nproc = n;
return S_OK;
}
long WINAPI
libAVMemInputPin_ReceiveCanBlock(libAVMemInputPin *this)
{
dshowdebug("libAVMemInputPin_ReceiveCanBlock(%p)\n", this);
/* I swear I will not block. */
return S_FALSE;
}
void
libAVMemInputPin_Destroy(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_Destroy(%p)\n", this);
libAVPin_Destroy(pin);
}

View File

@@ -0,0 +1,239 @@
/*
* Linux DV1394 interface
* Copyright (c) 2003 Max Krasnyansky <maxk@qualcomm.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <poll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#include "libavformat/dv.h"
#include "dv1394.h"
struct dv1394_data {
AVClass *class;
int fd;
int channel;
int format;
uint8_t *ring; /* Ring buffer */
int index; /* Current frame index */
int avail; /* Number of frames available for reading */
int done; /* Number of completed frames */
DVDemuxContext* dv_demux; /* Generic DV muxing/demuxing context */
};
/*
* The trick here is to kludge around a well-known problem with the kernel
* Oopsing when you try to capture PAL on a device node configured for NTSC.
* That's why we have to configure the device node for PAL, and then read
* only an NTSC amount of data.
*/
static int dv1394_reset(struct dv1394_data *dv)
{
struct dv1394_init init;
init.channel = dv->channel;
init.api_version = DV1394_API_VERSION;
init.n_frames = DV1394_RING_FRAMES;
init.format = DV1394_PAL;
if (ioctl(dv->fd, DV1394_INIT, &init) < 0)
return -1;
dv->avail = dv->done = 0;
return 0;
}
static int dv1394_start(struct dv1394_data *dv)
{
/* Tell DV1394 driver to enable receiver */
if (ioctl(dv->fd, DV1394_START_RECEIVE, 0) < 0) {
av_log(NULL, AV_LOG_ERROR, "Failed to start receiver: %s\n", strerror(errno));
return -1;
}
return 0;
}
static int dv1394_read_header(AVFormatContext * context)
{
struct dv1394_data *dv = context->priv_data;
dv->dv_demux = avpriv_dv_init_demux(context);
if (!dv->dv_demux)
return AVERROR(EIO); /* nothing to clean up yet; avoid closing an unopened fd */
/* Open and initialize DV1394 device */
dv->fd = avpriv_open(context->filename, O_RDONLY);
if (dv->fd < 0) {
av_log(context, AV_LOG_ERROR, "Failed to open DV interface: %s\n", strerror(errno));
goto failed;
}
if (dv1394_reset(dv) < 0) {
av_log(context, AV_LOG_ERROR, "Failed to initialize DV interface: %s\n", strerror(errno));
goto failed;
}
dv->ring = mmap(NULL, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES,
PROT_READ, MAP_PRIVATE, dv->fd, 0);
if (dv->ring == MAP_FAILED) {
av_log(context, AV_LOG_ERROR, "Failed to mmap DV ring buffer: %s\n", strerror(errno));
goto failed;
}
if (dv1394_start(dv) < 0)
goto failed;
return 0;
failed:
close(dv->fd);
return AVERROR(EIO);
}
static int dv1394_read_packet(AVFormatContext *context, AVPacket *pkt)
{
struct dv1394_data *dv = context->priv_data;
int size;
size = avpriv_dv_get_packet(dv->dv_demux, pkt);
if (size > 0)
return size;
if (!dv->avail) {
struct dv1394_status s;
struct pollfd p;
if (dv->done) {
/* Request more frames */
if (ioctl(dv->fd, DV1394_RECEIVE_FRAMES, dv->done) < 0) {
/* This usually means that the ring buffer overflowed.
* We have to reset :(.
*/
av_log(context, AV_LOG_ERROR, "DV1394: Ring buffer overflow. Resetting...\n");
dv1394_reset(dv);
dv1394_start(dv);
}
dv->done = 0;
}
/* Wait until more frames are available */
restart_poll:
p.fd = dv->fd;
p.events = POLLIN | POLLERR | POLLHUP;
if (poll(&p, 1, -1) < 0) {
if (errno == EAGAIN || errno == EINTR)
goto restart_poll;
av_log(context, AV_LOG_ERROR, "Poll failed: %s\n", strerror(errno));
return AVERROR(EIO);
}
if (ioctl(dv->fd, DV1394_GET_STATUS, &s) < 0) {
av_log(context, AV_LOG_ERROR, "Failed to get status: %s\n", strerror(errno));
return AVERROR(EIO);
}
av_log(context, AV_LOG_TRACE, "DV1394: status\n"
"\tactive_frame\t%d\n"
"\tfirst_clear_frame\t%d\n"
"\tn_clear_frames\t%d\n"
"\tdropped_frames\t%d\n",
s.active_frame, s.first_clear_frame,
s.n_clear_frames, s.dropped_frames);
dv->avail = s.n_clear_frames;
dv->index = s.first_clear_frame;
dv->done = 0;
if (s.dropped_frames) {
av_log(context, AV_LOG_ERROR, "DV1394: Frame drop detected (%d). Reseting ..\n",
s.dropped_frames);
dv1394_reset(dv);
dv1394_start(dv);
}
}
av_log(context, AV_LOG_TRACE, "index %d, avail %d, done %d\n", dv->index, dv->avail,
dv->done);
size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
dv->ring + (dv->index * DV1394_PAL_FRAME_SIZE),
DV1394_PAL_FRAME_SIZE, -1);
dv->index = (dv->index + 1) % DV1394_RING_FRAMES;
dv->done++; dv->avail--;
return size;
}
static int dv1394_close(AVFormatContext * context)
{
struct dv1394_data *dv = context->priv_data;
/* Shutdown DV1394 receiver */
if (ioctl(dv->fd, DV1394_SHUTDOWN, 0) < 0)
av_log(context, AV_LOG_ERROR, "Failed to shutdown DV1394: %s\n", strerror(errno));
/* Unmap ring buffer */
if (munmap(dv->ring, DV1394_PAL_FRAME_SIZE * DV1394_RING_FRAMES) < 0) /* the mapping above is PAL-sized */
av_log(context, AV_LOG_ERROR, "Failed to munmap DV1394 ring buffer: %s\n", strerror(errno));
close(dv->fd);
av_freep(&dv->dv_demux);
return 0;
}
static const AVOption options[] = {
{ "standard", "", offsetof(struct dv1394_data, format), AV_OPT_TYPE_INT, {.i64 = DV1394_NTSC}, DV1394_NTSC, DV1394_PAL, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = DV1394_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = DV1394_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "channel", "", offsetof(struct dv1394_data, channel), AV_OPT_TYPE_INT, {.i64 = DV1394_DEFAULT_CHANNEL}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass dv1394_class = {
.class_name = "DV1394 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_dv1394_demuxer = {
.name = "dv1394",
.long_name = NULL_IF_CONFIG_SMALL("DV1394 A/V grab"),
.priv_data_size = sizeof(struct dv1394_data),
.read_header = dv1394_read_header,
.read_packet = dv1394_read_packet,
.read_close = dv1394_close,
.flags = AVFMT_NOFILE,
.priv_class = &dv1394_class,
};
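/*
 * With the options declared above, a typical capture invocation looks
 * like the following (device path as in the dv1394.h examples):
 *
 *     ffmpeg -f dv1394 -standard PAL -channel 63 -i /dev/dv1394 out.dv
 */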

View File

@@ -0,0 +1,357 @@
/*
* DV input/output over IEEE 1394 on OHCI chips
* Copyright (C)2001 Daniel Maas <dmaas@dcine.com>
* receive, proc_fs by Dan Dennedy <dan@dennedy.org>
*
* based on:
* video1394.h - driver for OHCI 1394 boards
* Copyright (C)1999,2000 Sebastien Rougeaux <sebastien.rougeaux@anu.edu.au>
* Peter Schlaile <udbz@rz.uni-karlsruhe.de>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DV1394_H
#define AVDEVICE_DV1394_H
#define DV1394_DEFAULT_CHANNEL 63
#define DV1394_DEFAULT_CARD 0
#define DV1394_RING_FRAMES 20
#define DV1394_WIDTH 720
#define DV1394_NTSC_HEIGHT 480
#define DV1394_PAL_HEIGHT 576
/* This is the public user-space interface. Try not to break it. */
#define DV1394_API_VERSION 0x20011127
/* ********************
   **                **
   **   DV1394 API   **
   **                **
   ********************
There are two methods of operating the DV1394 DV output device.
1)
The simplest is an interface based on write(): simply write
full DV frames of data to the device, and they will be transmitted
as quickly as possible. The FD may be set for non-blocking I/O,
in which case you can use select() or poll() to wait for output
buffer space.
To set the DV output parameters (e.g. whether you want NTSC or PAL
video), use the DV1394_INIT ioctl, passing in the parameters you
want in a struct dv1394_init.
Example 1:
To play a raw .DV file: cat foo.DV > /dev/dv1394
(cat will use write() internally)
Example 2:
static struct dv1394_init init = {
0x63, (broadcast channel)
4, (four-frame ringbuffer)
DV1394_NTSC, (send NTSC video)
0, 0 (default empty packet rate)
}
ioctl(fd, DV1394_INIT, &init);
while(1) {
read( <a raw DV file>, buf, DV1394_NTSC_FRAME_SIZE );
write( <the dv1394 FD>, buf, DV1394_NTSC_FRAME_SIZE );
}
2)
For more control over buffering, and to avoid unnecessary copies
of the DV data, you can use the more sophisticated mmap() interface.
First, call the DV1394_INIT ioctl to specify your parameters,
including the number of frames in the ringbuffer. Then, calling mmap()
on the dv1394 device will give you direct access to the ringbuffer
from which the DV card reads your frame data.
The ringbuffer is simply one large, contiguous region of memory
containing two or more frames of packed DV data. Each frame of DV data
is 120000 bytes (NTSC) or 144000 bytes (PAL).
Fill one or more frames in the ringbuffer, then use the DV1394_SUBMIT_FRAMES
ioctl to begin I/O. You can use either the DV1394_WAIT_FRAMES ioctl
or select()/poll() to wait until the frames are transmitted. Next, you'll
need to call the DV1394_GET_STATUS ioctl to determine which ringbuffer
frames are clear (ready to be filled with new DV data). Finally, use
DV1394_SUBMIT_FRAMES again to send the new data to the DV output.
Example: here is what a four-frame ringbuffer might look like
during DV transmission:
  frame 0    frame 1    frame 2    frame 3
*-------------------------------------------*
| CLEAR    | DV data  | DV data  | CLEAR    |
*-------------------------------------------*
             <ACTIVE>
transmission goes in this direction --->>>
The DV hardware is currently transmitting the data in frame 1.
Once frame 1 is finished, it will automatically transmit frame 2.
(if frame 2 finishes before frame 3 is submitted, the device
will continue to transmit frame 2, and will increase the dropped_frames
counter each time it repeats the transmission).
If you called DV1394_GET_STATUS at this instant, you would
receive the following values:
n_frames = 4
active_frame = 1
first_clear_frame = 3
n_clear_frames = 2
At this point, you should write new DV data into frame 3 and optionally
frame 0. Then call DV1394_SUBMIT_FRAMES to inform the device that
it may transmit the new frames.
ERROR HANDLING
An error (buffer underflow/overflow or a break in the DV stream due
to a 1394 bus reset) can be detected by checking the dropped_frames
field of struct dv1394_status (obtained through the
DV1394_GET_STATUS ioctl).
The best way to recover from such an error is to re-initialize
dv1394, either by using the DV1394_INIT ioctl call, or closing the
file descriptor and opening it again. (note that you must unmap all
ringbuffer mappings when closing the file descriptor, or else
dv1394 will still be considered 'in use').
MAIN LOOP
For maximum efficiency and robustness against bus errors, you are
advised to model the main loop of your application after the
following pseudo-code example:
(checks of system call return values omitted for brevity; always
check return values in your code!)
while( frames left ) {
struct pollfd *pfd = ...;
pfd->fd = dv1394_fd;
pfd->revents = 0;
pfd->events = POLLOUT | POLLIN; (OUT for transmit, IN for receive)
(add other sources of I/O here)
poll(pfd, 1, -1); (or select(); add a timeout if you want)
if(pfd->revents) {
struct dv1394_status status;
ioctl(dv1394_fd, DV1394_GET_STATUS, &status);
if(status.dropped_frames > 0) {
reset_dv1394();
} else {
int i;
for (i = 0; i < status.n_clear_frames; i++) {
copy_DV_frame();
}
}
}
}
where copy_DV_frame() reads or writes on the dv1394 file descriptor
(read/write mode) or copies data to/from the mmap ringbuffer and
then calls ioctl(DV1394_SUBMIT_FRAMES) to notify dv1394 that new
frames are available (mmap mode).
reset_dv1394() is called in the event of a buffer
underflow/overflow or a halt in the DV stream (e.g. due to a 1394
bus reset). To guarantee recovery from the error, this function
should close the dv1394 file descriptor (and munmap() all
ringbuffer mappings, if you are using them), then re-open the
dv1394 device (and re-map the ringbuffer).
*/
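/* A minimal transmit sketch of the mmap() flow described above. This is
   illustrative only: the device node name and the one_pal_frame buffer
   (one encoded PAL DV frame) are assumptions, and every return value
   should be checked in real code, as the MAIN LOOP above insists:

       int fd = open("/dev/dv1394", O_RDWR);

       struct dv1394_init init = {
           .api_version = DV1394_API_VERSION,
           .channel     = DV1394_DEFAULT_CHANNEL,
           .n_frames    = 4,
           .format      = DV1394_PAL,
       };
       ioctl(fd, DV1394_INIT, &init);

       uint8_t *ring = mmap(NULL, init.n_frames * DV1394_PAL_FRAME_SIZE,
                            PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

       struct dv1394_status st;
       ioctl(fd, DV1394_GET_STATUS, &st);
       memcpy(ring + st.first_clear_frame * DV1394_PAL_FRAME_SIZE,
              one_pal_frame, DV1394_PAL_FRAME_SIZE);
       ioctl(fd, DV1394_SUBMIT_FRAMES, 1);
*/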
/* maximum number of frames in the ringbuffer */
#define DV1394_MAX_FRAMES 32
/* number of *full* isochronous packets per DV frame */
#define DV1394_NTSC_PACKETS_PER_FRAME 250
#define DV1394_PAL_PACKETS_PER_FRAME 300
/* size of one frame's worth of DV data, in bytes */
#define DV1394_NTSC_FRAME_SIZE (480 * DV1394_NTSC_PACKETS_PER_FRAME)
#define DV1394_PAL_FRAME_SIZE (480 * DV1394_PAL_PACKETS_PER_FRAME)
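/* Each isochronous packet carries 480 bytes of frame data, so a frame is
   250 * 480 = 120000 bytes (NTSC) or 300 * 480 = 144000 bytes (PAL),
   matching the sizes quoted in the API description above. */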
/* ioctl() commands */
enum {
/* I don't like using 0 as a valid ioctl() */
DV1394_INVALID = 0,
/* get the driver ready to transmit video.
pass a struct dv1394_init* as the parameter (see below),
or NULL to get default parameters */
DV1394_INIT,
/* stop transmitting video and free the ringbuffer */
DV1394_SHUTDOWN,
/* submit N new frames to be transmitted, where
the index of the first new frame is first_clear_frame,
and the index of the last new frame is
(first_clear_frame + N) % n_frames */
DV1394_SUBMIT_FRAMES,
/* block until N buffers are clear (pass N as the parameter)
Because we re-transmit the last frame on underrun, there
will be at most n_frames - 1 clear frames at any time */
DV1394_WAIT_FRAMES,
/* capture new frames that have been received, where
the index of the first new frame is first_clear_frame,
and the index of the last new frame is
(first_clear_frame + N) % n_frames */
DV1394_RECEIVE_FRAMES,
DV1394_START_RECEIVE,
/* pass a struct dv1394_status* as the parameter (see below) */
DV1394_GET_STATUS,
};
enum pal_or_ntsc {
DV1394_NTSC = 0,
DV1394_PAL
};
/* this is the argument to DV1394_INIT */
struct dv1394_init {
/* DV1394_API_VERSION */
unsigned int api_version;
/* isochronous transmission channel to use */
unsigned int channel;
/* number of frames in the ringbuffer. Must be at least 2
and at most DV1394_MAX_FRAMES. */
unsigned int n_frames;
/* send/receive PAL or NTSC video format */
enum pal_or_ntsc format;
/* the following are used only for transmission */
/* set these to zero unless you want a
non-default empty packet rate (see below) */
unsigned long cip_n;
unsigned long cip_d;
/* set this to zero unless you want a
non-default SYT cycle offset (default = 3 cycles) */
unsigned int syt_offset;
};
/* NOTE: you may only allocate the DV frame ringbuffer once each time
you open the dv1394 device. DV1394_INIT will fail if you call it a
second time with different 'n_frames' or 'format' arguments (which
would imply a different size for the ringbuffer). If you need a
different buffer size, simply close and re-open the device, then
initialize it with your new settings. */
/* Q: What are cip_n and cip_d? */
/*
A: DV video streams do not utilize 100% of the potential bandwidth offered
by IEEE 1394 (FireWire). To achieve the correct rate of data transmission,
DV devices must periodically insert empty packets into the 1394 data stream.
Typically there is one empty packet per 14-16 data-carrying packets.
Some DV devices will accept a wide range of empty packet rates, while others
require a precise rate. If the dv1394 driver produces empty packets at
a rate that your device does not accept, you may see ugly patterns on the
DV output, or even no output at all.
The default empty packet insertion rate seems to work for many people; if
your DV output is stable, you can simply ignore this discussion. However,
we have exposed the empty packet rate as a parameter to support devices that
do not work with the default rate.
The decision to insert an empty packet is made with a numerator/denominator
algorithm. Empty packets are produced at an average rate of CIP_N / CIP_D.
You can alter the empty packet rate by passing non-zero values for cip_n
and cip_d to the INIT ioctl.
*/
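/* Illustrative example (not a device recommendation): passing cip_n = 1 and
   cip_d = 16 to DV1394_INIT requests on average one empty packet per 16
   packets, in the typical one-per-14..16 neighbourhood mentioned above. */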
struct dv1394_status {
/* this embedded init struct returns the current dv1394
parameters in use */
struct dv1394_init init;
/* the ringbuffer frame that is currently being
displayed. (-1 if the device is not transmitting anything) */
int active_frame;
/* index of the first buffer (ahead of active_frame) that
is ready to be filled with data */
unsigned int first_clear_frame;
/* how many buffers, including first_clear_frame, are
ready to be filled with data */
unsigned int n_clear_frames;
/* how many times the DV stream has underflowed, overflowed,
or otherwise encountered an error, since the previous call
to DV1394_GET_STATUS */
unsigned int dropped_frames;
/* N.B. The dropped_frames counter is only a lower bound on the actual
number of dropped frames, with the special case that if dropped_frames
is zero, then it is guaranteed that NO frames have been dropped
since the last call to DV1394_GET_STATUS.
*/
};
#endif /* AVDEVICE_DV1394_H */

View File

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include "fbdev_common.h"
#include "libavutil/common.h"
#include "avdevice.h"
struct rgb_pixfmt_map_entry {
int bits_per_pixel;
int red_offset, green_offset, blue_offset, alpha_offset;
enum AVPixelFormat pixfmt;
};
static const struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
// bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
{ 32, 0, 8, 16, 24, AV_PIX_FMT_RGBA },
{ 32, 16, 8, 0, 24, AV_PIX_FMT_BGRA },
{ 32, 8, 16, 24, 0, AV_PIX_FMT_ARGB },
{ 32, 3, 2, 8, 0, AV_PIX_FMT_ABGR },
{ 24, 0, 8, 16, 0, AV_PIX_FMT_RGB24 },
{ 24, 16, 8, 0, 0, AV_PIX_FMT_BGR24 },
{ 16, 11, 5, 0, 0, AV_PIX_FMT_RGB565 },
};
enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
{
int i;
for (i = 0; i < FF_ARRAY_ELEMS(rgb_pixfmt_map); i++) {
const struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
if (entry->bits_per_pixel == varinfo->bits_per_pixel &&
entry->red_offset == varinfo->red.offset &&
entry->green_offset == varinfo->green.offset &&
entry->blue_offset == varinfo->blue.offset)
return entry->pixfmt;
}
return AV_PIX_FMT_NONE;
}
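/* For example, a varinfo reporting bits_per_pixel = 32 with red.offset = 0,
 * green.offset = 8 and blue.offset = 16 matches the first table entry and
 * yields AV_PIX_FMT_RGBA. Note that the alpha offset is not part of the
 * comparison above. */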
const char* ff_fbdev_default_device()
{
const char *dev = getenv("FRAMEBUFFER");
if (!dev)
dev = "/dev/fb0";
return dev;
}
int ff_fbdev_get_device_list(AVDeviceInfoList *device_list)
{
struct fb_var_screeninfo varinfo;
struct fb_fix_screeninfo fixinfo;
char device_file[12];
AVDeviceInfo *device = NULL;
int i, fd, ret = 0;
const char *default_device = ff_fbdev_default_device();
if (!device_list)
return AVERROR(EINVAL);
for (i = 0; i <= 31; i++) {
snprintf(device_file, sizeof(device_file), "/dev/fb%d", i);
if ((fd = avpriv_open(device_file, O_RDWR)) < 0) {
int err = AVERROR(errno);
if (err != AVERROR(ENOENT))
av_log(NULL, AV_LOG_ERROR, "Could not open framebuffer device '%s': %s\n",
device_file, av_err2str(err));
continue;
}
if (ioctl(fd, FBIOGET_VSCREENINFO, &varinfo) == -1)
goto fail_device;
if (ioctl(fd, FBIOGET_FSCREENINFO, &fixinfo) == -1)
goto fail_device;
device = av_mallocz(sizeof(AVDeviceInfo));
if (!device) {
ret = AVERROR(ENOMEM);
goto fail_device;
}
device->device_name = av_strdup(device_file);
device->device_description = av_strdup(fixinfo.id);
if (!device->device_name || !device->device_description) {
ret = AVERROR(ENOMEM);
goto fail_device;
}
if ((ret = av_dynarray_add_nofree(&device_list->devices,
&device_list->nb_devices, device)) < 0)
goto fail_device;
if (default_device && !strcmp(device->device_name, default_device)) {
device_list->default_device = device_list->nb_devices - 1;
default_device = NULL;
}
close(fd);
continue;
fail_device:
if (device) {
av_freep(&device->device_name);
av_freep(&device->device_description);
av_freep(&device);
}
if (fd >= 0)
close(fd);
if (ret < 0)
return ret;
}
return 0;
}

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_FBDEV_COMMON_H
#define AVDEVICE_FBDEV_COMMON_H
#include <features.h>
#include <linux/fb.h>
#include "libavutil/pixfmt.h"
struct AVDeviceInfoList;
enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo);
const char* ff_fbdev_default_device(void);
int ff_fbdev_get_device_list(struct AVDeviceInfoList *device_list);
#endif /* AVDEVICE_FBDEV_COMMON_H */

View File

@@ -0,0 +1,245 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Linux framebuffer input device,
* inspired by code from fbgrab.c by Gunnar Monell.
* @see http://linux-fbdev.sourceforge.net/
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <time.h>
#include <linux/fb.h>
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "fbdev_common.h"
typedef struct FBDevContext {
AVClass *class; ///< class for private options
int frame_size; ///< size in bytes of a grabbed frame
AVRational framerate_q; ///< framerate
int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units)
int fd; ///< framebuffer device file descriptor
int width, height; ///< assumed frame resolution
int frame_linesize; ///< linesize of the output frame, it is assumed to be constant
int bytes_per_pixel;
struct fb_var_screeninfo varinfo; ///< variable info;
struct fb_fix_screeninfo fixinfo; ///< fixed info;
uint8_t *data; ///< framebuffer data
} FBDevContext;
static av_cold int fbdev_read_header(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
AVStream *st = NULL;
enum AVPixelFormat pix_fmt;
int ret, flags = O_RDONLY;
const char* device;
if (!(st = avformat_new_stream(avctx, NULL)))
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */
/* NONBLOCK is ignored by the fbdev driver, only set for consistency */
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
flags |= O_NONBLOCK;
if (avctx->filename[0])
device = avctx->filename;
else
device = ff_fbdev_default_device();
if ((fbdev->fd = avpriv_open(device, flags)) == -1) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR,
"Could not open framebuffer device '%s': %s\n",
device, av_err2str(ret));
return ret;
}
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR,
"FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR,
"FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (pix_fmt == AV_PIX_FMT_NONE) {
ret = AVERROR(EINVAL);
av_log(avctx, AV_LOG_ERROR,
"Framebuffer pixel format not supported.\n");
goto fail;
}
fbdev->width = fbdev->varinfo.xres;
fbdev->height = fbdev->varinfo.yres;
fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3;
fbdev->frame_linesize = fbdev->width * fbdev->bytes_per_pixel;
fbdev->frame_size = fbdev->frame_linesize * fbdev->height;
fbdev->time_frame = AV_NOPTS_VALUE;
fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0);
if (fbdev->data == MAP_FAILED) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
goto fail;
}
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->width = fbdev->width;
st->codec->height = fbdev->height;
st->codec->pix_fmt = pix_fmt;
st->codec->time_base = av_inv_q(fbdev->framerate_q);
st->codec->bit_rate =
fbdev->width * fbdev->height * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8;
av_log(avctx, AV_LOG_INFO,
"w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%d\n",
fbdev->width, fbdev->height, fbdev->varinfo.bits_per_pixel,
av_get_pix_fmt_name(pix_fmt),
fbdev->framerate_q.num, fbdev->framerate_q.den,
st->codec->bit_rate);
return 0;
fail:
close(fbdev->fd);
return ret;
}
static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
FBDevContext *fbdev = avctx->priv_data;
int64_t curtime, delay;
struct timespec ts;
int i, ret;
uint8_t *pin, *pout;
if (fbdev->time_frame == AV_NOPTS_VALUE)
fbdev->time_frame = av_gettime();
/* wait based on the frame rate */
while (1) {
curtime = av_gettime();
delay = fbdev->time_frame - curtime;
av_log(avctx, AV_LOG_TRACE,
"time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
fbdev->time_frame, curtime, delay);
if (delay <= 0) {
fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
break;
}
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
return AVERROR(EAGAIN);
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
}
if ((ret = av_new_packet(pkt, fbdev->frame_size)) < 0)
return ret;
/* refresh fbdev->varinfo, visible data position may change at each call */
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
av_log(avctx, AV_LOG_WARNING,
"Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
}
pkt->pts = curtime;
/* compute visible data offset */
pin = fbdev->data + fbdev->bytes_per_pixel * fbdev->varinfo.xoffset +
fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
pout = pkt->data;
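/* the framebuffer may pad each line: fixinfo.line_length can exceed
   xres * bytes_per_pixel, so copy row by row with separate strides */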
for (i = 0; i < fbdev->height; i++) {
memcpy(pout, pin, fbdev->frame_linesize);
pin += fbdev->fixinfo.line_length;
pout += fbdev->frame_linesize;
}
return fbdev->frame_size;
}
static av_cold int fbdev_read_close(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
munmap(fbdev->data, fbdev->fixinfo.smem_len);
close(fbdev->fd);
return 0;
}
static int fbdev_get_device_list(AVFormatContext *s, AVDeviceInfoList *device_list)
{
return ff_fbdev_get_device_list(device_list);
}
#define OFFSET(x) offsetof(FBDevContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "framerate","", OFFSET(framerate_q), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, 0, DEC },
{ NULL },
};
static const AVClass fbdev_class = {
.class_name = "fbdev indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_fbdev_demuxer = {
.name = "fbdev",
.long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
.priv_data_size = sizeof(FBDevContext),
.read_header = fbdev_read_header,
.read_packet = fbdev_read_packet,
.read_close = fbdev_read_close,
.get_device_list = fbdev_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &fbdev_class,
};
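/* Example use (a sketch; device node and frame rate are assumptions):
       ffmpeg -f fbdev -framerate 10 -i /dev/fb0 out.avi
   When no filename is set, the device named by the FRAMEBUFFER environment
   variable is used, falling back to /dev/fb0 (see fbdev_common.c). */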

View File

@@ -0,0 +1,220 @@
/*
* Copyright (c) 2013 Lukasz Marek
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>
#include "libavutil/pixdesc.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "fbdev_common.h"
#include "avdevice.h"
typedef struct {
AVClass *class; ///< class for private options
int xoffset; ///< x coordinate of top left corner
int yoffset; ///< y coordinate of top left corner
struct fb_var_screeninfo varinfo; ///< framebuffer variable info
struct fb_fix_screeninfo fixinfo; ///< framebuffer fixed info
int fd; ///< framebuffer device file descriptor
uint8_t *data; ///< framebuffer data
} FBDevContext;
static av_cold int fbdev_write_header(AVFormatContext *h)
{
FBDevContext *fbdev = h->priv_data;
enum AVPixelFormat pix_fmt;
int ret, flags = O_RDWR;
const char* device;
if (h->nb_streams != 1 || h->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO) {
av_log(fbdev, AV_LOG_ERROR, "Only a single video stream is supported.\n");
return AVERROR(EINVAL);
}
if (h->filename[0])
device = h->filename;
else
device = ff_fbdev_default_device();
if ((fbdev->fd = avpriv_open(device, flags)) == -1) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR,
"Could not open framebuffer device '%s': %s\n",
device, av_err2str(ret));
return ret;
}
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR, "FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR, "FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (pix_fmt == AV_PIX_FMT_NONE) {
ret = AVERROR(EINVAL);
av_log(h, AV_LOG_ERROR, "Framebuffer pixel format not supported.\n");
goto fail;
}
fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_WRITE, MAP_SHARED, fbdev->fd, 0);
if (fbdev->data == MAP_FAILED) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
goto fail;
}
return 0;
fail:
close(fbdev->fd);
return ret;
}
static int fbdev_write_packet(AVFormatContext *h, AVPacket *pkt)
{
FBDevContext *fbdev = h->priv_data;
uint8_t *pin, *pout;
enum AVPixelFormat fb_pix_fmt;
int disp_height;
int bytes_to_copy;
AVCodecContext *codec_ctx = h->streams[0]->codec;
enum AVPixelFormat video_pix_fmt = codec_ctx->pix_fmt;
int video_width = codec_ctx->width;
int video_height = codec_ctx->height;
int bytes_per_pixel = ((codec_ctx->bits_per_coded_sample + 7) >> 3);
int src_line_size = video_width * bytes_per_pixel;
int i;
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0)
av_log(h, AV_LOG_WARNING,
"Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
fb_pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (fb_pix_fmt != video_pix_fmt) {
av_log(h, AV_LOG_ERROR, "Pixel format %s is not supported, use %s\n",
av_get_pix_fmt_name(video_pix_fmt), av_get_pix_fmt_name(fb_pix_fmt));
return AVERROR(EINVAL);
}
disp_height = FFMIN(fbdev->varinfo.yres, video_height);
bytes_to_copy = FFMIN(fbdev->varinfo.xres, video_width) * bytes_per_pixel;
pin = pkt->data;
pout = fbdev->data +
bytes_per_pixel * fbdev->varinfo.xoffset +
fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
if (fbdev->xoffset) {
if (fbdev->xoffset < 0) {
if (-fbdev->xoffset >= video_width) //nothing to display
return 0;
bytes_to_copy += fbdev->xoffset * bytes_per_pixel;
pin -= fbdev->xoffset * bytes_per_pixel;
} else {
int diff = (video_width + fbdev->xoffset) - fbdev->varinfo.xres;
if (diff > 0) {
if (diff >= video_width) //nothing to display
return 0;
bytes_to_copy -= diff * bytes_per_pixel;
}
pout += bytes_per_pixel * fbdev->xoffset;
}
}
if (fbdev->yoffset) {
if (fbdev->yoffset < 0) {
if (-fbdev->yoffset >= video_height) //nothing to display
return 0;
disp_height += fbdev->yoffset;
pin -= fbdev->yoffset * src_line_size;
} else {
int diff = (video_height + fbdev->yoffset) - fbdev->varinfo.yres;
if (diff > 0) {
if (diff >= video_height) //nothing to display
return 0;
disp_height -= diff;
}
pout += fbdev->yoffset * fbdev->fixinfo.line_length;
}
}
for (i = 0; i < disp_height; i++) {
memcpy(pout, pin, bytes_to_copy);
pout += fbdev->fixinfo.line_length;
pin += src_line_size;
}
return 0;
}
static av_cold int fbdev_write_trailer(AVFormatContext *h)
{
FBDevContext *fbdev = h->priv_data;
munmap(fbdev->data, fbdev->fixinfo.smem_len);
close(fbdev->fd);
return 0;
}
static int fbdev_get_device_list(AVFormatContext *s, AVDeviceInfoList *device_list)
{
return ff_fbdev_get_device_list(device_list);
}
#define OFFSET(x) offsetof(FBDevContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "xoffset", "set x coordinate of top left corner", OFFSET(xoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
{ "yoffset", "set y coordinate of top left corner", OFFSET(yoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
{ NULL }
};
static const AVClass fbdev_class = {
.class_name = "fbdev outdev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
AVOutputFormat ff_fbdev_muxer = {
.name = "fbdev",
.long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
.priv_data_size = sizeof(FBDevContext),
.audio_codec = AV_CODEC_ID_NONE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = fbdev_write_header,
.write_packet = fbdev_write_packet,
.write_trailer = fbdev_write_trailer,
.get_device_list = fbdev_get_device_list,
.flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
.priv_class = &fbdev_class,
};
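/* Example use (a sketch; -pix_fmt bgra assumes a 32 bpp BGRA framebuffer,
   which must match the actual framebuffer format, as checked above):
       ffmpeg -re -i input.mkv -c:v rawvideo -pix_fmt bgra -f fbdev /dev/fb0 */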

View File

@@ -0,0 +1 @@
#include "libavutil/file_open.c"

View File

@@ -0,0 +1,636 @@
/*
* GDI video grab interface
*
* This file is part of FFmpeg.
*
* Copyright (C) 2013 Calvin Walton <calvin.walton@kepstin.ca>
* Copyright (C) 2007-2010 Christophe Gisquet <word1.word2@gmail.com>
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* GDI frame device demuxer
* @author Calvin Walton <calvin.walton@kepstin.ca>
* @author Christophe Gisquet <word1.word2@gmail.com>
*/
#include "config.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include <windows.h>
/**
* GDI Device Demuxer context
*/
struct gdigrab {
const AVClass *class; /**< Class for private options */
int frame_size; /**< Size in bytes of the frame pixel data */
int header_size; /**< Size in bytes of the DIB header */
AVRational time_base; /**< Time base */
int64_t time_frame; /**< Current time */
int draw_mouse; /**< Draw mouse cursor (private option) */
int show_region; /**< Draw border (private option) */
AVRational framerate; /**< Capture framerate (private option) */
int width; /**< Width of the grab frame (private option) */
int height; /**< Height of the grab frame (private option) */
int offset_x; /**< Capture x offset (private option) */
int offset_y; /**< Capture y offset (private option) */
HWND hwnd; /**< Handle of the window for the grab */
HDC source_hdc; /**< Source device context */
HDC dest_hdc; /**< Destination, source-compatible DC */
BITMAPINFO bmi; /**< Information describing DIB format */
HBITMAP hbmp; /**< Information on the bitmap captured */
void *buffer; /**< The buffer containing the bitmap image data */
RECT clip_rect; /**< The subarea of the screen or window to clip */
HWND region_hwnd; /**< Handle of the region border window */
int cursor_error_printed;
};
#define WIN32_API_ERROR(str) \
av_log(s1, AV_LOG_ERROR, str " (error %li)\n", GetLastError())
#define REGION_WND_BORDER 3
/**
* Callback to handle Windows messages for the region outline window.
*
* In particular, this handles painting the frame rectangle.
*
* @param hwnd The region outline window handle.
* @param msg The Windows message.
* @param wparam First Windows message parameter.
* @param lparam Second Windows message parameter.
* @return 0 success, !0 failure
*/
static LRESULT CALLBACK
gdigrab_region_wnd_proc(HWND hwnd, UINT msg, WPARAM wparam, LPARAM lparam)
{
PAINTSTRUCT ps;
HDC hdc;
RECT rect;
switch (msg) {
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
GetClientRect(hwnd, &rect);
FrameRect(hdc, &rect, GetStockObject(BLACK_BRUSH));
rect.left++; rect.top++; rect.right--; rect.bottom--;
FrameRect(hdc, &rect, GetStockObject(WHITE_BRUSH));
rect.left++; rect.top++; rect.right--; rect.bottom--;
FrameRect(hdc, &rect, GetStockObject(BLACK_BRUSH));
EndPaint(hwnd, &ps);
break;
default:
return DefWindowProc(hwnd, msg, wparam, lparam);
}
return 0;
}
/**
* Initialize the region outline window.
*
* @param s1 The format context.
* @param gdigrab gdigrab context.
* @return 0 success, !0 failure
*/
static int
gdigrab_region_wnd_init(AVFormatContext *s1, struct gdigrab *gdigrab)
{
HWND hwnd;
RECT rect = gdigrab->clip_rect;
HRGN region = NULL;
HRGN region_interior = NULL;
DWORD style = WS_POPUP | WS_VISIBLE;
DWORD ex = WS_EX_TOOLWINDOW | WS_EX_TOPMOST | WS_EX_TRANSPARENT;
rect.left -= REGION_WND_BORDER; rect.top -= REGION_WND_BORDER;
rect.right += REGION_WND_BORDER; rect.bottom += REGION_WND_BORDER;
AdjustWindowRectEx(&rect, style, FALSE, ex);
// Create a window with no owner; use WC_DIALOG instead of writing a custom
// window class
hwnd = CreateWindowEx(ex, WC_DIALOG, NULL, style, rect.left, rect.top,
rect.right - rect.left, rect.bottom - rect.top,
NULL, NULL, NULL, NULL);
if (!hwnd) {
WIN32_API_ERROR("Could not create region display window");
goto error;
}
// Set the window shape to only include the border area
GetClientRect(hwnd, &rect);
region = CreateRectRgn(0, 0,
rect.right - rect.left, rect.bottom - rect.top);
region_interior = CreateRectRgn(REGION_WND_BORDER, REGION_WND_BORDER,
rect.right - rect.left - REGION_WND_BORDER,
rect.bottom - rect.top - REGION_WND_BORDER);
CombineRgn(region, region, region_interior, RGN_DIFF);
if (!SetWindowRgn(hwnd, region, FALSE)) {
WIN32_API_ERROR("Could not set window region");
goto error;
}
// The "region" memory is now owned by the window
region = NULL;
DeleteObject(region_interior);
SetWindowLongPtr(hwnd, GWLP_WNDPROC, (LONG_PTR) gdigrab_region_wnd_proc);
ShowWindow(hwnd, SW_SHOW);
gdigrab->region_hwnd = hwnd;
return 0;
error:
if (region)
DeleteObject(region);
if (region_interior)
DeleteObject(region_interior);
if (hwnd)
DestroyWindow(hwnd);
return 1;
}
/**
* Cleanup/free the region outline window.
*
* @param s1 The format context.
* @param gdigrab gdigrab context.
*/
static void
gdigrab_region_wnd_destroy(AVFormatContext *s1, struct gdigrab *gdigrab)
{
if (gdigrab->region_hwnd)
DestroyWindow(gdigrab->region_hwnd);
gdigrab->region_hwnd = NULL;
}
/**
* Process the Windows message queue.
*
* This is important to prevent Windows from thinking the window has become
* unresponsive. In addition, messages like WM_PAINT (which actually draw the
* window contents) are handled from the message queue context.
*
* @param s1 The format context.
* @param gdigrab gdigrab context.
*/
static void
gdigrab_region_wnd_update(AVFormatContext *s1, struct gdigrab *gdigrab)
{
HWND hwnd = gdigrab->region_hwnd;
MSG msg;
while (PeekMessage(&msg, hwnd, 0, 0, PM_REMOVE)) {
DispatchMessage(&msg);
}
}
/**
* Initializes the gdi grab device demuxer (public device demuxer API).
*
* @param s1 Context from avformat core
* @return AVERROR_IO error, 0 success
*/
static int
gdigrab_read_header(AVFormatContext *s1)
{
struct gdigrab *gdigrab = s1->priv_data;
HWND hwnd;
HDC source_hdc = NULL;
HDC dest_hdc = NULL;
BITMAPINFO bmi;
HBITMAP hbmp = NULL;
void *buffer = NULL;
const char *filename = s1->filename;
const char *name = NULL;
AVStream *st = NULL;
int bpp;
RECT virtual_rect;
RECT clip_rect;
BITMAP bmp;
int ret;
if (!strncmp(filename, "title=", 6)) {
name = filename + 6;
hwnd = FindWindow(NULL, name);
if (!hwnd) {
av_log(s1, AV_LOG_ERROR,
"Can't find window '%s', aborting.\n", name);
ret = AVERROR(EIO);
goto error;
}
if (gdigrab->show_region) {
av_log(s1, AV_LOG_WARNING,
"Can't show region when grabbing a window.\n");
gdigrab->show_region = 0;
}
} else if (!strcmp(filename, "desktop")) {
hwnd = NULL;
} else {
av_log(s1, AV_LOG_ERROR,
"Please use \"desktop\" or \"title=<windowname>\" to specify your target.\n");
ret = AVERROR(EIO);
goto error;
}
if (hwnd) {
GetClientRect(hwnd, &virtual_rect);
} else {
virtual_rect.left = GetSystemMetrics(SM_XVIRTUALSCREEN);
virtual_rect.top = GetSystemMetrics(SM_YVIRTUALSCREEN);
virtual_rect.right = virtual_rect.left + GetSystemMetrics(SM_CXVIRTUALSCREEN);
virtual_rect.bottom = virtual_rect.top + GetSystemMetrics(SM_CYVIRTUALSCREEN);
}
/* If no width or height set, use full screen/window area */
if (!gdigrab->width || !gdigrab->height) {
clip_rect.left = virtual_rect.left;
clip_rect.top = virtual_rect.top;
clip_rect.right = virtual_rect.right;
clip_rect.bottom = virtual_rect.bottom;
} else {
clip_rect.left = gdigrab->offset_x;
clip_rect.top = gdigrab->offset_y;
clip_rect.right = gdigrab->width + gdigrab->offset_x;
clip_rect.bottom = gdigrab->height + gdigrab->offset_y;
}
if (clip_rect.left < virtual_rect.left ||
clip_rect.top < virtual_rect.top ||
clip_rect.right > virtual_rect.right ||
clip_rect.bottom > virtual_rect.bottom) {
av_log(s1, AV_LOG_ERROR,
"Capture area (%li,%li),(%li,%li) extends outside window area (%li,%li),(%li,%li)",
clip_rect.left, clip_rect.top,
clip_rect.right, clip_rect.bottom,
virtual_rect.left, virtual_rect.top,
virtual_rect.right, virtual_rect.bottom);
ret = AVERROR(EIO);
goto error;
}
/* This will get the device context for the selected window, or if
* none, the primary screen */
source_hdc = GetDC(hwnd);
if (!source_hdc) {
WIN32_API_ERROR("Couldn't get window device context");
ret = AVERROR(EIO);
goto error;
}
bpp = GetDeviceCaps(source_hdc, BITSPIXEL);
if (name) {
av_log(s1, AV_LOG_INFO,
"Found window %s, capturing %lix%lix%i at (%li,%li)\n",
name,
clip_rect.right - clip_rect.left,
clip_rect.bottom - clip_rect.top,
bpp, clip_rect.left, clip_rect.top);
} else {
av_log(s1, AV_LOG_INFO,
"Capturing whole desktop as %lix%lix%i at (%li,%li)\n",
clip_rect.right - clip_rect.left,
clip_rect.bottom - clip_rect.top,
bpp, clip_rect.left, clip_rect.top);
}
if (clip_rect.right - clip_rect.left <= 0 ||
clip_rect.bottom - clip_rect.top <= 0 || bpp%8) {
av_log(s1, AV_LOG_ERROR, "Invalid properties, aborting\n");
ret = AVERROR(EIO);
goto error;
}
dest_hdc = CreateCompatibleDC(source_hdc);
if (!dest_hdc) {
WIN32_API_ERROR("Screen DC CreateCompatibleDC");
ret = AVERROR(EIO);
goto error;
}
/* Create a DIB and select it into the dest_hdc */
bmi.bmiHeader.biSize = sizeof(BITMAPINFOHEADER);
bmi.bmiHeader.biWidth = clip_rect.right - clip_rect.left;
bmi.bmiHeader.biHeight = -(clip_rect.bottom - clip_rect.top);
bmi.bmiHeader.biPlanes = 1;
bmi.bmiHeader.biBitCount = bpp;
bmi.bmiHeader.biCompression = BI_RGB;
bmi.bmiHeader.biSizeImage = 0;
bmi.bmiHeader.biXPelsPerMeter = 0;
bmi.bmiHeader.biYPelsPerMeter = 0;
bmi.bmiHeader.biClrUsed = 0;
bmi.bmiHeader.biClrImportant = 0;
hbmp = CreateDIBSection(dest_hdc, &bmi, DIB_RGB_COLORS,
&buffer, NULL, 0);
if (!hbmp) {
WIN32_API_ERROR("Creating DIB Section");
ret = AVERROR(EIO);
goto error;
}
if (!SelectObject(dest_hdc, hbmp)) {
WIN32_API_ERROR("SelectObject");
ret = AVERROR(EIO);
goto error;
}
/* Get info from the bitmap */
GetObject(hbmp, sizeof(BITMAP), &bmp);
st = avformat_new_stream(s1, NULL);
if (!st) {
ret = AVERROR(ENOMEM);
goto error;
}
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
gdigrab->frame_size = bmp.bmWidthBytes * bmp.bmHeight * bmp.bmPlanes;
gdigrab->header_size = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
(bpp <= 8 ? (1 << bpp) : 0) * sizeof(RGBQUAD) /* palette size */;
gdigrab->time_base = av_inv_q(gdigrab->framerate);
gdigrab->time_frame = av_gettime() / av_q2d(gdigrab->time_base);
gdigrab->hwnd = hwnd;
gdigrab->source_hdc = source_hdc;
gdigrab->dest_hdc = dest_hdc;
gdigrab->hbmp = hbmp;
gdigrab->bmi = bmi;
gdigrab->buffer = buffer;
gdigrab->clip_rect = clip_rect;
gdigrab->cursor_error_printed = 0;
if (gdigrab->show_region) {
if (gdigrab_region_wnd_init(s1, gdigrab)) {
ret = AVERROR(EIO);
goto error;
}
}
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_BMP;
st->codec->time_base = gdigrab->time_base;
st->codec->bit_rate = (gdigrab->header_size + gdigrab->frame_size) * 1/av_q2d(gdigrab->time_base) * 8;
return 0;
error:
if (source_hdc)
ReleaseDC(hwnd, source_hdc);
if (dest_hdc)
DeleteDC(dest_hdc);
if (hbmp)
DeleteObject(hbmp);
if (source_hdc)
DeleteDC(source_hdc);
return ret;
}
/**
* Paints a mouse pointer in a Win32 image.
*
* @param s1 Context from avformat core, used for logging
* @param gdigrab The gdigrab device demuxer context
*/
static void paint_mouse_pointer(AVFormatContext *s1, struct gdigrab *gdigrab)
{
CURSORINFO ci = {0};
#define CURSOR_ERROR(str) \
if (!gdigrab->cursor_error_printed) { \
WIN32_API_ERROR(str); \
gdigrab->cursor_error_printed = 1; \
}
ci.cbSize = sizeof(ci);
if (GetCursorInfo(&ci)) {
HCURSOR icon = CopyCursor(ci.hCursor);
ICONINFO info;
POINT pos;
RECT clip_rect = gdigrab->clip_rect;
HWND hwnd = gdigrab->hwnd;
info.hbmMask = NULL;
info.hbmColor = NULL;
if (ci.flags != CURSOR_SHOWING)
return;
if (!icon) {
/* Use the standard arrow cursor as a fallback.
* You'll probably only hit this in Wine, which can't fetch
* the current system cursor. */
icon = CopyCursor(LoadCursor(NULL, IDC_ARROW));
}
if (!GetIconInfo(icon, &info)) {
CURSOR_ERROR("Could not get icon info");
goto icon_error;
}
pos.x = ci.ptScreenPos.x - clip_rect.left - info.xHotspot;
pos.y = ci.ptScreenPos.y - clip_rect.top - info.yHotspot;
if (hwnd) {
RECT rect;
if (GetWindowRect(hwnd, &rect)) {
pos.x -= rect.left;
pos.y -= rect.top;
} else {
CURSOR_ERROR("Couldn't get window rectangle");
goto icon_error;
}
}
av_log(s1, AV_LOG_DEBUG, "Cursor pos (%li,%li) -> (%li,%li)\n",
ci.ptScreenPos.x, ci.ptScreenPos.y, pos.x, pos.y);
if (pos.x >= 0 && pos.x <= clip_rect.right - clip_rect.left &&
pos.y >= 0 && pos.y <= clip_rect.bottom - clip_rect.top) {
if (!DrawIcon(gdigrab->dest_hdc, pos.x, pos.y, icon))
CURSOR_ERROR("Couldn't draw icon");
}
icon_error:
if (info.hbmMask)
DeleteObject(info.hbmMask);
if (info.hbmColor)
DeleteObject(info.hbmColor);
if (icon)
DestroyCursor(icon);
} else {
CURSOR_ERROR("Couldn't get cursor info");
}
}
/**
* Grabs a frame from gdi (public device demuxer API).
*
* @param s1 Context from avformat core
* @param pkt Packet holding the grabbed frame
* @return frame size in bytes
*/
static int gdigrab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
struct gdigrab *gdigrab = s1->priv_data;
HDC dest_hdc = gdigrab->dest_hdc;
HDC source_hdc = gdigrab->source_hdc;
RECT clip_rect = gdigrab->clip_rect;
AVRational time_base = gdigrab->time_base;
int64_t time_frame = gdigrab->time_frame;
BITMAPFILEHEADER bfh;
int file_size = gdigrab->header_size + gdigrab->frame_size;
int64_t curtime, delay;
/* Calculate the time of the next frame */
time_frame += INT64_C(1000000);
/* Run Window message processing queue */
if (gdigrab->show_region)
gdigrab_region_wnd_update(s1, gdigrab);
/* wait based on the frame rate */
for (;;) {
curtime = av_gettime();
delay = time_frame * av_q2d(time_base) - curtime;
if (delay <= 0) {
if (delay < INT64_C(-1000000) * av_q2d(time_base)) {
time_frame += INT64_C(1000000);
}
break;
}
if (s1->flags & AVFMT_FLAG_NONBLOCK) {
return AVERROR(EAGAIN);
} else {
av_usleep(delay);
}
}
if (av_new_packet(pkt, file_size) < 0)
return AVERROR(ENOMEM);
pkt->pts = curtime;
/* Blit screen grab */
if (!BitBlt(dest_hdc, 0, 0,
clip_rect.right - clip_rect.left,
clip_rect.bottom - clip_rect.top,
source_hdc,
clip_rect.left, clip_rect.top, SRCCOPY | CAPTUREBLT)) {
WIN32_API_ERROR("Failed to capture image");
return AVERROR(EIO);
}
if (gdigrab->draw_mouse)
paint_mouse_pointer(s1, gdigrab);
/* Copy bits to packet data */
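/* Each packet is laid out as a complete BMP file: BITMAPFILEHEADER,
   then BITMAPINFOHEADER, then the palette (<= 8 bpp only), then the
   DIB pixel data. */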
bfh.bfType = 0x4d42; /* "BM" in little-endian */
bfh.bfSize = file_size;
bfh.bfReserved1 = 0;
bfh.bfReserved2 = 0;
bfh.bfOffBits = gdigrab->header_size;
memcpy(pkt->data, &bfh, sizeof(bfh));
memcpy(pkt->data + sizeof(bfh), &gdigrab->bmi.bmiHeader, sizeof(gdigrab->bmi.bmiHeader));
if (gdigrab->bmi.bmiHeader.biBitCount <= 8)
GetDIBColorTable(dest_hdc, 0, 1 << gdigrab->bmi.bmiHeader.biBitCount,
(RGBQUAD *) (pkt->data + sizeof(bfh) + sizeof(gdigrab->bmi.bmiHeader)));
memcpy(pkt->data + gdigrab->header_size, gdigrab->buffer, gdigrab->frame_size);
gdigrab->time_frame = time_frame;
return gdigrab->header_size + gdigrab->frame_size;
}
/**
* Closes gdi frame grabber (public device demuxer API).
*
* @param s1 Context from avformat core
* @return 0 success, !0 failure
*/
static int gdigrab_read_close(AVFormatContext *s1)
{
struct gdigrab *s = s1->priv_data;
if (s->show_region)
gdigrab_region_wnd_destroy(s1, s);
if (s->source_hdc)
ReleaseDC(s->hwnd, s->source_hdc);
if (s->dest_hdc)
DeleteDC(s->dest_hdc);
if (s->hbmp)
DeleteObject(s->hbmp);
if (s->source_hdc)
DeleteDC(s->source_hdc);
return 0;
}
#define OFFSET(x) offsetof(struct gdigrab, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
{ "show_region", "draw border around capture area", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
{ "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
{ "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
{ "offset_x", "capture area x offset", OFFSET(offset_x), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
{ "offset_y", "capture area y offset", OFFSET(offset_y), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
{ NULL },
};
static const AVClass gdigrab_class = {
.class_name = "GDIgrab indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
};
/** gdi grabber device demuxer declaration */
AVInputFormat ff_gdigrab_demuxer = {
.name = "gdigrab",
.long_name = NULL_IF_CONFIG_SMALL("GDI API Windows frame grabber"),
.priv_data_size = sizeof(struct gdigrab),
.read_header = gdigrab_read_header,
.read_packet = gdigrab_read_packet,
.read_close = gdigrab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &gdigrab_class,
};
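/* Example use (a sketch; "Calculator" is a placeholder window title):
       ffmpeg -f gdigrab -framerate 6 -i desktop out.mpg
       ffmpeg -f gdigrab -framerate 6 -i title=Calculator out.mpg
   The input must be either "desktop" or "title=<windowname>", as parsed in
   gdigrab_read_header() above. */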

View File

@@ -0,0 +1,505 @@
/*
* Copyright (c) 2012 Georg Lippitsch <georg.lippitsch@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libiec61883 interface
*/
#include <sys/poll.h>
#include <libraw1394/raw1394.h>
#include <libavc1394/avc1394.h>
#include <libavc1394/rom1394.h>
#include <libiec61883/iec61883.h>
#include "libavformat/dv.h"
#include "libavformat/mpegts.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#define THREADS HAVE_PTHREADS
#if THREADS
#include <pthread.h>
#endif
#define MOTDCT_SPEC_ID 0x00005068
#define IEC61883_AUTO 0
#define IEC61883_DV 1
#define IEC61883_HDV 2
/**
* For DV, one packet corresponds exactly to one frame.
* For HDV, these are MPEG2 transport stream packets.
* The queue is implemented as linked list.
*/
typedef struct DVPacket {
uint8_t *buf; ///< actual buffer data
int len; ///< size of buffer allocated
struct DVPacket *next; ///< next DVPacket
} DVPacket;
struct iec61883_data {
AVClass *class;
raw1394handle_t raw1394; ///< handle for libraw1394
iec61883_dv_fb_t iec61883_dv; ///< handle for libiec61883 when used with DV
iec61883_mpeg2_t iec61883_mpeg2; ///< handle for libiec61883 when used with HDV
DVDemuxContext *dv_demux; ///< generic DV muxing/demuxing context
MpegTSContext *mpeg_demux; ///< generic HDV muxing/demuxing context
DVPacket *queue_first; ///< first element of packet queue
DVPacket *queue_last; ///< last element of packet queue
char *device_guid; ///< to select one of multiple DV devices
int packets; ///< Number of packets queued
int max_packets; ///< Max. number of packets in queue
int bandwidth; ///< returned by libiec61883
int channel; ///< returned by libiec61883
int input_port; ///< returned by libiec61883
int type; ///< Stream type, to distinguish DV/HDV
int node; ///< returned by libiec61883
int output_port; ///< returned by libiec61883
int thread_loop; ///< Condition for thread while-loop
int receiving; ///< True as soon as data from the device is available
int receive_error; ///< Set in receive task in case of error
int eof; ///< True as soon as no more data is available
struct pollfd raw1394_poll; ///< to poll for new data from libraw1394
/** Parse function for DV/HDV differs, so this is set before packets arrive */
int (*parse_queue)(struct iec61883_data *dv, AVPacket *pkt);
#if THREADS
pthread_t receive_task_thread;
pthread_mutex_t mutex;
pthread_cond_t cond;
#endif
};
static int iec61883_callback(unsigned char *data, int length,
int complete, void *callback_data)
{
struct iec61883_data *dv = callback_data;
DVPacket *packet;
int ret;
#if THREADS
pthread_mutex_lock(&dv->mutex);
#endif
if (dv->packets >= dv->max_packets) {
av_log(NULL, AV_LOG_ERROR, "DV packet queue overrun, dropping.\n");
ret = 0;
goto exit;
}
packet = av_mallocz(sizeof(*packet));
if (!packet) {
ret = -1;
goto exit;
}
packet->buf = av_malloc(length);
if (!packet->buf) {
ret = -1;
goto exit;
}
packet->len = length;
memcpy(packet->buf, data, length);
if (dv->queue_first) {
dv->queue_last->next = packet;
dv->queue_last = packet;
} else {
dv->queue_first = packet;
dv->queue_last = packet;
}
dv->packets++;
ret = 0;
exit:
#if THREADS
pthread_cond_broadcast(&dv->cond);
pthread_mutex_unlock(&dv->mutex);
#endif
return ret;
}
static void *iec61883_receive_task(void *opaque)
{
struct iec61883_data *dv = (struct iec61883_data *)opaque;
int result;
#if THREADS
while (dv->thread_loop)
#endif
{
while ((result = poll(&dv->raw1394_poll, 1, 200)) < 0) {
if (!(errno == EAGAIN || errno == EINTR)) {
av_log(NULL, AV_LOG_ERROR, "Raw1394 poll error occurred.\n");
dv->receive_error = AVERROR(EIO);
return NULL;
}
}
if (result > 0 && ((dv->raw1394_poll.revents & POLLIN)
|| (dv->raw1394_poll.revents & POLLPRI))) {
dv->receiving = 1;
raw1394_loop_iterate(dv->raw1394);
} else if (dv->receiving) {
av_log(NULL, AV_LOG_ERROR, "No more input data available\n");
#if THREADS
pthread_mutex_lock(&dv->mutex);
dv->eof = 1;
pthread_cond_broadcast(&dv->cond);
pthread_mutex_unlock(&dv->mutex);
#else
dv->eof = 1;
#endif
return NULL;
}
}
return NULL;
}
static int iec61883_parse_queue_dv(struct iec61883_data *dv, AVPacket *pkt)
{
DVPacket *packet;
int size;
size = avpriv_dv_get_packet(dv->dv_demux, pkt);
if (size > 0)
return size;
packet = dv->queue_first;
if (!packet)
return -1;
size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
packet->buf, packet->len, -1);
#if FF_API_DESTRUCT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
pkt->destruct = av_destruct_packet;
FF_ENABLE_DEPRECATION_WARNINGS
#endif
dv->queue_first = packet->next;
av_free(packet);
dv->packets--;
if (size > 0)
return size;
return -1;
}
static int iec61883_parse_queue_hdv(struct iec61883_data *dv, AVPacket *pkt)
{
DVPacket *packet;
int size;
while (dv->queue_first) {
packet = dv->queue_first;
size = avpriv_mpegts_parse_packet(dv->mpeg_demux, pkt, packet->buf,
packet->len);
dv->queue_first = packet->next;
av_freep(&packet->buf);
av_freep(&packet);
dv->packets--;
if (size > 0)
return size;
}
return -1;
}
static int iec61883_read_header(AVFormatContext *context)
{
struct iec61883_data *dv = context->priv_data;
struct raw1394_portinfo pinf[16];
rom1394_directory rom_dir;
char *endptr;
int inport;
int nb_ports;
int port = -1;
int response;
int i, j = 0;
uint64_t guid = 0;
dv->input_port = -1;
dv->output_port = -1;
dv->channel = -1;
dv->raw1394 = raw1394_new_handle();
if (!dv->raw1394) {
av_log(context, AV_LOG_ERROR, "Failed to open IEEE1394 interface.\n");
return AVERROR(EIO);
}
if ((nb_ports = raw1394_get_port_info(dv->raw1394, pinf, 16)) < 0) {
av_log(context, AV_LOG_ERROR, "Failed to get number of IEEE1394 ports.\n");
goto fail;
}
inport = strtol(context->filename, &endptr, 10);
if (endptr != context->filename && *endptr == '\0') {
av_log(context, AV_LOG_INFO, "Selecting IEEE1394 port: %d\n", inport);
j = inport;
nb_ports = inport + 1;
} else if (strcmp(context->filename, "auto")) {
av_log(context, AV_LOG_ERROR, "Invalid input \"%s\", you should specify "
"\"auto\" for auto-detection, or the port number.\n", context->filename);
goto fail;
}
if (dv->device_guid) {
if (sscanf(dv->device_guid, "%"SCNu64, &guid) != 1) {
av_log(context, AV_LOG_INFO, "Invalid dvguid parameter: %s\n",
dv->device_guid);
goto fail;
}
}
for (; j < nb_ports && port==-1; ++j) {
raw1394_destroy_handle(dv->raw1394);
if (!(dv->raw1394 = raw1394_new_handle_on_port(j))) {
av_log(context, AV_LOG_ERROR, "Failed setting IEEE1394 port.\n");
goto fail;
}
for (i=0; i<raw1394_get_nodecount(dv->raw1394); ++i) {
/* Select device explicitly by GUID */
if (guid > 1) {
if (guid == rom1394_get_guid(dv->raw1394, i)) {
dv->node = i;
port = j;
break;
}
} else {
/* Select first AV/C tape recorder player node */
if (rom1394_get_directory(dv->raw1394, i, &rom_dir) < 0)
continue;
if (((rom1394_get_node_type(&rom_dir) == ROM1394_NODE_TYPE_AVC) &&
avc1394_check_subunit_type(dv->raw1394, i, AVC1394_SUBUNIT_TYPE_VCR)) ||
(rom_dir.unit_spec_id == MOTDCT_SPEC_ID)) {
rom1394_free_directory(&rom_dir);
dv->node = i;
port = j;
break;
}
rom1394_free_directory(&rom_dir);
}
}
}
if (port == -1) {
av_log(context, AV_LOG_ERROR, "No AV/C devices found.\n");
goto fail;
}
/* Provide bus sanity for multiple connections */
iec61883_cmp_normalize_output(dv->raw1394, 0xffc0 | dv->node);
/* Find out if device is DV or HDV */
if (dv->type == IEC61883_AUTO) {
response = avc1394_transaction(dv->raw1394, dv->node,
AVC1394_CTYPE_STATUS |
AVC1394_SUBUNIT_TYPE_TAPE_RECORDER |
AVC1394_SUBUNIT_ID_0 |
AVC1394_VCR_COMMAND_OUTPUT_SIGNAL_MODE |
0xFF, 2);
response = AVC1394_GET_OPERAND0(response);
dv->type = (response == 0x10 || response == 0x90 || response == 0x1A || response == 0x9A) ?
IEC61883_HDV : IEC61883_DV;
}
/* Connect to device, and do initialization */
dv->channel = iec61883_cmp_connect(dv->raw1394, dv->node, &dv->output_port,
raw1394_get_local_id(dv->raw1394),
&dv->input_port, &dv->bandwidth);
if (dv->channel < 0)
dv->channel = 63;
if (!dv->max_packets)
dv->max_packets = 100;
if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
/* Init HDV receive */
avformat_new_stream(context, NULL);
dv->mpeg_demux = avpriv_mpegts_parse_open(context);
if (!dv->mpeg_demux)
goto fail;
dv->parse_queue = iec61883_parse_queue_hdv;
dv->iec61883_mpeg2 = iec61883_mpeg2_recv_init(dv->raw1394,
(iec61883_mpeg2_recv_t)iec61883_callback,
dv);
dv->max_packets *= 766;
} else {
/* Init DV receive */
dv->dv_demux = avpriv_dv_init_demux(context);
if (!dv->dv_demux)
goto fail;
dv->parse_queue = iec61883_parse_queue_dv;
dv->iec61883_dv = iec61883_dv_fb_init(dv->raw1394, iec61883_callback, dv);
}
dv->raw1394_poll.fd = raw1394_get_fd(dv->raw1394);
dv->raw1394_poll.events = POLLIN | POLLERR | POLLHUP | POLLPRI;
/* Actually start receiving */
if (dv->type == IEC61883_HDV)
iec61883_mpeg2_recv_start(dv->iec61883_mpeg2, dv->channel);
else
iec61883_dv_fb_start(dv->iec61883_dv, dv->channel);
#if THREADS
dv->thread_loop = 1;
if (pthread_mutex_init(&dv->mutex, NULL))
goto fail;
if (pthread_cond_init(&dv->cond, NULL))
goto fail;
if (pthread_create(&dv->receive_task_thread, NULL, iec61883_receive_task, dv))
goto fail;
#endif
return 0;
fail:
raw1394_destroy_handle(dv->raw1394);
return AVERROR(EIO);
}
static int iec61883_read_packet(AVFormatContext *context, AVPacket *pkt)
{
struct iec61883_data *dv = context->priv_data;
int size;
/**
* Try to parse frames from queue
*/
#if THREADS
pthread_mutex_lock(&dv->mutex);
while ((size = dv->parse_queue(dv, pkt)) == -1)
if (!dv->eof)
pthread_cond_wait(&dv->cond, &dv->mutex);
else
break;
pthread_mutex_unlock(&dv->mutex);
#else
int result;
while ((size = dv->parse_queue(dv, pkt)) == -1) {
iec61883_receive_task((void *)dv);
if (dv->receive_error)
return dv->receive_error;
}
#endif
return size;
}
static int iec61883_close(AVFormatContext *context)
{
struct iec61883_data *dv = context->priv_data;
#if THREADS
dv->thread_loop = 0;
pthread_join(dv->receive_task_thread, NULL);
pthread_cond_destroy(&dv->cond);
pthread_mutex_destroy(&dv->mutex);
#endif
if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
iec61883_mpeg2_recv_stop(dv->iec61883_mpeg2);
iec61883_mpeg2_close(dv->iec61883_mpeg2);
avpriv_mpegts_parse_close(dv->mpeg_demux);
} else {
iec61883_dv_fb_stop(dv->iec61883_dv);
iec61883_dv_fb_close(dv->iec61883_dv);
}
while (dv->queue_first) {
DVPacket *packet = dv->queue_first;
dv->queue_first = packet->next;
av_freep(&packet->buf);
av_freep(&packet);
}
iec61883_cmp_disconnect(dv->raw1394, dv->node, dv->output_port,
raw1394_get_local_id(dv->raw1394),
dv->input_port, dv->channel, dv->bandwidth);
raw1394_destroy_handle(dv->raw1394);
return 0;
}
static const AVOption options[] = {
{ "dvtype", "override autodetection of DV/HDV", offsetof(struct iec61883_data, type), AV_OPT_TYPE_INT, {.i64 = IEC61883_AUTO}, IEC61883_AUTO, IEC61883_HDV, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
{ "auto", "auto detect DV/HDV", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_AUTO}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
{ "dv", "force device being treated as DV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_DV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
{ "hdv" , "force device being treated as HDV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_HDV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
{ "dvbuffer", "set queue buffer size (in packets)", offsetof(struct iec61883_data, max_packets), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "dvguid", "select one of multiple DV devices by its GUID", offsetof(struct iec61883_data, device_guid), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
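/*
* Illustrative sketch (added; not part of the original source): the private
* options above are normally supplied through an AVDictionary when the
* device is opened. The option names and the "auto" device specifier are
* real; error handling is elided.
*
* @code
* AVFormatContext *fmt = NULL;
* AVDictionary *opts = NULL;
* av_dict_set(&opts, "dvtype", "hdv", 0);
* av_dict_set(&opts, "dvbuffer", "200", 0);
* avformat_open_input(&fmt, "auto", av_find_input_format("iec61883"), &opts);
* av_dict_free(&opts);
* @endcode
*/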
static const AVClass iec61883_class = {
.class_name = "iec61883 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_iec61883_demuxer = {
.name = "iec61883",
.long_name = NULL_IF_CONFIG_SMALL("libiec61883 (new DV1394) A/V input device"),
.priv_data_size = sizeof(struct iec61883_data),
.read_header = iec61883_read_header,
.read_packet = iec61883_read_packet,
.read_close = iec61883_close,
.flags = AVFMT_NOFILE,
.priv_class = &iec61883_class,
};


@@ -0,0 +1,27 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_INTERNAL_H
#define AVDEVICE_INTERNAL_H
#include "libavformat/avformat.h"
int ff_alloc_input_device_context(struct AVFormatContext **avctx, struct AVInputFormat *iformat,
const char *format);
#endif


@@ -0,0 +1,358 @@
/*
* JACK Audio Connection Kit input device
* Copyright (c) 2009 Samalyse
* Author: Olivier Guilyardi <olivier samalyse com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <semaphore.h>
#include <jack/jack.h>
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "timefilter.h"
#include "avdevice.h"
/**
* Size of the internal FIFO buffers as a number of audio packets
*/
#define FIFO_PACKETS_NUM 16
typedef struct JackData {
AVClass *class;
jack_client_t * client;
int activated;
sem_t packet_count;
jack_nframes_t sample_rate;
jack_nframes_t buffer_size;
jack_port_t ** ports;
int nports;
TimeFilter * timefilter;
AVFifoBuffer * new_pkts;
AVFifoBuffer * filled_pkts;
int pkt_xrun;
int jack_xrun;
} JackData;
static int process_callback(jack_nframes_t nframes, void *arg)
{
/* Warning: this function runs in realtime. One mustn't allocate memory
* here or do anything else that could block. */
int i, j;
JackData *self = arg;
float * buffer;
jack_nframes_t latency, cycle_delay;
AVPacket pkt;
float *pkt_data;
double cycle_time;
if (!self->client)
return 0;
/* The approximate delay since the hardware interrupt as a number of frames */
cycle_delay = jack_frames_since_cycle_start(self->client);
/* Retrieve filtered cycle time */
cycle_time = ff_timefilter_update(self->timefilter,
av_gettime() / 1000000.0 - (double) cycle_delay / self->sample_rate,
self->buffer_size);
/* Check if an empty packet is available, and if there's enough space to send it back once filled */
if ((av_fifo_size(self->new_pkts) < sizeof(pkt)) || (av_fifo_space(self->filled_pkts) < sizeof(pkt))) {
self->pkt_xrun = 1;
return 0;
}
/* Retrieve empty (but allocated) packet */
av_fifo_generic_read(self->new_pkts, &pkt, sizeof(pkt), NULL);
pkt_data = (float *) pkt.data;
latency = 0;
/* Copy and interleave audio data from the JACK buffer into the packet */
for (i = 0; i < self->nports; i++) {
#if HAVE_JACK_PORT_GET_LATENCY_RANGE
jack_latency_range_t range;
jack_port_get_latency_range(self->ports[i], JackCaptureLatency, &range);
latency += range.max;
#else
latency += jack_port_get_total_latency(self->client, self->ports[i]);
#endif
buffer = jack_port_get_buffer(self->ports[i], self->buffer_size);
for (j = 0; j < self->buffer_size; j++)
pkt_data[j * self->nports + i] = buffer[j];
}
/* Timestamp the packet with the cycle start time minus the average latency */
pkt.pts = (cycle_time - (double) latency / (self->nports * self->sample_rate)) * 1000000.0;
/* Send the now filled packet back, and increase packet counter */
av_fifo_generic_write(self->filled_pkts, &pkt, sizeof(pkt), NULL);
sem_post(&self->packet_count);
return 0;
}
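/*
* Sketch of the packet recycling scheme used above (comment added for
* clarity): the demuxer thread pre-allocates AVPackets and queues them in
* new_pkts; the realtime JACK thread pops an empty packet, fills and
* timestamps it, and queues it in filled_pkts, so no allocation ever
* happens on the realtime path.
*
*   demuxer thread                        JACK realtime thread
*   av_new_packet() --> new_pkts ------>  copy samples --> filled_pkts
*        ^                                                     |
*        +-------- consumed by audio_read_packet() <-----------+
*/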
static void shutdown_callback(void *arg)
{
JackData *self = arg;
self->client = NULL;
}
static int xrun_callback(void *arg)
{
JackData *self = arg;
self->jack_xrun = 1;
ff_timefilter_reset(self->timefilter);
return 0;
}
static int supply_new_packets(JackData *self, AVFormatContext *context)
{
AVPacket pkt;
int test, pkt_size = self->buffer_size * self->nports * sizeof(float);
/* Supply the process callback with new empty packets, by filling the new
* packets FIFO buffer with as many packets as possible. process_callback()
* can't do this by itself, because it can't allocate memory in realtime. */
while (av_fifo_space(self->new_pkts) >= sizeof(pkt)) {
if ((test = av_new_packet(&pkt, pkt_size)) < 0) {
av_log(context, AV_LOG_ERROR, "Could not create packet of size %d\n", pkt_size);
return test;
}
av_fifo_generic_write(self->new_pkts, &pkt, sizeof(pkt), NULL);
}
return 0;
}
static int start_jack(AVFormatContext *context)
{
JackData *self = context->priv_data;
jack_status_t status;
int i, test;
/* Register as a JACK client, using the context filename as client name. */
self->client = jack_client_open(context->filename, JackNullOption, &status);
if (!self->client) {
av_log(context, AV_LOG_ERROR, "Unable to register as a JACK client\n");
return AVERROR(EIO);
}
sem_init(&self->packet_count, 0, 0);
self->sample_rate = jack_get_sample_rate(self->client);
self->ports = av_malloc_array(self->nports, sizeof(*self->ports));
if (!self->ports)
return AVERROR(ENOMEM);
self->buffer_size = jack_get_buffer_size(self->client);
/* Register JACK ports */
for (i = 0; i < self->nports; i++) {
char str[16];
snprintf(str, sizeof(str), "input_%d", i + 1);
self->ports[i] = jack_port_register(self->client, str,
JACK_DEFAULT_AUDIO_TYPE,
JackPortIsInput, 0);
if (!self->ports[i]) {
av_log(context, AV_LOG_ERROR, "Unable to register port %s:%s\n",
context->filename, str);
jack_client_close(self->client);
return AVERROR(EIO);
}
}
/* Register JACK callbacks */
jack_set_process_callback(self->client, process_callback, self);
jack_on_shutdown(self->client, shutdown_callback, self);
jack_set_xrun_callback(self->client, xrun_callback, self);
/* Create time filter */
self->timefilter = ff_timefilter_new (1.0 / self->sample_rate, self->buffer_size, 1.5);
if (!self->timefilter) {
jack_client_close(self->client);
return AVERROR(ENOMEM);
}
/* Create FIFO buffers */
self->filled_pkts = av_fifo_alloc_array(FIFO_PACKETS_NUM, sizeof(AVPacket));
/* New packets FIFO with one extra packet for safety against underruns */
self->new_pkts = av_fifo_alloc_array((FIFO_PACKETS_NUM + 1), sizeof(AVPacket));
if (!self->new_pkts || !self->filled_pkts) {
jack_client_close(self->client);
return AVERROR(ENOMEM);
}
if ((test = supply_new_packets(self, context))) {
jack_client_close(self->client);
return test;
}
return 0;
}
static void free_pkt_fifo(AVFifoBuffer **fifo)
{
AVPacket pkt;
while (av_fifo_size(*fifo)) {
av_fifo_generic_read(*fifo, &pkt, sizeof(pkt), NULL);
av_free_packet(&pkt);
}
av_fifo_freep(fifo);
}
static void stop_jack(JackData *self)
{
if (self->client) {
if (self->activated)
jack_deactivate(self->client);
jack_client_close(self->client);
}
sem_destroy(&self->packet_count);
free_pkt_fifo(&self->new_pkts);
free_pkt_fifo(&self->filled_pkts);
av_freep(&self->ports);
ff_timefilter_destroy(self->timefilter);
}
static int audio_read_header(AVFormatContext *context)
{
JackData *self = context->priv_data;
AVStream *stream;
int test;
if ((test = start_jack(context)))
return test;
stream = avformat_new_stream(context, NULL);
if (!stream) {
stop_jack(self);
return AVERROR(ENOMEM);
}
stream->codec->codec_type = AVMEDIA_TYPE_AUDIO;
#if HAVE_BIGENDIAN
stream->codec->codec_id = AV_CODEC_ID_PCM_F32BE;
#else
stream->codec->codec_id = AV_CODEC_ID_PCM_F32LE;
#endif
stream->codec->sample_rate = self->sample_rate;
stream->codec->channels = self->nports;
avpriv_set_pts_info(stream, 64, 1, 1000000); /* 64 bits pts in us */
return 0;
}
static int audio_read_packet(AVFormatContext *context, AVPacket *pkt)
{
JackData *self = context->priv_data;
struct timespec timeout = {0, 0};
int test;
/* Activate the JACK client on first packet read. Activating the JACK client
* means that process_callback() starts getting called at regular intervals.
* If we activate it in audio_read_header(), we're actually reading audio data
* from the device before instructed to, and that may result in an overrun. */
if (!self->activated) {
if (!jack_activate(self->client)) {
self->activated = 1;
av_log(context, AV_LOG_INFO,
"JACK client registered and activated (rate=%dHz, buffer_size=%d frames)\n",
self->sample_rate, self->buffer_size);
} else {
av_log(context, AV_LOG_ERROR, "Unable to activate JACK client\n");
return AVERROR(EIO);
}
}
/* Wait for a packet coming back from process_callback(), if one isn't available yet */
timeout.tv_sec = av_gettime() / 1000000 + 2;
if (sem_timedwait(&self->packet_count, &timeout)) {
if (errno == ETIMEDOUT) {
av_log(context, AV_LOG_ERROR,
"Input error: timed out when waiting for JACK process callback output\n");
} else {
char errbuf[128];
int ret = AVERROR(errno);
av_strerror(ret, errbuf, sizeof(errbuf));
av_log(context, AV_LOG_ERROR, "Error while waiting for audio packet: %s\n",
errbuf);
}
if (!self->client)
av_log(context, AV_LOG_ERROR, "Input error: JACK server is gone\n");
return AVERROR(EIO);
}
if (self->pkt_xrun) {
av_log(context, AV_LOG_WARNING, "Audio packet xrun\n");
self->pkt_xrun = 0;
}
if (self->jack_xrun) {
av_log(context, AV_LOG_WARNING, "JACK xrun\n");
self->jack_xrun = 0;
}
/* Retrieve the packet filled with audio data by process_callback() */
av_fifo_generic_read(self->filled_pkts, pkt, sizeof(*pkt), NULL);
if ((test = supply_new_packets(self, context)))
return test;
return 0;
}
static int audio_read_close(AVFormatContext *context)
{
JackData *self = context->priv_data;
stop_jack(self);
return 0;
}
#define OFFSET(x) offsetof(JackData, x)
static const AVOption options[] = {
{ "channels", "Number of audio channels.", OFFSET(nports), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
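/*
* Illustrative usage (added; mirrors the documented CLI behaviour): the
* demuxer registers a JACK client named after the input "filename", e.g.
*
*   ffmpeg -f jack -channels 2 -i my_client out.wav
*
* after which the ports my_client:input_1 and my_client:input_2 can be
* wired to a capture source with jack_connect.
*/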
static const AVClass jack_indev_class = {
.class_name = "JACK indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_jack_demuxer = {
.name = "jack",
.long_name = NULL_IF_CONFIG_SMALL("JACK Audio Connection Kit"),
.priv_data_size = sizeof(JackData),
.read_header = audio_read_header,
.read_packet = audio_read_packet,
.read_close = audio_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &jack_indev_class,
};


@@ -0,0 +1,515 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libavfilter virtual input device
*/
/* #define DEBUG */
#include <float.h> /* DBL_MIN, DBL_MAX */
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/file.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/avfiltergraph.h"
#include "libavfilter/buffersink.h"
#include "libavformat/internal.h"
#include "avdevice.h"
typedef struct {
AVClass *class; ///< class for private options
char *graph_str;
char *graph_filename;
char *dump_graph;
AVFilterGraph *graph;
AVFilterContext **sinks;
int *sink_stream_map;
int *sink_eof;
int *stream_sink_map;
int *sink_stream_subcc_map;
AVFrame *decoded_frame;
int nb_sinks;
AVPacket subcc_packet;
} LavfiContext;
static int *create_all_formats(int n)
{
int i, j, *fmts, count = 0;
for (i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
count++;
}
if (!(fmts = av_malloc((count+1) * sizeof(int))))
return NULL;
for (j = 0, i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
fmts[j++] = i;
}
fmts[j] = -1;
return fmts;
}
av_cold static int lavfi_read_close(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
av_freep(&lavfi->sink_stream_map);
av_freep(&lavfi->sink_eof);
av_freep(&lavfi->stream_sink_map);
av_freep(&lavfi->sink_stream_subcc_map);
av_freep(&lavfi->sinks);
avfilter_graph_free(&lavfi->graph);
av_frame_free(&lavfi->decoded_frame);
return 0;
}
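/*
* Note (added): sink_stream_subcc_map is filled in two passes. During
* graph parsing it temporarily holds a boolean "+subcc requested" flag
* per sink; create_subcc_streams() then replaces that flag with either
* the index of the newly created EIA-608 subtitle stream or -1.
*/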
static int create_subcc_streams(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
AVStream *st;
int stream_idx, sink_idx;
for (stream_idx = 0; stream_idx < lavfi->nb_sinks; stream_idx++) {
sink_idx = lavfi->stream_sink_map[stream_idx];
if (lavfi->sink_stream_subcc_map[sink_idx]) {
lavfi->sink_stream_subcc_map[sink_idx] = avctx->nb_streams;
if (!(st = avformat_new_stream(avctx, NULL)))
return AVERROR(ENOMEM);
st->codec->codec_id = AV_CODEC_ID_EIA_608;
st->codec->codec_type = AVMEDIA_TYPE_SUBTITLE;
} else {
lavfi->sink_stream_subcc_map[sink_idx] = -1;
}
}
return 0;
}
av_cold static int lavfi_read_header(AVFormatContext *avctx)
{
LavfiContext *lavfi = avctx->priv_data;
AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
AVFilter *buffersink, *abuffersink;
int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
enum AVMediaType type;
int ret = 0, i, n;
#define FAIL(ERR) { ret = ERR; goto end; }
if (!pix_fmts)
FAIL(AVERROR(ENOMEM));
avfilter_register_all();
buffersink = avfilter_get_by_name("buffersink");
abuffersink = avfilter_get_by_name("abuffersink");
if (lavfi->graph_filename && lavfi->graph_str) {
av_log(avctx, AV_LOG_ERROR,
"Only one of the graph or graph_file options must be specified\n");
FAIL(AVERROR(EINVAL));
}
if (lavfi->graph_filename) {
AVBPrint graph_file_pb;
AVIOContext *avio = NULL;
ret = avio_open(&avio, lavfi->graph_filename, AVIO_FLAG_READ);
if (ret < 0)
goto end;
av_bprint_init(&graph_file_pb, 0, AV_BPRINT_SIZE_UNLIMITED);
ret = avio_read_to_bprint(avio, &graph_file_pb, INT_MAX);
avio_closep(&avio);
av_bprint_chars(&graph_file_pb, '\0', 1);
if (!ret && !av_bprint_is_complete(&graph_file_pb))
ret = AVERROR(ENOMEM);
if (ret) {
av_bprint_finalize(&graph_file_pb, NULL);
goto end;
}
if ((ret = av_bprint_finalize(&graph_file_pb, &lavfi->graph_str)))
goto end;
}
if (!lavfi->graph_str)
lavfi->graph_str = av_strdup(avctx->filename);
/* parse the graph, create a stream for each open output */
if (!(lavfi->graph = avfilter_graph_alloc()))
FAIL(AVERROR(ENOMEM));
if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
&input_links, &output_links, avctx)) < 0)
goto end;
if (input_links) {
av_log(avctx, AV_LOG_ERROR,
"Open inputs in the filtergraph are not acceptable\n");
FAIL(AVERROR(EINVAL));
}
/* count the outputs */
for (n = 0, inout = output_links; inout; n++, inout = inout->next);
lavfi->nb_sinks = n;
if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
if (!(lavfi->sink_stream_subcc_map = av_malloc(sizeof(int) * n)))
FAIL(AVERROR(ENOMEM));
for (i = 0; i < n; i++)
lavfi->stream_sink_map[i] = -1;
/* parse the output link names - they need to be of the form out0, out1, ...
* create a mapping between them and the streams */
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
int stream_idx = 0, suffix = 0, use_subcc = 0;
sscanf(inout->name, "out%n%d%n", &suffix, &stream_idx, &suffix);
if (!suffix) {
av_log(avctx, AV_LOG_ERROR,
"Invalid outpad name '%s'\n", inout->name);
FAIL(AVERROR(EINVAL));
}
if (inout->name[suffix]) {
if (!strcmp(inout->name + suffix, "+subcc")) {
use_subcc = 1;
} else {
av_log(avctx, AV_LOG_ERROR,
"Invalid outpad suffix '%s'\n", inout->name);
FAIL(AVERROR(EINVAL));
}
}
if ((unsigned)stream_idx >= n) {
av_log(avctx, AV_LOG_ERROR,
"Invalid index was specified in output '%s', "
"must be a non-negative value < %d\n",
inout->name, n);
FAIL(AVERROR(EINVAL));
}
if (lavfi->stream_sink_map[stream_idx] != -1) {
av_log(avctx, AV_LOG_ERROR,
"An output with stream index %d was already specified\n",
stream_idx);
FAIL(AVERROR(EINVAL));
}
lavfi->sink_stream_map[i] = stream_idx;
lavfi->stream_sink_map[stream_idx] = i;
lavfi->sink_stream_subcc_map[i] = !!use_subcc;
}
/* for each open output create a corresponding stream */
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVStream *st;
if (!(st = avformat_new_stream(avctx, NULL)))
FAIL(AVERROR(ENOMEM));
st->id = i;
}
/* create a sink for each output and connect them to the graph */
lavfi->sinks = av_malloc_array(lavfi->nb_sinks, sizeof(AVFilterContext *));
if (!lavfi->sinks)
FAIL(AVERROR(ENOMEM));
for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
AVFilterContext *sink;
type = avfilter_pad_get_type(inout->filter_ctx->output_pads, inout->pad_idx);
if ((type == AVMEDIA_TYPE_VIDEO && !buffersink) ||
(type == AVMEDIA_TYPE_AUDIO && !abuffersink)) {
av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
FAIL(AVERROR_FILTER_NOT_FOUND);
}
if (type == AVMEDIA_TYPE_VIDEO) {
ret = avfilter_graph_create_filter(&sink, buffersink,
inout->name, NULL,
NULL, lavfi->graph);
if (ret >= 0)
ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
} else if (type == AVMEDIA_TYPE_AUDIO) {
enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_DBL, -1 };
ret = avfilter_graph_create_filter(&sink, abuffersink,
inout->name, NULL,
NULL, lavfi->graph);
if (ret >= 0)
ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
ret = av_opt_set_int(sink, "all_channel_counts", 1,
AV_OPT_SEARCH_CHILDREN);
if (ret < 0)
goto end;
} else {
av_log(avctx, AV_LOG_ERROR,
"Output '%s' is not a video or audio output, not yet supported\n", inout->name);
FAIL(AVERROR(EINVAL));
}
lavfi->sinks[i] = sink;
if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
goto end;
}
/* configure the graph */
if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
goto end;
if (lavfi->dump_graph) {
char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
fputs(dump, stderr);
fflush(stderr);
av_free(dump);
}
/* fill each stream with the information in the corresponding sink */
for (i = 0; i < lavfi->nb_sinks; i++) {
AVFilterLink *link = lavfi->sinks[lavfi->stream_sink_map[i]]->inputs[0];
AVStream *st = avctx->streams[i];
st->codec->codec_type = link->type;
avpriv_set_pts_info(st, 64, link->time_base.num, link->time_base.den);
if (link->type == AVMEDIA_TYPE_VIDEO) {
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->pix_fmt = link->format;
st->codec->time_base = link->time_base;
st->codec->width = link->w;
st->codec->height = link->h;
st->sample_aspect_ratio =
st->codec->sample_aspect_ratio = link->sample_aspect_ratio;
avctx->probesize = FFMAX(avctx->probesize,
link->w * link->h *
av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(link->format)) *
30);
} else if (link->type == AVMEDIA_TYPE_AUDIO) {
st->codec->codec_id = av_get_pcm_codec(link->format, -1);
st->codec->channels = avfilter_link_get_channels(link);
st->codec->sample_fmt = link->format;
st->codec->sample_rate = link->sample_rate;
st->codec->time_base = link->time_base;
st->codec->channel_layout = link->channel_layout;
if (st->codec->codec_id == AV_CODEC_ID_NONE)
av_log(avctx, AV_LOG_ERROR,
"Could not find PCM codec for sample format %s.\n",
av_get_sample_fmt_name(link->format));
}
}
if ((ret = create_subcc_streams(avctx)) < 0)
goto end;
if (!(lavfi->decoded_frame = av_frame_alloc()))
FAIL(AVERROR(ENOMEM));
end:
av_free(pix_fmts);
avfilter_inout_free(&input_links);
avfilter_inout_free(&output_links);
if (ret < 0)
lavfi_read_close(avctx);
return ret;
}
static int create_subcc_packet(AVFormatContext *avctx, AVFrame *frame,
int sink_idx)
{
LavfiContext *lavfi = avctx->priv_data;
AVFrameSideData *sd;
int stream_idx, i, ret;
if ((stream_idx = lavfi->sink_stream_subcc_map[sink_idx]) < 0)
return 0;
for (i = 0; i < frame->nb_side_data; i++)
if (frame->side_data[i]->type == AV_FRAME_DATA_A53_CC)
break;
if (i >= frame->nb_side_data)
return 0;
sd = frame->side_data[i];
if ((ret = av_new_packet(&lavfi->subcc_packet, sd->size)) < 0)
return ret;
memcpy(lavfi->subcc_packet.data, sd->data, sd->size);
lavfi->subcc_packet.stream_index = stream_idx;
lavfi->subcc_packet.pts = frame->pts;
lavfi->subcc_packet.pos = av_frame_get_pkt_pos(frame);
return 0;
}
static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
LavfiContext *lavfi = avctx->priv_data;
double min_pts = DBL_MAX;
int stream_idx, min_pts_sink_idx = 0;
AVFrame *frame = lavfi->decoded_frame;
AVPicture pict;
AVDictionary *frame_metadata;
int ret, i;
int size = 0;
if (lavfi->subcc_packet.size) {
*pkt = lavfi->subcc_packet;
av_init_packet(&lavfi->subcc_packet);
lavfi->subcc_packet.size = 0;
lavfi->subcc_packet.data = NULL;
return pkt->size;
}
/* iterate through all the graph sinks. Select the sink with the
* minimum PTS */
for (i = 0; i < lavfi->nb_sinks; i++) {
AVRational tb = lavfi->sinks[i]->inputs[0]->time_base;
double d;
int ret;
if (lavfi->sink_eof[i])
continue;
ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame,
AV_BUFFERSINK_FLAG_PEEK);
if (ret == AVERROR_EOF) {
ff_dlog(avctx, "EOF sink_idx:%d\n", i);
lavfi->sink_eof[i] = 1;
continue;
} else if (ret < 0)
return ret;
d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
ff_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
av_frame_unref(frame);
if (d < min_pts) {
min_pts = d;
min_pts_sink_idx = i;
}
}
if (min_pts == DBL_MAX)
return AVERROR_EOF;
ff_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);
av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];
if (frame->width /* FIXME best way of testing a video */) {
size = avpicture_get_size(frame->format, frame->width, frame->height);
if ((ret = av_new_packet(pkt, size)) < 0)
return ret;
memcpy(pict.data, frame->data, 4*sizeof(frame->data[0]));
memcpy(pict.linesize, frame->linesize, 4*sizeof(frame->linesize[0]));
avpicture_layout(&pict, frame->format, frame->width, frame->height,
pkt->data, size);
} else if (av_frame_get_channels(frame) /* FIXME test audio */) {
size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
av_frame_get_channels(frame);
if ((ret = av_new_packet(pkt, size)) < 0)
return ret;
memcpy(pkt->data, frame->data[0], size);
}
frame_metadata = av_frame_get_metadata(frame);
if (frame_metadata) {
uint8_t *metadata;
AVDictionaryEntry *e = NULL;
AVBPrint meta_buf;
av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED);
while ((e = av_dict_get(frame_metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
av_bprintf(&meta_buf, "%s", e->key);
av_bprint_chars(&meta_buf, '\0', 1);
av_bprintf(&meta_buf, "%s", e->value);
av_bprint_chars(&meta_buf, '\0', 1);
}
if (!av_bprint_is_complete(&meta_buf) ||
!(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA,
meta_buf.len))) {
av_bprint_finalize(&meta_buf, NULL);
return AVERROR(ENOMEM);
}
memcpy(metadata, meta_buf.str, meta_buf.len);
av_bprint_finalize(&meta_buf, NULL);
}
if ((ret = create_subcc_packet(avctx, frame, min_pts_sink_idx)) < 0) {
av_frame_unref(frame);
av_packet_unref(pkt);
return ret;
}
pkt->stream_index = stream_idx;
pkt->pts = frame->pts;
pkt->pos = av_frame_get_pkt_pos(frame);
pkt->size = size;
av_frame_unref(frame);
return size;
}
#define OFFSET(x) offsetof(LavfiContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "graph", "set libavfilter graph", OFFSET(graph_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "graph_file","set libavfilter graph filename", OFFSET(graph_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC},
{ "dumpgraph", "dump graph to stderr", OFFSET(dump_graph), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ NULL },
};
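/*
* Illustrative sketch (added): the graph comes either from the "filename"
* or from the graph/graph_file options, and each open output must be
* labelled outN, e.g. (error handling elided):
*
* @code
* AVFormatContext *fmt = NULL;
* AVDictionary *opts = NULL;
* av_dict_set(&opts, "graph", "testsrc=size=320x240[out0]", 0);
* avformat_open_input(&fmt, NULL, av_find_input_format("lavfi"), &opts);
* av_dict_free(&opts);
* @endcode
*/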
static const AVClass lavfi_class = {
.class_name = "lavfi indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_INPUT,
};
AVInputFormat ff_lavfi_demuxer = {
.name = "lavfi",
.long_name = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
.priv_data_size = sizeof(LavfiContext),
.read_header = lavfi_read_header,
.read_packet = lavfi_read_packet,
.read_close = lavfi_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &lavfi_class,
};


@@ -0,0 +1,14 @@
prefix=/usr/local
exec_prefix=${prefix}
libdir=${prefix}/lib
includedir=${prefix}/include
Name: libavdevice
Description: FFmpeg device handling library
Version: 56.4.100
Requires:
Requires.private: libavformat >= 56.40.101, libavcodec >= 56.60.100, libswresample >= 1.2.101, libavutil >= 54.31.100
Conflicts:
Libs: -L${libdir} -lavdevice
Libs.private: -lm -lz -lpsapi -ladvapi32 -lshell32
Cflags: -I${includedir}


@@ -0,0 +1,4 @@
LIBAVDEVICE_$MAJOR {
global: DllStartup; avdevice_*;
local: *;
};


@@ -0,0 +1,4 @@
LIBAVDEVICE_56 {
global: DllStartup; avdevice_*;
local: *;
};


@@ -0,0 +1,194 @@
/*
* Copyright (c) 2011 Anton Khirnov <anton@khirnov.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libcdio CD grabbing
*/
#include "config.h"
#if HAVE_CDIO_PARANOIA_H
#include <cdio/cdda.h>
#include <cdio/paranoia.h>
#elif HAVE_CDIO_PARANOIA_PARANOIA_H
#include <cdio/paranoia/cdda.h>
#include <cdio/paranoia/paranoia.h>
#endif
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
typedef struct CDIOContext {
const AVClass *class;
cdrom_drive_t *drive;
cdrom_paranoia_t *paranoia;
int32_t last_sector;
/* private options */
int speed;
int paranoia_mode;
} CDIOContext;
static av_cold int read_header(AVFormatContext *ctx)
{
CDIOContext *s = ctx->priv_data;
AVStream *st;
int ret, i;
char *err = NULL;
if (!(st = avformat_new_stream(ctx, NULL)))
return AVERROR(ENOMEM);
s->drive = cdio_cddap_identify(ctx->filename, CDDA_MESSAGE_LOGIT, &err);
if (!s->drive) {
av_log(ctx, AV_LOG_ERROR, "Could not open drive %s.\n", ctx->filename);
return AVERROR(EINVAL);
}
if (err) {
av_log(ctx, AV_LOG_VERBOSE, "%s\n", err);
free(err);
}
if ((ret = cdio_cddap_open(s->drive)) < 0 || !s->drive->opened) {
av_log(ctx, AV_LOG_ERROR, "Could not open disk in drive %s.\n", ctx->filename);
return AVERROR(EINVAL);
}
cdio_cddap_verbose_set(s->drive, CDDA_MESSAGE_LOGIT, CDDA_MESSAGE_LOGIT);
if (s->speed)
cdio_cddap_speed_set(s->drive, s->speed);
s->paranoia = cdio_paranoia_init(s->drive);
if (!s->paranoia) {
av_log(ctx, AV_LOG_ERROR, "Could not init paranoia.\n");
return AVERROR(EINVAL);
}
cdio_paranoia_modeset(s->paranoia, s->paranoia_mode);
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
if (s->drive->bigendianp)
st->codec->codec_id = AV_CODEC_ID_PCM_S16BE;
else
st->codec->codec_id = AV_CODEC_ID_PCM_S16LE;
st->codec->sample_rate = 44100;
st->codec->channels = 2;
if (s->drive->audio_last_sector != CDIO_INVALID_LSN &&
s->drive->audio_first_sector != CDIO_INVALID_LSN)
st->duration = s->drive->audio_last_sector - s->drive->audio_first_sector;
else if (s->drive->tracks)
st->duration = s->drive->disc_toc[s->drive->tracks].dwStartSector;
avpriv_set_pts_info(st, 64, CDIO_CD_FRAMESIZE_RAW, 2*st->codec->channels*st->codec->sample_rate);
for (i = 0; i < s->drive->tracks; i++) {
char title[16];
snprintf(title, sizeof(title), "track %02d", s->drive->disc_toc[i].bTrack);
avpriv_new_chapter(ctx, i, st->time_base, s->drive->disc_toc[i].dwStartSector,
s->drive->disc_toc[i+1].dwStartSector, title);
}
s->last_sector = cdio_cddap_disc_lastsector(s->drive);
return 0;
}
static int read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
CDIOContext *s = ctx->priv_data;
int ret;
uint16_t *buf;
char *err = NULL;
if (ctx->streams[0]->cur_dts > s->last_sector)
return AVERROR_EOF;
buf = cdio_paranoia_read(s->paranoia, NULL);
if (!buf)
return AVERROR_EOF;
if ((err = cdio_cddap_errors(s->drive))) {
av_log(ctx, AV_LOG_ERROR, "%s\n", err);
free(err);
err = NULL;
}
if ((err = cdio_cddap_messages(s->drive))) {
av_log(ctx, AV_LOG_VERBOSE, "%s\n", err);
free(err);
err = NULL;
}
if ((ret = av_new_packet(pkt, CDIO_CD_FRAMESIZE_RAW)) < 0)
return ret;
memcpy(pkt->data, buf, CDIO_CD_FRAMESIZE_RAW);
return 0;
}
static av_cold int read_close(AVFormatContext *ctx)
{
CDIOContext *s = ctx->priv_data;
cdio_paranoia_free(s->paranoia);
cdio_cddap_close(s->drive);
return 0;
}
static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp,
int flags)
{
CDIOContext *s = ctx->priv_data;
AVStream *st = ctx->streams[0];
cdio_paranoia_seek(s->paranoia, timestamp, SEEK_SET);
st->cur_dts = timestamp;
return 0;
}
#define OFFSET(x) offsetof(CDIOContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "speed", "set drive reading speed", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
{ "paranoia_mode", "set error recovery mode", OFFSET(paranoia_mode), AV_OPT_TYPE_FLAGS, { .i64 = PARANOIA_MODE_DISABLE }, INT_MIN, INT_MAX, DEC, "paranoia_mode" },
{ "disable", "apply no fixups", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_DISABLE }, 0, 0, DEC, "paranoia_mode" },
{ "verify", "verify data integrity in overlap area", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_VERIFY }, 0, 0, DEC, "paranoia_mode" },
{ "overlap", "perform overlapped reads", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_OVERLAP }, 0, 0, DEC, "paranoia_mode" },
{ "neverskip", "do not skip failed reads", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_NEVERSKIP }, 0, 0, DEC, "paranoia_mode" },
{ "full", "apply all recovery modes", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_FULL }, 0, 0, DEC, "paranoia_mode" },
{ NULL },
};
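/*
* Illustrative note (added): paranoia_mode is an AV_OPT_TYPE_FLAGS option,
* so the fixups above can be combined with '+', e.g. on the command line
* (/dev/sr0 is an example device path):
*
*   ffmpeg -f libcdio -paranoia_mode verify+overlap -i /dev/sr0 track.wav
*/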
static const AVClass libcdio_class = {
.class_name = "libcdio indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_libcdio_demuxer = {
.name = "libcdio",
.read_header = read_header,
.read_packet = read_packet,
.read_close = read_close,
.read_seek = read_seek,
.priv_data_size = sizeof(CDIOContext),
.flags = AVFMT_NOFILE,
.priv_class = &libcdio_class,
};


@@ -0,0 +1,425 @@
/*
* IIDC1394 grab interface (uses libdc1394 and libraw1394)
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#if HAVE_LIBDC1394_2
#include <dc1394/dc1394.h>
#elif HAVE_LIBDC1394_1
#include <libraw1394/raw1394.h>
#include <libdc1394/dc1394_control.h>
#define DC1394_VIDEO_MODE_320x240_YUV422 MODE_320x240_YUV422
#define DC1394_VIDEO_MODE_640x480_YUV411 MODE_640x480_YUV411
#define DC1394_VIDEO_MODE_640x480_YUV422 MODE_640x480_YUV422
#define DC1394_FRAMERATE_1_875 FRAMERATE_1_875
#define DC1394_FRAMERATE_3_75 FRAMERATE_3_75
#define DC1394_FRAMERATE_7_5 FRAMERATE_7_5
#define DC1394_FRAMERATE_15 FRAMERATE_15
#define DC1394_FRAMERATE_30 FRAMERATE_30
#define DC1394_FRAMERATE_60 FRAMERATE_60
#define DC1394_FRAMERATE_120 FRAMERATE_120
#define DC1394_FRAMERATE_240 FRAMERATE_240
#endif
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
typedef struct dc1394_data {
AVClass *class;
#if HAVE_LIBDC1394_1
raw1394handle_t handle;
dc1394_cameracapture camera;
int channel;
#elif HAVE_LIBDC1394_2
dc1394_t *d;
dc1394camera_t *camera;
dc1394video_frame_t *frame;
#endif
int current_frame;
int frame_rate; /**< frames per 1000 seconds (fps * 1000) */
char *video_size; /**< String describing video size, set by a private option. */
char *pixel_format; /**< Set by a private option. */
char *framerate; /**< Set by a private option. */
AVPacket packet;
} dc1394_data;
static const struct dc1394_frame_format {
int width;
int height;
enum AVPixelFormat pix_fmt;
int frame_size_id;
} dc1394_frame_formats[] = {
{ 320, 240, AV_PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 },
{ 640, 480, AV_PIX_FMT_GRAY8, DC1394_VIDEO_MODE_640x480_MONO8 },
{ 640, 480, AV_PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
{ 640, 480, AV_PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 },
{ 0, 0, 0, 0 } /* gotta be the last one */
};
static const struct dc1394_frame_rate {
int frame_rate;
int frame_rate_id;
} dc1394_frame_rates[] = {
{ 1875, DC1394_FRAMERATE_1_875 },
{ 3750, DC1394_FRAMERATE_3_75 },
{ 7500, DC1394_FRAMERATE_7_5 },
{ 15000, DC1394_FRAMERATE_15 },
{ 30000, DC1394_FRAMERATE_30 },
{ 60000, DC1394_FRAMERATE_60 },
{120000, DC1394_FRAMERATE_120 },
{240000, DC1394_FRAMERATE_240 },
{ 0, 0 } /* gotta be the last one */
};
#define OFFSET(x) offsetof(dc1394_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
#if HAVE_LIBDC1394_1
{ "channel", "", offsetof(dc1394_data, channel), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
#endif
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
{ "pixel_format", "", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
{ NULL },
};
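/*
* Illustrative sketch (added): the private options select the capture mode
* before the device is opened; 640x480 + uyvy422 + 15 fps matches the
* DC1394_VIDEO_MODE_640x480_YUV422 and DC1394_FRAMERATE_15 table entries
* above. Error handling is elided.
*
* @code
* AVFormatContext *fmt = NULL;
* AVDictionary *opts = NULL;
* av_dict_set(&opts, "video_size", "640x480", 0);
* av_dict_set(&opts, "pixel_format", "uyvy422", 0);
* av_dict_set(&opts, "framerate", "15", 0);
* avformat_open_input(&fmt, NULL, av_find_input_format("libdc1394"), &opts);
* av_dict_free(&opts);
* @endcode
*/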
static const AVClass libdc1394_class = {
.class_name = "libdc1394 indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
static inline int dc1394_read_common(AVFormatContext *c,
struct dc1394_frame_format **select_fmt, struct dc1394_frame_rate **select_fps)
{
dc1394_data* dc1394 = c->priv_data;
AVStream* vst;
struct dc1394_frame_format *fmt;
struct dc1394_frame_rate *fps;
enum AVPixelFormat pix_fmt;
int width, height;
AVRational framerate;
int ret = 0;
if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == AV_PIX_FMT_NONE) {
av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
ret = AVERROR(EINVAL);
goto out;
}
if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) {
av_log(c, AV_LOG_ERROR, "Could not parse video size '%s'.\n", dc1394->video_size);
goto out;
}
if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) {
av_log(c, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", dc1394->framerate);
goto out;
}
dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den);
for (fmt = dc1394_frame_formats; fmt->width; fmt++)
if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
break;
for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
if (fps->frame_rate == dc1394->frame_rate)
break;
if (!fps->frame_rate || !fmt->width) {
av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
width, height, dc1394->frame_rate);
ret = AVERROR(EINVAL);
goto out;
}
/* create a video stream */
vst = avformat_new_stream(c, NULL);
if (!vst) {
ret = AVERROR(ENOMEM);
goto out;
}
avpriv_set_pts_info(vst, 64, 1, 1000);
vst->codec->codec_type = AVMEDIA_TYPE_VIDEO;
vst->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
vst->codec->time_base.den = framerate.num;
vst->codec->time_base.num = framerate.den;
vst->codec->width = fmt->width;
vst->codec->height = fmt->height;
vst->codec->pix_fmt = fmt->pix_fmt;
/* packet init */
av_init_packet(&dc1394->packet);
dc1394->packet.size = avpicture_get_size(fmt->pix_fmt, fmt->width, fmt->height);
dc1394->packet.stream_index = vst->index;
dc1394->packet.flags |= AV_PKT_FLAG_KEY;
dc1394->current_frame = 0;
vst->codec->bit_rate = av_rescale(dc1394->packet.size * 8, fps->frame_rate, 1000);
*select_fps = fps;
*select_fmt = fmt;
out:
return ret;
}
#if HAVE_LIBDC1394_1
static int dc1394_v1_read_header(AVFormatContext *c)
{
dc1394_data* dc1394 = c->priv_data;
AVStream* vst;
nodeid_t* camera_nodes;
int res;
struct dc1394_frame_format *fmt = NULL;
struct dc1394_frame_rate *fps = NULL;
if (dc1394_read_common(c, &fmt, &fps) != 0)
return -1;
/* Now let us prep the hardware. */
dc1394->handle = dc1394_create_handle(0); /* FIXME: gotta have ap->port */
if (!dc1394->handle) {
av_log(c, AV_LOG_ERROR, "Can't acquire dc1394 handle on port %d\n", 0 /* ap->port */);
goto out;
}
camera_nodes = dc1394_get_camera_nodes(dc1394->handle, &res, 1);
if (!camera_nodes || camera_nodes[dc1394->channel] == DC1394_NO_CAMERA) {
av_log(c, AV_LOG_ERROR, "There's no IIDC camera on the channel %d\n", dc1394->channel);
goto out_handle;
}
res = dc1394_dma_setup_capture(dc1394->handle, camera_nodes[dc1394->channel],
0,
FORMAT_VGA_NONCOMPRESSED,
fmt->frame_size_id,
SPEED_400,
fps->frame_rate_id, 8, 1,
c->filename,
&dc1394->camera);
dc1394_free_camera_nodes(camera_nodes);
if (res != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Can't prepare camera for the DMA capture\n");
goto out_handle;
}
res = dc1394_start_iso_transmission(dc1394->handle, dc1394->camera.node);
if (res != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Can't start isochronous transmission\n");
goto out_handle_dma;
}
return 0;
out_handle_dma:
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
out_handle:
dc1394_destroy_handle(dc1394->handle);
out:
return -1;
}
static int dc1394_v1_read_packet(AVFormatContext *c, AVPacket *pkt)
{
struct dc1394_data *dc1394 = c->priv_data;
int res;
/* discard stale frame */
if (dc1394->current_frame++) {
if (dc1394_dma_done_with_buffer(&dc1394->camera) != DC1394_SUCCESS)
av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
}
res = dc1394_dma_single_capture(&dc1394->camera);
if (res == DC1394_SUCCESS) {
dc1394->packet.data = (uint8_t *)(dc1394->camera.capture_buffer);
dc1394->packet.pts = (dc1394->current_frame * 1000000) / dc1394->frame_rate;
res = dc1394->packet.size;
} else {
av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
dc1394->packet.data = NULL;
res = -1;
}
*pkt = dc1394->packet;
return res;
}
static int dc1394_v1_close(AVFormatContext * context)
{
struct dc1394_data *dc1394 = context->priv_data;
dc1394_stop_iso_transmission(dc1394->handle, dc1394->camera.node);
dc1394_dma_unlisten(dc1394->handle, &dc1394->camera);
dc1394_dma_release_camera(dc1394->handle, &dc1394->camera);
dc1394_destroy_handle(dc1394->handle);
return 0;
}
#elif HAVE_LIBDC1394_2
static int dc1394_v2_read_header(AVFormatContext *c)
{
dc1394_data* dc1394 = c->priv_data;
dc1394camera_list_t *list;
int res, i;
struct dc1394_frame_format *fmt = NULL;
struct dc1394_frame_rate *fps = NULL;
if (dc1394_read_common(c, &fmt, &fps) != 0)
return -1;
/* Now let us prep the hardware. */
dc1394->d = dc1394_new();
dc1394_camera_enumerate (dc1394->d, &list);
if (!list || list->num == 0) {
av_log(c, AV_LOG_ERROR, "No IIDC camera found\n");
goto out;
}
/* FIXME: to select a specific camera, search the list for its GUID */
dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
if (list->num > 1) {
av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
}
/* Freeing list of cameras */
dc1394_camera_free_list (list);
/* Select MAX Speed possible from the cam */
if (dc1394->camera->bmode_capable>0) {
dc1394_video_set_operation_mode(dc1394->camera, DC1394_OPERATION_MODE_1394B);
i = DC1394_ISO_SPEED_800;
} else {
i = DC1394_ISO_SPEED_400;
}
for (res = DC1394_FAILURE; i >= DC1394_ISO_SPEED_MIN && res != DC1394_SUCCESS; i--) {
res = dc1394_video_set_iso_speed(dc1394->camera, i);
}
if (res != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Couldn't set ISO Speed\n");
goto out_camera;
}
if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
goto out_camera;
}
if (dc1394_video_set_framerate(dc1394->camera, fps->frame_rate_id) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d\n", fps->frame_rate);
goto out_camera;
}
if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Cannot set up camera\n");
goto out_camera;
}
if (dc1394_video_set_transmission(dc1394->camera, DC1394_ON) != DC1394_SUCCESS) {
av_log(c, AV_LOG_ERROR, "Cannot start capture\n");
goto out_camera;
}
return 0;
out_camera:
dc1394_capture_stop(dc1394->camera);
dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
dc1394_camera_free (dc1394->camera);
out:
dc1394_free(dc1394->d);
return -1;
}
static int dc1394_v2_read_packet(AVFormatContext *c, AVPacket *pkt)
{
struct dc1394_data *dc1394 = c->priv_data;
int res;
/* discard stale frame */
if (dc1394->current_frame++) {
if (dc1394_capture_enqueue(dc1394->camera, dc1394->frame) != DC1394_SUCCESS)
av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
}
res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame);
if (res == DC1394_SUCCESS) {
dc1394->packet.data = (uint8_t *) dc1394->frame->image;
dc1394->packet.pts = dc1394->current_frame * 1000000 / dc1394->frame_rate;
res = dc1394->frame->image_bytes;
} else {
av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
dc1394->packet.data = NULL;
res = -1;
}
*pkt = dc1394->packet;
return res;
}
static int dc1394_v2_close(AVFormatContext * context)
{
struct dc1394_data *dc1394 = context->priv_data;
dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
dc1394_capture_stop(dc1394->camera);
dc1394_camera_free(dc1394->camera);
dc1394_free(dc1394->d);
return 0;
}
AVInputFormat ff_libdc1394_demuxer = {
.name = "libdc1394",
.long_name = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
.priv_data_size = sizeof(struct dc1394_data),
.read_header = dc1394_v2_read_header,
.read_packet = dc1394_v2_read_packet,
.read_close = dc1394_v2_close,
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
#endif
#if HAVE_LIBDC1394_1
AVInputFormat ff_libdc1394_demuxer = {
.name = "libdc1394",
.long_name = NULL_IF_CONFIG_SMALL("dc1394 v.1 A/V grab"),
.priv_data_size = sizeof(struct dc1394_data),
.read_header = dc1394_v1_read_header,
.read_packet = dc1394_v1_read_packet,
.read_close = dc1394_v1_close,
.flags = AVFMT_NOFILE,
.priv_class = &libdc1394_class,
};
#endif


@@ -0,0 +1,254 @@
/*
* Copyright (c) 2011 Jonathan Baldwin
*
* This file is part of FFmpeg.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/**
* @file
* OpenAL 1.1 capture device for libavdevice
**/
#include <AL/al.h>
#include <AL/alc.h>
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
typedef struct {
AVClass *class;
/** OpenAL capture device context. **/
ALCdevice *device;
/** The number of channels in the captured audio. **/
int channels;
/** The sample rate (in Hz) of the captured audio. **/
int sample_rate;
/** The sample size (in bits) of the captured audio. **/
int sample_size;
/** The OpenAL sample format of the captured audio. **/
ALCenum sample_format;
/** The number of bytes between two consecutive samples of the same channel/component. **/
ALCint sample_step;
/** If true, print a list of capture devices on this system and exit. **/
int list_devices;
} al_data;
typedef struct {
ALCenum al_fmt;
enum AVCodecID codec_id;
int channels;
} al_format_info;
#define LOWEST_AL_FORMAT FFMIN(FFMIN(AL_FORMAT_MONO8,AL_FORMAT_MONO16),FFMIN(AL_FORMAT_STEREO8,AL_FORMAT_STEREO16))
/**
* Get information about an AL_FORMAT value.
* @param al_fmt the AL_FORMAT value to find information about.
* @return A pointer to a structure containing information about the AL_FORMAT value.
*/
static inline const al_format_info* get_al_format_info(ALCenum al_fmt)
{
static const al_format_info info_table[] = {
[AL_FORMAT_MONO8-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO8, AV_CODEC_ID_PCM_U8, 1},
[AL_FORMAT_MONO16-LOWEST_AL_FORMAT] = {AL_FORMAT_MONO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 1},
[AL_FORMAT_STEREO8-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO8, AV_CODEC_ID_PCM_U8, 2},
[AL_FORMAT_STEREO16-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 2},
};
return &info_table[al_fmt-LOWEST_AL_FORMAT];
}
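/*
* Note (added): the table is indexed by al_fmt - LOWEST_AL_FORMAT, which
* works because the four capture formats occupy a contiguous range in
* al.h (AL_FORMAT_MONO8 0x1100 ... AL_FORMAT_STEREO16 0x1103), so the
* designated initializers above form a dense lookup table.
*/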
/**
* Get the OpenAL error code, translated into an av/errno error code.
* @param device The ALC device to check for errors.
* @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired.
* @return The error code, or 0 if there is no error.
*/
static inline int al_get_error(ALCdevice *device, const char** error_msg_ret)
{
ALCenum error = alcGetError(device);
if (error_msg_ret)
*error_msg_ret = (const char*) alcGetString(device, error);
switch (error) {
case ALC_NO_ERROR:
return 0;
case ALC_INVALID_DEVICE:
return AVERROR(ENODEV);
break;
case ALC_INVALID_CONTEXT:
case ALC_INVALID_ENUM:
case ALC_INVALID_VALUE:
return AVERROR(EINVAL);
break;
case ALC_OUT_OF_MEMORY:
return AVERROR(ENOMEM);
break;
default:
return AVERROR(EIO);
}
}
/**
* Print out a list of OpenAL capture devices on this system.
*/
static inline void print_al_capture_devices(void *log_ctx)
{
const char *devices;
if (!(devices = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER)))
return;
av_log(log_ctx, AV_LOG_INFO, "List of OpenAL capture devices on this system:\n");
for (; *devices != '\0'; devices += strlen(devices) + 1)
av_log(log_ctx, AV_LOG_INFO, " %s\n", devices);
}
static int read_header(AVFormatContext *ctx)
{
al_data *ad = ctx->priv_data;
static const ALCenum sample_formats[2][2] = {
{ AL_FORMAT_MONO8, AL_FORMAT_STEREO8 },
{ AL_FORMAT_MONO16, AL_FORMAT_STEREO16 }
};
int error = 0;
const char *error_msg = NULL;
AVStream *st = NULL;
AVCodecContext *codec = NULL;
if (ad->list_devices) {
print_al_capture_devices(ctx);
return AVERROR_EXIT;
}
ad->sample_format = sample_formats[ad->sample_size/8-1][ad->channels-1];
/* Open device for capture */
ad->device =
alcCaptureOpenDevice(ctx->filename[0] ? ctx->filename : NULL,
ad->sample_rate,
ad->sample_format,
ad->sample_rate); /* Maximum 1 second of sample data to be read at once */
if ((error = al_get_error(ad->device, &error_msg))) goto fail;
/* Create stream */
if (!(st = avformat_new_stream(ctx, NULL))) {
error = AVERROR(ENOMEM);
goto fail;
}
/* We work in microseconds */
avpriv_set_pts_info(st, 64, 1, 1000000);
/* Set codec parameters */
codec = st->codec;
codec->codec_type = AVMEDIA_TYPE_AUDIO;
codec->sample_rate = ad->sample_rate;
codec->channels = get_al_format_info(ad->sample_format)->channels;
codec->codec_id = get_al_format_info(ad->sample_format)->codec_id;
/* This is needed to read the audio data */
ad->sample_step = (av_get_bits_per_sample(get_al_format_info(ad->sample_format)->codec_id) *
get_al_format_info(ad->sample_format)->channels) / 8;
/* Finally, start the capture process */
alcCaptureStart(ad->device);
return 0;
fail:
/* Handle failure */
if (ad->device)
alcCaptureCloseDevice(ad->device);
if (error_msg)
av_log(ctx, AV_LOG_ERROR, "Cannot open device: %s\n", error_msg);
return error;
}
static int read_packet(AVFormatContext* ctx, AVPacket *pkt)
{
al_data *ad = ctx->priv_data;
int error = 0;
const char *error_msg = NULL;
ALCint nb_samples;
/* Get number of samples available */
alcGetIntegerv(ad->device, ALC_CAPTURE_SAMPLES, (ALCsizei) sizeof(ALCint), &nb_samples);
if ((error = al_get_error(ad->device, &error_msg))) goto fail;
/* Create a packet of appropriate size */
if ((error = av_new_packet(pkt, nb_samples*ad->sample_step)) < 0)
goto fail;
pkt->pts = av_gettime();
/* Fill the packet with the available samples */
alcCaptureSamples(ad->device, pkt->data, nb_samples);
if ((error = al_get_error(ad->device, &error_msg))) goto fail;
return pkt->size;
fail:
/* Handle failure */
if (pkt->data)
av_free_packet(pkt);
if (error_msg)
av_log(ctx, AV_LOG_ERROR, "Error: %s\n", error_msg);
return error;
}
static int read_close(AVFormatContext* ctx)
{
al_data *ad = ctx->priv_data;
if (ad->device) {
alcCaptureStop(ad->device);
alcCaptureCloseDevice(ad->device);
}
return 0;
}
#define OFFSET(x) offsetof(al_data, x)
static const AVOption options[] = {
{"channels", "set number of channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AV_OPT_FLAG_DECODING_PARAM },
{"sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, 192000, AV_OPT_FLAG_DECODING_PARAM },
{"sample_size", "set sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64=16}, 8, 16, AV_OPT_FLAG_DECODING_PARAM },
{"list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
{"true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
{"false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
{NULL},
};
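/*
* Illustrative usage (added; matches the documented CLI behaviour): list
* the capture devices first, then open one by name ("DeviceName" stands
* for a name printed by the first command):
*
*   ffmpeg -list_devices true -f openal -i dummy out.ogg
*   ffmpeg -f openal -i "DeviceName" out.wav
*/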
static const AVClass class = {
.class_name = "openal",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_openal_demuxer = {
.name = "openal",
.long_name = NULL_IF_CONFIG_SMALL("OpenAL audio capture device"),
.priv_data_size = sizeof(al_data),
.read_probe = NULL,
.read_header = read_header,
.read_packet = read_packet,
.read_close = read_close,
.flags = AVFMT_NOFILE,
.priv_class = &class
};

File diff suppressed because it is too large


@@ -0,0 +1,188 @@
/*
* Copyright (c) 2014 Lukasz Marek
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_OPENGL_SHADERS_H
#define AVDEVICE_OPENGL_SHADERS_H
#include "libavutil/pixfmt.h"
static const char * const FF_OPENGL_VERTEX_SHADER =
"uniform mat4 u_projectionMatrix;"
"uniform mat4 u_modelViewMatrix;"
"attribute vec4 a_position;"
"attribute vec2 a_textureCoords;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"gl_Position = u_projectionMatrix * (a_position * u_modelViewMatrix);"
"texture_coordinate = a_textureCoords;"
"}";
/**
* Fragment shader for packed RGBA formats.
*/
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"uniform mat4 u_colorMap;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"gl_FragColor = texture2D(u_texture0, texture_coordinate) * u_colorMap;"
"}";
/**
* Fragment shader for packed RGB formats.
*/
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"uniform mat4 u_colorMap;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"gl_FragColor = vec4((texture2D(u_texture0, texture_coordinate) * u_colorMap).rgb, 1.0);"
"}";
/**
* Fragment shader for planar RGBA formats.
*/
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"uniform sampler2D u_texture1;"
"uniform sampler2D u_texture2;"
"uniform sampler2D u_texture3;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r,"
"texture2D(u_texture1, texture_coordinate).r,"
"texture2D(u_texture2, texture_coordinate).r,"
"texture2D(u_texture3, texture_coordinate).r);"
"}";
/**
* Fragment shader for planar RGB formats.
*/
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"uniform sampler2D u_texture1;"
"uniform sampler2D u_texture2;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r,"
"texture2D(u_texture1, texture_coordinate).r,"
"texture2D(u_texture2, texture_coordinate).r,"
"1.0);"
"}";
/**
* Fragment shader for planar YUV formats.
*/
static const char * const FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"uniform sampler2D u_texture1;"
"uniform sampler2D u_texture2;"
"uniform float u_chroma_div_w;"
"uniform float u_chroma_div_h;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"vec3 yuv;"
"yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;"
"yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
"yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
"gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643,"
"0.0, -0.39173, 2.0170,"
"1.5958, -0.81290, 0.0) * yuv, 1.0), 0.0, 1.0);"
"}";
/**
* Fragment shader for planar YUVA formats.
*/
static const char * const FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"uniform sampler2D u_texture1;"
"uniform sampler2D u_texture2;"
"uniform sampler2D u_texture3;"
"uniform float u_chroma_div_w;"
"uniform float u_chroma_div_h;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"vec3 yuv;"
"yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;"
"yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
"yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
"gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643,"
"0.0, -0.39173, 2.0170,"
"1.5958, -0.81290, 0.0) * yuv, texture2D(u_texture3, texture_coordinate).r), 0.0, 1.0);"
"}";
static const char * const FF_OPENGL_FRAGMENT_SHADER_GRAY =
#if defined(GL_ES_VERSION_2_0)
"precision mediump float;"
#endif
"uniform sampler2D u_texture0;"
"varying vec2 texture_coordinate;"
"void main()"
"{"
"float c = texture2D(u_texture0, texture_coordinate).r;"
"gl_FragColor = vec4(c, c, c, 1.0);"
"}";
#endif /* AVDEVICE_OPENGL_SHADERS_H */
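/* Reference for the constants above (illustrative only, not part of the
 * tree): the mat3 in the YUV shaders is the BT.601 limited-range conversion.
 * A hypothetical CPU equivalent, operating on samples normalized to [0,1]
 * as they are when fetched from a GL_LUMINANCE texture: */
#include <math.h>

static void yuv_to_rgb_ref(float y, float u, float v,
                           float *r, float *g, float *b)
{
    y -= 0.0625f;                 /* remove the 16/255 luma offset */
    u -= 0.5f;                    /* centre the chroma samples     */
    v -= 0.5f;
    /* same coefficients as the shader's column-major mat3 */
    *r = 1.1643f * y                + 1.5958f * v;
    *g = 1.1643f * y - 0.39173f * u - 0.8129f * v;
    *b = 1.1643f * y + 2.0170f  * u;
    /* clamp to [0,1], matching the shader's clamp() */
    *r = fminf(fmaxf(*r, 0.0f), 1.0f);
    *g = fminf(fmaxf(*g, 0.0f), 1.0f);
    *b = fminf(fmaxf(*b, 0.0f), 1.0f);
}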

View File

@@ -0,0 +1,144 @@
/*
* Linux audio play and grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <string.h>
#if HAVE_SOUNDCARD_H
#include <soundcard.h>
#else
#include <sys/soundcard.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include "libavutil/log.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "oss.h"
int ff_oss_audio_open(AVFormatContext *s1, int is_output,
const char *audio_device)
{
OSSAudioData *s = s1->priv_data;
int audio_fd;
int tmp, err;
char *flip = getenv("AUDIO_FLIP_LEFT");
if (is_output)
audio_fd = avpriv_open(audio_device, O_WRONLY);
else
audio_fd = avpriv_open(audio_device, O_RDONLY);
if (audio_fd < 0) {
av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, av_err2str(AVERROR(errno)));
return AVERROR(EIO);
}
if (flip && *flip == '1') {
s->flip_left = 1;
}
/* non blocking mode */
if (!is_output) {
if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) {
av_log(s1, AV_LOG_WARNING, "%s: Could not enable non block mode (%s)\n", audio_device, av_err2str(AVERROR(errno)));
}
}
s->frame_size = OSS_AUDIO_BLOCK_SIZE;
#define CHECK_IOCTL_ERROR(event) \
if (err < 0) { \
av_log(s1, AV_LOG_ERROR, #event ": %s\n", av_err2str(AVERROR(errno)));\
goto fail; \
}
/* Select the sample format, favouring the native one.
* We don't CHECK_IOCTL_ERROR here because OSS may still be usable even if
* this call fails. If OSS is not usable, the SNDCTL_DSP_SETFMT ioctl below
* is going to fail anyway. */
err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
if (err < 0) {
av_log(s1, AV_LOG_WARNING, "SNDCTL_DSP_GETFMTS: %s\n", av_err2str(AVERROR(errno)));
}
#if HAVE_BIGENDIAN
if (tmp & AFMT_S16_BE) {
tmp = AFMT_S16_BE;
} else if (tmp & AFMT_S16_LE) {
tmp = AFMT_S16_LE;
} else {
tmp = 0;
}
#else
if (tmp & AFMT_S16_LE) {
tmp = AFMT_S16_LE;
} else if (tmp & AFMT_S16_BE) {
tmp = AFMT_S16_BE;
} else {
tmp = 0;
}
#endif
switch(tmp) {
case AFMT_S16_LE:
s->codec_id = AV_CODEC_ID_PCM_S16LE;
break;
case AFMT_S16_BE:
s->codec_id = AV_CODEC_ID_PCM_S16BE;
break;
default:
av_log(s1, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
close(audio_fd);
return AVERROR(EIO);
}
err = ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
CHECK_IOCTL_ERROR(SNDCTL_DSP_SETFMT)
tmp = (s->channels == 2);
err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
CHECK_IOCTL_ERROR(SNDCTL_DSP_STEREO)
tmp = s->sample_rate;
err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
CHECK_IOCTL_ERROR(SNDCTL_DSP_SPEED)
s->sample_rate = tmp; /* store real sample rate */
s->fd = audio_fd;
return 0;
fail:
close(audio_fd);
return AVERROR(EIO);
#undef CHECK_IOCTL_ERROR
}
int ff_oss_audio_close(OSSAudioData *s)
{
close(s->fd);
return 0;
}
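/* Usage sketch (illustrative; assumes s1 is an AVFormatContext whose
 * priv_data is an OSSAudioData, as the demuxer and muxer below arrange).
 * The caller fills in the requested parameters; ff_oss_audio_open()
 * negotiates with the driver and stores what was actually granted. */
OSSAudioData *oss = s1->priv_data;
oss->sample_rate = 48000;                          /* requested rate */
oss->channels    = 2;
if (ff_oss_audio_open(s1, 0, "/dev/dsp") >= 0) {   /* 0 = capture */
    /* oss->sample_rate now holds the rate the card really accepted */
    ff_oss_audio_close(oss);
}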

View File

@@ -0,0 +1,45 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_OSS_H
#define AVDEVICE_OSS_H
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#define OSS_AUDIO_BLOCK_SIZE 4096
typedef struct OSSAudioData {
AVClass *class;
int fd;
int sample_rate;
int channels;
int frame_size; /* in bytes ! */
enum AVCodecID codec_id;
unsigned int flip_left : 1;
uint8_t buffer[OSS_AUDIO_BLOCK_SIZE];
int buffer_ptr;
} OSSAudioData;
int ff_oss_audio_open(AVFormatContext *s1, int is_output,
const char *audio_device);
int ff_oss_audio_close(OSSAudioData *s);
#endif /* AVDEVICE_OSS_H */

View File

@@ -0,0 +1,149 @@
/*
* Linux audio play interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdint.h>
#if HAVE_SOUNDCARD_H
#include <soundcard.h>
#else
#include <sys/soundcard.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "libavformat/internal.h"
#include "oss.h"
static int audio_read_header(AVFormatContext *s1)
{
OSSAudioData *s = s1->priv_data;
AVStream *st;
int ret;
st = avformat_new_stream(s1, NULL);
if (!st) {
return AVERROR(ENOMEM);
}
ret = ff_oss_audio_open(s1, 0, s1->filename);
if (ret < 0) {
return AVERROR(EIO);
}
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = s->codec_id;
st->codec->sample_rate = s->sample_rate;
st->codec->channels = s->channels;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
return 0;
}
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
OSSAudioData *s = s1->priv_data;
int ret, bdelay;
int64_t cur_time;
struct audio_buf_info abufi;
if ((ret = av_new_packet(pkt, s->frame_size)) < 0)
return ret;
ret = read(s->fd, pkt->data, pkt->size);
if (ret <= 0) {
av_free_packet(pkt);
pkt->size = 0;
if (ret < 0)
return AVERROR(errno);
else
return AVERROR_EOF;
}
pkt->size = ret;
/* compute pts of the start of the packet */
cur_time = av_gettime();
bdelay = ret;
if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
bdelay += abufi.bytes;
}
/* subtract the time represented by the bytes still in the audio fifo;
the format negotiated above is always 16-bit, i.e. 2 bytes per sample */
cur_time -= (bdelay * 1000000LL) / (2 * s->sample_rate * s->channels);
/* convert to wanted units */
pkt->pts = cur_time;
if (s->flip_left && s->channels == 2) {
int i;
short *p = (short *) pkt->data;
for (i = 0; i < ret; i += 4) {
*p = ~*p;
p += 2;
}
}
return 0;
}
static int audio_read_close(AVFormatContext *s1)
{
OSSAudioData *s = s1->priv_data;
ff_oss_audio_close(s);
return 0;
}
static const AVOption options[] = {
{ "sample_rate", "", offsetof(OSSAudioData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(OSSAudioData, channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass oss_demuxer_class = {
.class_name = "OSS demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_oss_demuxer = {
.name = "oss",
.long_name = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) capture"),
.priv_data_size = sizeof(OSSAudioData),
.read_header = audio_read_header,
.read_packet = audio_read_packet,
.read_close = audio_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &oss_demuxer_class,
};
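/* From the command line the demuxer is selected with -f oss; the
 * sample_rate and channels AVOptions above map directly to CLI flags
 * (the device node may differ per system):
 *
 *     ffmpeg -f oss -sample_rate 44100 -channels 2 -i /dev/dsp capture.wav
 */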

View File

@@ -0,0 +1,118 @@
/*
* Linux audio grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#if HAVE_SOUNDCARD_H
#include <soundcard.h>
#else
#include <sys/soundcard.h>
#endif
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include "libavutil/internal.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "libavformat/internal.h"
#include "oss.h"
static int audio_write_header(AVFormatContext *s1)
{
OSSAudioData *s = s1->priv_data;
AVStream *st;
int ret;
st = s1->streams[0];
s->sample_rate = st->codec->sample_rate;
s->channels = st->codec->channels;
ret = ff_oss_audio_open(s1, 1, s1->filename);
if (ret < 0) {
return AVERROR(EIO);
} else {
return 0;
}
}
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
OSSAudioData *s = s1->priv_data;
int len, ret;
int size = pkt->size;
uint8_t *buf = pkt->data;
while (size > 0) {
len = FFMIN(OSS_AUDIO_BLOCK_SIZE - s->buffer_ptr, size);
memcpy(s->buffer + s->buffer_ptr, buf, len);
s->buffer_ptr += len;
if (s->buffer_ptr >= OSS_AUDIO_BLOCK_SIZE) {
for (;;) {
ret = write(s->fd, s->buffer, OSS_AUDIO_BLOCK_SIZE);
if (ret > 0)
break;
if (ret < 0 && (errno != EAGAIN && errno != EINTR))
return AVERROR(EIO);
}
s->buffer_ptr = 0;
}
buf += len;
size -= len;
}
return 0;
}
static int audio_write_trailer(AVFormatContext *s1)
{
OSSAudioData *s = s1->priv_data;
ff_oss_audio_close(s);
return 0;
}
static const AVClass oss_muxer_class = {
.class_name = "OSS muxer",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};
AVOutputFormat ff_oss_muxer = {
.name = "oss",
.long_name = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) playback"),
.priv_data_size = sizeof(OSSAudioData),
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
other formats */
.audio_codec = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
.video_codec = AV_CODEC_ID_NONE,
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = audio_write_trailer,
.flags = AVFMT_NOFILE,
.priv_class = &oss_muxer_class,
};
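/* The matching playback invocation routes decoded PCM to the muxer above
 * (again, the device node is system-dependent):
 *
 *     ffmpeg -i input.wav -f oss /dev/dsp
 */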

View File

@@ -0,0 +1,249 @@
/*
* Pulseaudio common
* Copyright (c) 2014 Lukasz Marek
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "pulse_audio_common.h"
#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#include "libavutil/avassert.h"
pa_sample_format_t av_cold ff_codec_id_to_pulse_format(enum AVCodecID codec_id)
{
switch (codec_id) {
case AV_CODEC_ID_PCM_U8: return PA_SAMPLE_U8;
case AV_CODEC_ID_PCM_ALAW: return PA_SAMPLE_ALAW;
case AV_CODEC_ID_PCM_MULAW: return PA_SAMPLE_ULAW;
case AV_CODEC_ID_PCM_S16LE: return PA_SAMPLE_S16LE;
case AV_CODEC_ID_PCM_S16BE: return PA_SAMPLE_S16BE;
case AV_CODEC_ID_PCM_F32LE: return PA_SAMPLE_FLOAT32LE;
case AV_CODEC_ID_PCM_F32BE: return PA_SAMPLE_FLOAT32BE;
case AV_CODEC_ID_PCM_S32LE: return PA_SAMPLE_S32LE;
case AV_CODEC_ID_PCM_S32BE: return PA_SAMPLE_S32BE;
case AV_CODEC_ID_PCM_S24LE: return PA_SAMPLE_S24LE;
case AV_CODEC_ID_PCM_S24BE: return PA_SAMPLE_S24BE;
default: return PA_SAMPLE_INVALID;
}
}
enum PulseAudioContextState {
PULSE_CONTEXT_INITIALIZING,
PULSE_CONTEXT_READY,
PULSE_CONTEXT_FINISHED
};
typedef struct PulseAudioDeviceList {
AVDeviceInfoList *devices;
int error_code;
int output;
char *default_device;
} PulseAudioDeviceList;
static void pa_state_cb(pa_context *c, void *userdata)
{
enum PulseAudioContextState *context_state = userdata;
switch (pa_context_get_state(c)) {
case PA_CONTEXT_FAILED:
case PA_CONTEXT_TERMINATED:
*context_state = PULSE_CONTEXT_FINISHED;
break;
case PA_CONTEXT_READY:
*context_state = PULSE_CONTEXT_READY;
break;
default:
break;
}
}
void ff_pulse_audio_disconnect_context(pa_mainloop **pa_ml, pa_context **pa_ctx)
{
av_assert0(pa_ml);
av_assert0(pa_ctx);
if (*pa_ctx) {
pa_context_set_state_callback(*pa_ctx, NULL, NULL);
pa_context_disconnect(*pa_ctx);
pa_context_unref(*pa_ctx);
}
if (*pa_ml)
pa_mainloop_free(*pa_ml);
*pa_ml = NULL;
*pa_ctx = NULL;
}
int ff_pulse_audio_connect_context(pa_mainloop **pa_ml, pa_context **pa_ctx,
const char *server, const char *description)
{
int ret;
pa_mainloop_api *pa_mlapi = NULL;
enum PulseAudioContextState context_state = PULSE_CONTEXT_INITIALIZING;
av_assert0(pa_ml);
av_assert0(pa_ctx);
*pa_ml = NULL;
*pa_ctx = NULL;
if (!(*pa_ml = pa_mainloop_new()))
return AVERROR(ENOMEM);
if (!(pa_mlapi = pa_mainloop_get_api(*pa_ml))) {
ret = AVERROR_EXTERNAL;
goto fail;
}
if (!(*pa_ctx = pa_context_new(pa_mlapi, description))) {
ret = AVERROR(ENOMEM);
goto fail;
}
pa_context_set_state_callback(*pa_ctx, pa_state_cb, &context_state);
if (pa_context_connect(*pa_ctx, server, 0, NULL) < 0) {
ret = AVERROR_EXTERNAL;
goto fail;
}
while (context_state == PULSE_CONTEXT_INITIALIZING)
pa_mainloop_iterate(*pa_ml, 1, NULL);
if (context_state == PULSE_CONTEXT_FINISHED) {
ret = AVERROR_EXTERNAL;
goto fail;
}
return 0;
fail:
ff_pulse_audio_disconnect_context(pa_ml, pa_ctx);
return ret;
}
static void pulse_add_detected_device(PulseAudioDeviceList *info,
const char *name, const char *description)
{
int ret;
AVDeviceInfo *new_device = NULL;
if (info->error_code)
return;
new_device = av_mallocz(sizeof(AVDeviceInfo));
if (!new_device) {
info->error_code = AVERROR(ENOMEM);
return;
}
new_device->device_description = av_strdup(description);
new_device->device_name = av_strdup(name);
if (!new_device->device_description || !new_device->device_name) {
info->error_code = AVERROR(ENOMEM);
goto fail;
}
if ((ret = av_dynarray_add_nofree(&info->devices->devices,
&info->devices->nb_devices, new_device)) < 0) {
info->error_code = ret;
goto fail;
}
return;
fail:
av_freep(&new_device->device_description);
av_freep(&new_device->device_name);
av_free(new_device);
}
static void pulse_audio_source_device_cb(pa_context *c, const pa_source_info *dev,
int eol, void *userdata)
{
if (!eol)
pulse_add_detected_device(userdata, dev->name, dev->description);
}
static void pulse_audio_sink_device_cb(pa_context *c, const pa_sink_info *dev,
int eol, void *userdata)
{
if (!eol)
pulse_add_detected_device(userdata, dev->name, dev->description);
}
static void pulse_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata)
{
PulseAudioDeviceList *info = userdata;
if (info->output)
info->default_device = av_strdup(i->default_sink_name);
else
info->default_device = av_strdup(i->default_source_name);
if (!info->default_device)
info->error_code = AVERROR(ENOMEM);
}
int ff_pulse_audio_get_devices(AVDeviceInfoList *devices, const char *server, int output)
{
pa_mainloop *pa_ml = NULL;
pa_operation *pa_op = NULL;
pa_context *pa_ctx = NULL;
enum pa_operation_state op_state;
PulseAudioDeviceList dev_list = { 0 };
int i;
dev_list.output = output;
dev_list.devices = devices;
if (!devices)
return AVERROR(EINVAL);
devices->nb_devices = 0;
devices->devices = NULL;
if ((dev_list.error_code = ff_pulse_audio_connect_context(&pa_ml, &pa_ctx, server, "Query devices")) < 0)
goto fail;
if (output)
pa_op = pa_context_get_sink_info_list(pa_ctx, pulse_audio_sink_device_cb, &dev_list);
else
pa_op = pa_context_get_source_info_list(pa_ctx, pulse_audio_source_device_cb, &dev_list);
while ((op_state = pa_operation_get_state(pa_op)) == PA_OPERATION_RUNNING)
pa_mainloop_iterate(pa_ml, 1, NULL);
if (op_state != PA_OPERATION_DONE)
dev_list.error_code = AVERROR_EXTERNAL;
pa_operation_unref(pa_op);
if (dev_list.error_code < 0)
goto fail;
pa_op = pa_context_get_server_info(pa_ctx, pulse_server_info_cb, &dev_list);
while ((op_state = pa_operation_get_state(pa_op)) == PA_OPERATION_RUNNING)
pa_mainloop_iterate(pa_ml, 1, NULL);
if (op_state != PA_OPERATION_DONE)
dev_list.error_code = AVERROR_EXTERNAL;
pa_operation_unref(pa_op);
if (dev_list.error_code < 0)
goto fail;
devices->default_device = -1;
for (i = 0; i < devices->nb_devices; i++) {
if (!strcmp(devices->devices[i]->device_name, dev_list.default_device)) {
devices->default_device = i;
break;
}
}
fail:
av_free(dev_list.default_device);
ff_pulse_audio_disconnect_context(&pa_ml, &pa_ctx);
return dev_list.error_code;
}
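/* Enumeration sketch (illustrative fragment; assumes the usual lavu/lavd
 * includes): listing sinks through the helper above. NULL selects the
 * default server; output=1 requests sinks, 0 would request sources. */
AVDeviceInfoList *list = av_mallocz(sizeof(*list));
int i, ret = ff_pulse_audio_get_devices(list, NULL, 1);
if (ret >= 0) {
    for (i = 0; i < list->nb_devices; i++)
        printf("%s%s: %s\n",
               i == list->default_device ? "* " : "  ",
               list->devices[i]->device_name,
               list->devices[i]->device_description);
}
avdevice_free_list_devices(&list);   /* frees the names and the list itself */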

View File

@@ -0,0 +1,38 @@
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_PULSE_AUDIO_COMMON_H
#define AVDEVICE_PULSE_AUDIO_COMMON_H
#include <pulse/pulseaudio.h>
#include "libavcodec/avcodec.h"
#include "avdevice.h"
pa_sample_format_t ff_codec_id_to_pulse_format(enum AVCodecID codec_id);
int ff_pulse_audio_get_devices(AVDeviceInfoList *devices, const char *server, int output);
int ff_pulse_audio_connect_context(pa_mainloop **pa_ml, pa_context **pa_ctx,
const char *server, const char *description);
void ff_pulse_audio_disconnect_context(pa_mainloop **pa_ml, pa_context **pa_ctx);
#endif /* AVDEVICE_PULSE_AUDIO_COMMON_H */

View File

@@ -0,0 +1,376 @@
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
* Copyright 2004-2006 Lennart Poettering
* Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <pulse/rtclock.h>
#include <pulse/error.h>
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "pulse_audio_common.h"
#include "timefilter.h"
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
typedef struct PulseData {
AVClass *class;
char *server;
char *name;
char *stream_name;
int sample_rate;
int channels;
int frame_size;
int fragment_size;
pa_threaded_mainloop *mainloop;
pa_context *context;
pa_stream *stream;
TimeFilter *timefilter;
int last_period;
int wallclock;
} PulseData;
#define CHECK_SUCCESS_GOTO(rerror, expression, label) \
do { \
if (!(expression)) { \
rerror = AVERROR_EXTERNAL; \
goto label; \
} \
} while (0)
#define CHECK_DEAD_GOTO(p, rerror, label) \
do { \
if (!(p)->context || !PA_CONTEXT_IS_GOOD(pa_context_get_state((p)->context)) || \
!(p)->stream || !PA_STREAM_IS_GOOD(pa_stream_get_state((p)->stream))) { \
rerror = AVERROR_EXTERNAL; \
goto label; \
} \
} while (0)
static void context_state_cb(pa_context *c, void *userdata) {
PulseData *p = userdata;
switch (pa_context_get_state(c)) {
case PA_CONTEXT_READY:
case PA_CONTEXT_TERMINATED:
case PA_CONTEXT_FAILED:
pa_threaded_mainloop_signal(p->mainloop, 0);
break;
}
}
static void stream_state_cb(pa_stream *s, void * userdata) {
PulseData *p = userdata;
switch (pa_stream_get_state(s)) {
case PA_STREAM_READY:
case PA_STREAM_FAILED:
case PA_STREAM_TERMINATED:
pa_threaded_mainloop_signal(p->mainloop, 0);
break;
}
}
static void stream_request_cb(pa_stream *s, size_t length, void *userdata) {
PulseData *p = userdata;
pa_threaded_mainloop_signal(p->mainloop, 0);
}
static void stream_latency_update_cb(pa_stream *s, void *userdata) {
PulseData *p = userdata;
pa_threaded_mainloop_signal(p->mainloop, 0);
}
static av_cold int pulse_close(AVFormatContext *s)
{
PulseData *pd = s->priv_data;
if (pd->mainloop)
pa_threaded_mainloop_stop(pd->mainloop);
if (pd->stream)
pa_stream_unref(pd->stream);
pd->stream = NULL;
if (pd->context) {
pa_context_disconnect(pd->context);
pa_context_unref(pd->context);
}
pd->context = NULL;
if (pd->mainloop)
pa_threaded_mainloop_free(pd->mainloop);
pd->mainloop = NULL;
ff_timefilter_destroy(pd->timefilter);
pd->timefilter = NULL;
return 0;
}
static av_cold int pulse_read_header(AVFormatContext *s)
{
PulseData *pd = s->priv_data;
AVStream *st;
char *device = NULL;
int ret;
enum AVCodecID codec_id =
s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
const pa_sample_spec ss = { ff_codec_id_to_pulse_format(codec_id),
pd->sample_rate,
pd->channels };
pa_buffer_attr attr = { -1 };
st = avformat_new_stream(s, NULL);
if (!st) {
av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
return AVERROR(ENOMEM);
}
attr.fragsize = pd->fragment_size;
if (s->filename[0] != '\0' && strcmp(s->filename, "default"))
device = s->filename;
if (!(pd->mainloop = pa_threaded_mainloop_new())) {
pulse_close(s);
return AVERROR_EXTERNAL;
}
if (!(pd->context = pa_context_new(pa_threaded_mainloop_get_api(pd->mainloop), pd->name))) {
pulse_close(s);
return AVERROR_EXTERNAL;
}
pa_context_set_state_callback(pd->context, context_state_cb, pd);
if (pa_context_connect(pd->context, pd->server, 0, NULL) < 0) {
pulse_close(s);
return AVERROR(pa_context_errno(pd->context));
}
pa_threaded_mainloop_lock(pd->mainloop);
if (pa_threaded_mainloop_start(pd->mainloop) < 0) {
ret = AVERROR_EXTERNAL;
goto unlock_and_fail;
}
for (;;) {
pa_context_state_t state;
state = pa_context_get_state(pd->context);
if (state == PA_CONTEXT_READY)
break;
if (!PA_CONTEXT_IS_GOOD(state)) {
ret = AVERROR(pa_context_errno(pd->context));
goto unlock_and_fail;
}
/* Wait until the context is ready */
pa_threaded_mainloop_wait(pd->mainloop);
}
if (!(pd->stream = pa_stream_new(pd->context, pd->stream_name, &ss, NULL))) {
ret = AVERROR(pa_context_errno(pd->context));
goto unlock_and_fail;
}
pa_stream_set_state_callback(pd->stream, stream_state_cb, pd);
pa_stream_set_read_callback(pd->stream, stream_request_cb, pd);
pa_stream_set_write_callback(pd->stream, stream_request_cb, pd);
pa_stream_set_latency_update_callback(pd->stream, stream_latency_update_cb, pd);
ret = pa_stream_connect_record(pd->stream, device, &attr,
PA_STREAM_INTERPOLATE_TIMING
|PA_STREAM_ADJUST_LATENCY
|PA_STREAM_AUTO_TIMING_UPDATE);
if (ret < 0) {
ret = AVERROR(pa_context_errno(pd->context));
goto unlock_and_fail;
}
for (;;) {
pa_stream_state_t state;
state = pa_stream_get_state(pd->stream);
if (state == PA_STREAM_READY)
break;
if (!PA_STREAM_IS_GOOD(state)) {
ret = AVERROR(pa_context_errno(pd->context));
goto unlock_and_fail;
}
/* Wait until the stream is ready */
pa_threaded_mainloop_wait(pd->mainloop);
}
pa_threaded_mainloop_unlock(pd->mainloop);
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = codec_id;
st->codec->sample_rate = pd->sample_rate;
st->codec->channels = pd->channels;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
pd->timefilter = ff_timefilter_new(1000000.0 / pd->sample_rate,
1000, 1.5E-6);
if (!pd->timefilter) {
pulse_close(s);
return AVERROR(ENOMEM);
}
return 0;
unlock_and_fail:
pa_threaded_mainloop_unlock(pd->mainloop);
pulse_close(s);
return ret;
}
static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
{
PulseData *pd = s->priv_data;
int ret;
size_t read_length;
const void *read_data = NULL;
int64_t dts;
pa_usec_t latency;
int negative;
pa_threaded_mainloop_lock(pd->mainloop);
CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);
while (!read_data) {
int r;
r = pa_stream_peek(pd->stream, &read_data, &read_length);
CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);
if (!read_length) {
pa_threaded_mainloop_wait(pd->mainloop);
CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);
} else if (!read_data) {
/* There's a hole in the stream, skip it. We could generate
* silence, but that wouldn't work for compressed streams. */
r = pa_stream_drop(pd->stream);
CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);
}
}
if (av_new_packet(pkt, read_length) < 0) {
ret = AVERROR(ENOMEM);
goto unlock_and_fail;
}
dts = av_gettime();
pa_operation_unref(pa_stream_update_timing_info(pd->stream, NULL, NULL));
if (pa_stream_get_latency(pd->stream, &latency, &negative) >= 0) {
enum AVCodecID codec_id =
s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
int frame_size = ((av_get_bits_per_sample(codec_id) >> 3) * pd->channels);
int frame_duration = read_length / frame_size;
if (negative) {
dts += latency;
} else
dts -= latency;
if (pd->wallclock)
pkt->pts = ff_timefilter_update(pd->timefilter, dts, pd->last_period);
pd->last_period = frame_duration;
} else {
av_log(s, AV_LOG_WARNING, "pa_stream_get_latency() failed\n");
}
memcpy(pkt->data, read_data, read_length);
pa_stream_drop(pd->stream);
pa_threaded_mainloop_unlock(pd->mainloop);
return 0;
unlock_and_fail:
pa_threaded_mainloop_unlock(pd->mainloop);
return ret;
}
static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
PulseData *s = h->priv_data;
return ff_pulse_audio_get_devices(device_list, s->server, 0);
}
#define OFFSET(a) offsetof(PulseData, a)
#define D AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "server", "set PulseAudio server", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, D },
{ "name", "set application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, D },
{ "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = "record"}, 0, 0, D },
{ "sample_rate", "set sample rate in Hz", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, D },
{ "channels", "set number of audio channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, D },
{ "frame_size", "set number of bytes per frame", OFFSET(frame_size), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, D },
{ "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, D },
{ "wallclock", "set the initial pts using the current time", OFFSET(wallclock), AV_OPT_TYPE_INT, {.i64 = 1}, -1, 1, D },
{ NULL },
};
static const AVClass pulse_demuxer_class = {
.class_name = "Pulse demuxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_pulse_demuxer = {
.name = "pulse",
.long_name = NULL_IF_CONFIG_SMALL("Pulse audio input"),
.priv_data_size = sizeof(PulseData),
.read_header = pulse_read_header,
.read_packet = pulse_read_packet,
.read_close = pulse_close,
.get_device_list = pulse_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &pulse_demuxer_class,
};
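/* Typical CLI use of this demuxer captures from the default source, or from
 * a named one taken from the device list:
 *
 *     ffmpeg -f pulse -i default -t 10 capture.wav
 */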

View File

@@ -0,0 +1,796 @@
/*
* Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <math.h>
#include <pulse/pulseaudio.h>
#include <pulse/error.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/log.h"
#include "libavutil/attributes.h"
#include "pulse_audio_common.h"
typedef struct PulseData {
AVClass *class;
const char *server;
const char *name;
const char *stream_name;
const char *device;
int64_t timestamp;
int buffer_size; /**< Buffer size in bytes */
int buffer_duration; /**< Buffer size in ms, recalculated to buffer_size */
int prebuf;
int minreq;
int last_result;
pa_threaded_mainloop *mainloop;
pa_context *ctx;
pa_stream *stream;
int nonblocking;
int mute;
pa_volume_t base_volume;
pa_volume_t last_volume;
} PulseData;
static void pulse_audio_sink_device_cb(pa_context *ctx, const pa_sink_info *dev,
int eol, void *userdata)
{
PulseData *s = userdata;
if (s->ctx != ctx)
return;
if (eol) {
pa_threaded_mainloop_signal(s->mainloop, 0);
} else {
if (dev->flags & PA_SINK_FLAT_VOLUME)
s->base_volume = dev->base_volume;
else
s->base_volume = PA_VOLUME_NORM;
av_log(s, AV_LOG_DEBUG, "base volume: %u\n", s->base_volume);
}
}
/* Mainloop must be locked before calling this function as it uses pa_threaded_mainloop_wait. */
static int pulse_update_sink_info(AVFormatContext *h)
{
PulseData *s = h->priv_data;
pa_operation *op;
if (!(op = pa_context_get_sink_info_by_name(s->ctx, s->device,
pulse_audio_sink_device_cb, s))) {
av_log(s, AV_LOG_ERROR, "pa_context_get_sink_info_by_name failed.\n");
return AVERROR_EXTERNAL;
}
while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
pa_threaded_mainloop_wait(s->mainloop);
pa_operation_unref(op);
return 0;
}
static void pulse_audio_sink_input_cb(pa_context *ctx, const pa_sink_input_info *i,
int eol, void *userdata)
{
AVFormatContext *h = userdata;
PulseData *s = h->priv_data;
if (s->ctx != ctx)
return;
if (!eol) {
double val;
pa_volume_t vol = pa_cvolume_avg(&i->volume);
if (s->mute < 0 || (s->mute && !i->mute) || (!s->mute && i->mute)) {
s->mute = i->mute;
avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_MUTE_STATE_CHANGED, &s->mute, sizeof(s->mute));
}
vol = pa_sw_volume_divide(vol, s->base_volume);
if (s->last_volume != vol) {
val = (double)vol / PA_VOLUME_NORM;
avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED, &val, sizeof(val));
s->last_volume = vol;
}
}
}
/* This function creates a mainloop of its own, so it may be called from PA
callbacks. The threaded mainloop must be locked before calling this function
as it operates on streams. */
static int pulse_update_sink_input_info(AVFormatContext *h)
{
PulseData *s = h->priv_data;
pa_operation *op;
enum pa_operation_state op_state;
pa_mainloop *ml = NULL;
pa_context *ctx = NULL;
int ret = 0;
if ((ret = ff_pulse_audio_connect_context(&ml, &ctx, s->server, "Update sink input information")) < 0)
return ret;
if (!(op = pa_context_get_sink_input_info(ctx, pa_stream_get_index(s->stream),
pulse_audio_sink_input_cb, h))) {
ret = AVERROR_EXTERNAL;
goto fail;
}
while ((op_state = pa_operation_get_state(op)) == PA_OPERATION_RUNNING)
pa_mainloop_iterate(ml, 1, NULL);
pa_operation_unref(op);
if (op_state != PA_OPERATION_DONE) {
ret = AVERROR_EXTERNAL;
goto fail;
}
fail:
ff_pulse_audio_disconnect_context(&ml, &ctx);
if (ret)
av_log(s, AV_LOG_ERROR, "pa_context_get_sink_input_info failed.\n");
return ret;
}
static void pulse_event(pa_context *ctx, pa_subscription_event_type_t t,
uint32_t idx, void *userdata)
{
AVFormatContext *h = userdata;
PulseData *s = h->priv_data;
if (s->ctx != ctx)
return;
if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT) {
if ((t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE)
// Calling from mainloop callback. No need to lock mainloop.
pulse_update_sink_input_info(h);
}
}
static void pulse_stream_writable(pa_stream *stream, size_t nbytes, void *userdata)
{
AVFormatContext *h = userdata;
PulseData *s = h->priv_data;
int64_t val = nbytes;
if (stream != s->stream)
return;
avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &val, sizeof(val));
pa_threaded_mainloop_signal(s->mainloop, 0);
}
static void pulse_overflow(pa_stream *stream, void *userdata)
{
AVFormatContext *h = userdata;
avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_OVERFLOW, NULL, 0);
}
static void pulse_underflow(pa_stream *stream, void *userdata)
{
AVFormatContext *h = userdata;
avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_UNDERFLOW, NULL, 0);
}
static void pulse_stream_state(pa_stream *stream, void *userdata)
{
PulseData *s = userdata;
if (stream != s->stream)
return;
switch (pa_stream_get_state(s->stream)) {
case PA_STREAM_READY:
case PA_STREAM_FAILED:
case PA_STREAM_TERMINATED:
pa_threaded_mainloop_signal(s->mainloop, 0);
default:
break;
}
}
static int pulse_stream_wait(PulseData *s)
{
pa_stream_state_t state;
while ((state = pa_stream_get_state(s->stream)) != PA_STREAM_READY) {
if (state == PA_STREAM_FAILED || state == PA_STREAM_TERMINATED)
return AVERROR_EXTERNAL;
pa_threaded_mainloop_wait(s->mainloop);
}
return 0;
}
static void pulse_context_state(pa_context *ctx, void *userdata)
{
PulseData *s = userdata;
if (s->ctx != ctx)
return;
switch (pa_context_get_state(ctx)) {
case PA_CONTEXT_READY:
case PA_CONTEXT_FAILED:
case PA_CONTEXT_TERMINATED:
pa_threaded_mainloop_signal(s->mainloop, 0);
default:
break;
}
}
static int pulse_context_wait(PulseData *s)
{
pa_context_state_t state;
while ((state = pa_context_get_state(s->ctx)) != PA_CONTEXT_READY) {
if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
return AVERROR_EXTERNAL;
pa_threaded_mainloop_wait(s->mainloop);
}
return 0;
}
static void pulse_stream_result(pa_stream *stream, int success, void *userdata)
{
PulseData *s = userdata;
if (stream != s->stream)
return;
s->last_result = success ? 0 : AVERROR_EXTERNAL;
pa_threaded_mainloop_signal(s->mainloop, 0);
}
static int pulse_finish_stream_operation(PulseData *s, pa_operation *op, const char *name)
{
if (!op) {
pa_threaded_mainloop_unlock(s->mainloop);
av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
return AVERROR_EXTERNAL;
}
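/* 2 is a sentinel value meaning "operation still pending"; the stream
result callback overwrites it with 0 or an AVERROR code. */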
s->last_result = 2;
while (s->last_result == 2)
pa_threaded_mainloop_wait(s->mainloop);
pa_operation_unref(op);
pa_threaded_mainloop_unlock(s->mainloop);
if (s->last_result != 0)
av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
return s->last_result;
}
static int pulse_set_pause(PulseData *s, int pause)
{
pa_operation *op;
pa_threaded_mainloop_lock(s->mainloop);
op = pa_stream_cork(s->stream, pause, pulse_stream_result, s);
return pulse_finish_stream_operation(s, op, "pa_stream_cork");
}
static int pulse_flush_stream(PulseData *s)
{
pa_operation *op;
pa_threaded_mainloop_lock(s->mainloop);
op = pa_stream_flush(s->stream, pulse_stream_result, s);
return pulse_finish_stream_operation(s, op, "pa_stream_flush");
}
static void pulse_context_result(pa_context *ctx, int success, void *userdata)
{
PulseData *s = userdata;
if (s->ctx != ctx)
return;
s->last_result = success ? 0 : AVERROR_EXTERNAL;
pa_threaded_mainloop_signal(s->mainloop, 0);
}
static int pulse_finish_context_operation(PulseData *s, pa_operation *op, const char *name)
{
if (!op) {
pa_threaded_mainloop_unlock(s->mainloop);
av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
return AVERROR_EXTERNAL;
}
s->last_result = 2;
while (s->last_result == 2)
pa_threaded_mainloop_wait(s->mainloop);
pa_operation_unref(op);
pa_threaded_mainloop_unlock(s->mainloop);
if (s->last_result != 0)
av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
return s->last_result;
}
static int pulse_set_mute(PulseData *s)
{
pa_operation *op;
pa_threaded_mainloop_lock(s->mainloop);
op = pa_context_set_sink_input_mute(s->ctx, pa_stream_get_index(s->stream),
s->mute, pulse_context_result, s);
return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_mute");
}
static int pulse_set_volume(PulseData *s, double volume)
{
pa_operation *op;
pa_cvolume cvol;
pa_volume_t vol;
const pa_sample_spec *ss = pa_stream_get_sample_spec(s->stream);
vol = pa_sw_volume_multiply(lround(volume * PA_VOLUME_NORM), s->base_volume);
pa_cvolume_set(&cvol, ss->channels, PA_VOLUME_NORM);
pa_sw_cvolume_multiply_scalar(&cvol, &cvol, vol);
pa_threaded_mainloop_lock(s->mainloop);
op = pa_context_set_sink_input_volume(s->ctx, pa_stream_get_index(s->stream),
&cvol, pulse_context_result, s);
return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_volume");
}
static int pulse_subscribe_events(PulseData *s)
{
pa_operation *op;
pa_threaded_mainloop_lock(s->mainloop);
op = pa_context_subscribe(s->ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT, pulse_context_result, s);
return pulse_finish_context_operation(s, op, "pa_context_subscribe");
}
static void pulse_map_channels_to_pulse(int64_t channel_layout, pa_channel_map *channel_map)
{
channel_map->channels = 0;
if (channel_layout & AV_CH_FRONT_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
if (channel_layout & AV_CH_FRONT_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
if (channel_layout & AV_CH_FRONT_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_CENTER;
if (channel_layout & AV_CH_LOW_FREQUENCY)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
if (channel_layout & AV_CH_BACK_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_LEFT;
if (channel_layout & AV_CH_BACK_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_RIGHT;
if (channel_layout & AV_CH_FRONT_LEFT_OF_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER;
if (channel_layout & AV_CH_FRONT_RIGHT_OF_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER;
if (channel_layout & AV_CH_BACK_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_REAR_CENTER;
if (channel_layout & AV_CH_SIDE_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_LEFT;
if (channel_layout & AV_CH_SIDE_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_SIDE_RIGHT;
if (channel_layout & AV_CH_TOP_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_CENTER;
if (channel_layout & AV_CH_TOP_FRONT_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_LEFT;
if (channel_layout & AV_CH_TOP_FRONT_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_CENTER;
if (channel_layout & AV_CH_TOP_FRONT_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_FRONT_RIGHT;
if (channel_layout & AV_CH_TOP_BACK_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_LEFT;
if (channel_layout & AV_CH_TOP_BACK_CENTER)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_CENTER;
if (channel_layout & AV_CH_TOP_BACK_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_TOP_REAR_RIGHT;
if (channel_layout & AV_CH_STEREO_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_LEFT;
if (channel_layout & AV_CH_STEREO_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_FRONT_RIGHT;
if (channel_layout & AV_CH_WIDE_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX0;
if (channel_layout & AV_CH_WIDE_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX1;
if (channel_layout & AV_CH_SURROUND_DIRECT_LEFT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX2;
if (channel_layout & AV_CH_SURROUND_DIRECT_RIGHT)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_AUX3;
if (channel_layout & AV_CH_LOW_FREQUENCY_2)
channel_map->map[channel_map->channels++] = PA_CHANNEL_POSITION_LFE;
}
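/* Worked example (not from the source): for AV_CH_LAYOUT_STEREO, i.e.
 * AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT, the cascade above yields
 * channel_map->channels == 2 with map[0] = PA_CHANNEL_POSITION_FRONT_LEFT
 * and map[1] = PA_CHANNEL_POSITION_FRONT_RIGHT. The tests run in libavutil
 * bit order, which matches FFmpeg's channel ordering, so the resulting map
 * is order-preserving. */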
static av_cold int pulse_write_trailer(AVFormatContext *h)
{
PulseData *s = h->priv_data;
if (s->mainloop) {
pa_threaded_mainloop_lock(s->mainloop);
if (s->stream) {
pa_stream_disconnect(s->stream);
pa_stream_set_state_callback(s->stream, NULL, NULL);
pa_stream_set_write_callback(s->stream, NULL, NULL);
pa_stream_set_overflow_callback(s->stream, NULL, NULL);
pa_stream_set_underflow_callback(s->stream, NULL, NULL);
pa_stream_unref(s->stream);
s->stream = NULL;
}
if (s->ctx) {
pa_context_disconnect(s->ctx);
pa_context_set_state_callback(s->ctx, NULL, NULL);
pa_context_set_subscribe_callback(s->ctx, NULL, NULL);
pa_context_unref(s->ctx);
s->ctx = NULL;
}
pa_threaded_mainloop_unlock(s->mainloop);
pa_threaded_mainloop_stop(s->mainloop);
pa_threaded_mainloop_free(s->mainloop);
s->mainloop = NULL;
}
return 0;
}
static av_cold int pulse_write_header(AVFormatContext *h)
{
PulseData *s = h->priv_data;
AVStream *st = NULL;
int ret;
pa_sample_spec sample_spec;
pa_buffer_attr buffer_attributes = { -1, -1, -1, -1, -1 };
pa_channel_map channel_map;
pa_mainloop_api *mainloop_api;
const char *stream_name = s->stream_name;
static const pa_stream_flags_t stream_flags = PA_STREAM_INTERPOLATE_TIMING |
PA_STREAM_AUTO_TIMING_UPDATE |
PA_STREAM_NOT_MONOTONIC;
if (h->nb_streams != 1 || h->streams[0]->codec->codec_type != AVMEDIA_TYPE_AUDIO) {
av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
return AVERROR(EINVAL);
}
st = h->streams[0];
if (!stream_name) {
if (h->filename[0])
stream_name = h->filename;
else
stream_name = "Playback";
}
s->nonblocking = (h->flags & AVFMT_FLAG_NONBLOCK);
if (s->buffer_duration) {
int64_t bytes = s->buffer_duration;
bytes *= st->codec->channels * st->codec->sample_rate *
av_get_bytes_per_sample(st->codec->sample_fmt);
bytes /= 1000;
buffer_attributes.tlength = FFMAX(s->buffer_size, av_clip64(bytes, 0, UINT32_MAX - 1));
av_log(s, AV_LOG_DEBUG,
"Buffer duration: %ums recalculated into %"PRId64" bytes buffer.\n",
s->buffer_duration, bytes);
av_log(s, AV_LOG_DEBUG, "Real buffer length is %u bytes\n", buffer_attributes.tlength);
} else if (s->buffer_size)
buffer_attributes.tlength = s->buffer_size;
if (s->prebuf)
buffer_attributes.prebuf = s->prebuf;
if (s->minreq)
buffer_attributes.minreq = s->minreq;
sample_spec.format = ff_codec_id_to_pulse_format(st->codec->codec_id);
sample_spec.rate = st->codec->sample_rate;
sample_spec.channels = st->codec->channels;
if (!pa_sample_spec_valid(&sample_spec)) {
av_log(s, AV_LOG_ERROR, "Invalid sample spec.\n");
return AVERROR(EINVAL);
}
if (sample_spec.channels == 1) {
channel_map.channels = 1;
channel_map.map[0] = PA_CHANNEL_POSITION_MONO;
} else if (st->codec->channel_layout) {
if (av_get_channel_layout_nb_channels(st->codec->channel_layout) != st->codec->channels)
return AVERROR(EINVAL);
pulse_map_channels_to_pulse(st->codec->channel_layout, &channel_map);
/* An unknown channel is present in channel_layout, so let PulseAudio use its default. */
if (channel_map.channels != sample_spec.channels) {
av_log(s, AV_LOG_WARNING, "Unknown channel. Using default channel map.\n");
channel_map.channels = 0;
}
} else
channel_map.channels = 0;
if (!channel_map.channels)
av_log(s, AV_LOG_WARNING, "Using PulseAudio's default channel map.\n");
else if (!pa_channel_map_valid(&channel_map)) {
av_log(s, AV_LOG_ERROR, "Invalid channel map.\n");
return AVERROR(EINVAL);
}
/* start main loop */
s->mainloop = pa_threaded_mainloop_new();
if (!s->mainloop) {
av_log(s, AV_LOG_ERROR, "Cannot create threaded mainloop.\n");
return AVERROR(ENOMEM);
}
if ((ret = pa_threaded_mainloop_start(s->mainloop)) < 0) {
av_log(s, AV_LOG_ERROR, "Cannot start threaded mainloop: %s.\n", pa_strerror(ret));
pa_threaded_mainloop_free(s->mainloop);
s->mainloop = NULL;
return AVERROR_EXTERNAL;
}
pa_threaded_mainloop_lock(s->mainloop);
mainloop_api = pa_threaded_mainloop_get_api(s->mainloop);
if (!mainloop_api) {
av_log(s, AV_LOG_ERROR, "Cannot get mainloop API.\n");
ret = AVERROR_EXTERNAL;
goto fail;
}
s->ctx = pa_context_new(mainloop_api, s->name);
if (!s->ctx) {
av_log(s, AV_LOG_ERROR, "Cannot create context.\n");
ret = AVERROR(ENOMEM);
goto fail;
}
pa_context_set_state_callback(s->ctx, pulse_context_state, s);
pa_context_set_subscribe_callback(s->ctx, pulse_event, h);
if ((ret = pa_context_connect(s->ctx, s->server, 0, NULL)) < 0) {
av_log(s, AV_LOG_ERROR, "Cannot connect context: %s.\n", pa_strerror(ret));
ret = AVERROR_EXTERNAL;
goto fail;
}
if ((ret = pulse_context_wait(s)) < 0) {
av_log(s, AV_LOG_ERROR, "Context failed.\n");
goto fail;
}
s->stream = pa_stream_new(s->ctx, stream_name, &sample_spec,
channel_map.channels ? &channel_map : NULL);
if ((ret = pulse_update_sink_info(h)) < 0) {
av_log(s, AV_LOG_ERROR, "Updating sink info failed.\n");
goto fail;
}
if (!s->stream) {
av_log(s, AV_LOG_ERROR, "Cannot create stream.\n");
ret = AVERROR(ENOMEM);
goto fail;
}
pa_stream_set_state_callback(s->stream, pulse_stream_state, s);
pa_stream_set_write_callback(s->stream, pulse_stream_writable, h);
pa_stream_set_overflow_callback(s->stream, pulse_overflow, h);
pa_stream_set_underflow_callback(s->stream, pulse_underflow, h);
if ((ret = pa_stream_connect_playback(s->stream, s->device, &buffer_attributes,
stream_flags, NULL, NULL)) < 0) {
av_log(s, AV_LOG_ERROR, "pa_stream_connect_playback failed: %s.\n", pa_strerror(ret));
ret = AVERROR_EXTERNAL;
goto fail;
}
if ((ret = pulse_stream_wait(s)) < 0) {
av_log(s, AV_LOG_ERROR, "Stream failed.\n");
goto fail;
}
/* read back buffer attributes for future use */
buffer_attributes = *pa_stream_get_buffer_attr(s->stream);
s->buffer_size = buffer_attributes.tlength;
s->prebuf = buffer_attributes.prebuf;
s->minreq = buffer_attributes.minreq;
av_log(s, AV_LOG_DEBUG, "Real buffer attributes: size: %d, prebuf: %d, minreq: %d\n",
s->buffer_size, s->prebuf, s->minreq);
pa_threaded_mainloop_unlock(s->mainloop);
if ((ret = pulse_subscribe_events(s)) < 0) {
av_log(s, AV_LOG_ERROR, "Event subscription failed.\n");
/* A bit ugly, but it is simplest to lock here. */
pa_threaded_mainloop_lock(s->mainloop);
goto fail;
}
/* force control messages */
s->mute = -1;
s->last_volume = PA_VOLUME_INVALID;
pa_threaded_mainloop_lock(s->mainloop);
if ((ret = pulse_update_sink_input_info(h)) < 0) {
av_log(s, AV_LOG_ERROR, "Updating sink input info failed.\n");
goto fail;
}
pa_threaded_mainloop_unlock(s->mainloop);
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
return 0;
fail:
pa_threaded_mainloop_unlock(s->mainloop);
pulse_write_trailer(h);
return ret;
}
static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
{
PulseData *s = h->priv_data;
int ret;
int64_t writable_size;
if (!pkt)
return pulse_flush_stream(s);
if (pkt->dts != AV_NOPTS_VALUE)
s->timestamp = pkt->dts;
if (pkt->duration) {
s->timestamp += pkt->duration;
} else {
AVStream *st = h->streams[0];
AVCodecContext *codec_ctx = st->codec;
AVRational r = { 1, codec_ctx->sample_rate };
int64_t samples = pkt->size / (av_get_bytes_per_sample(codec_ctx->sample_fmt) * codec_ctx->channels);
s->timestamp += av_rescale_q(samples, r, st->time_base);
}
pa_threaded_mainloop_lock(s->mainloop);
if (!PA_STREAM_IS_GOOD(pa_stream_get_state(s->stream))) {
av_log(s, AV_LOG_ERROR, "PulseAudio stream is in invalid state.\n");
goto fail;
}
while (pa_stream_writable_size(s->stream) < s->minreq) {
if (s->nonblocking) {
pa_threaded_mainloop_unlock(s->mainloop);
return AVERROR(EAGAIN);
} else
pa_threaded_mainloop_wait(s->mainloop);
}
if ((ret = pa_stream_write(s->stream, pkt->data, pkt->size, NULL, 0, PA_SEEK_RELATIVE)) < 0) {
av_log(s, AV_LOG_ERROR, "pa_stream_write failed: %s\n", pa_strerror(ret));
goto fail;
}
if ((writable_size = pa_stream_writable_size(s->stream)) >= s->minreq)
avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &writable_size, sizeof(writable_size));
pa_threaded_mainloop_unlock(s->mainloop);
return 0;
fail:
pa_threaded_mainloop_unlock(s->mainloop);
return AVERROR_EXTERNAL;
}
static int pulse_write_frame(AVFormatContext *h, int stream_index,
AVFrame **frame, unsigned flags)
{
AVPacket pkt;
/* Planar formats are not supported yet. */
if (flags & AV_WRITE_UNCODED_FRAME_QUERY)
return av_sample_fmt_is_planar(h->streams[stream_index]->codec->sample_fmt) ?
AVERROR(EINVAL) : 0;
pkt.data = (*frame)->data[0];
pkt.size = (*frame)->nb_samples * av_get_bytes_per_sample((*frame)->format) * av_frame_get_channels(*frame);
pkt.dts = (*frame)->pkt_dts;
pkt.duration = av_frame_get_pkt_duration(*frame);
return pulse_write_packet(h, &pkt);
}
static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
{
PulseData *s = h->priv_data;
pa_usec_t latency;
int neg;
pa_threaded_mainloop_lock(s->mainloop);
pa_stream_get_latency(s->stream, &latency, &neg);
pa_threaded_mainloop_unlock(s->mainloop);
if (wall)
*wall = av_gettime();
if (dts)
*dts = s->timestamp - (neg ? -latency : latency);
}
static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
PulseData *s = h->priv_data;
return ff_pulse_audio_get_devices(device_list, s->server, 1);
}
static int pulse_control_message(AVFormatContext *h, int type,
void *data, size_t data_size)
{
PulseData *s = h->priv_data;
int ret;
switch(type) {
case AV_APP_TO_DEV_PAUSE:
return pulse_set_pause(s, 1);
case AV_APP_TO_DEV_PLAY:
return pulse_set_pause(s, 0);
case AV_APP_TO_DEV_TOGGLE_PAUSE:
return pulse_set_pause(s, !pa_stream_is_corked(s->stream));
case AV_APP_TO_DEV_MUTE:
if (!s->mute) {
s->mute = 1;
return pulse_set_mute(s);
}
return 0;
case AV_APP_TO_DEV_UNMUTE:
if (s->mute) {
s->mute = 0;
return pulse_set_mute(s);
}
return 0;
case AV_APP_TO_DEV_TOGGLE_MUTE:
s->mute = !s->mute;
return pulse_set_mute(s);
case AV_APP_TO_DEV_SET_VOLUME:
return pulse_set_volume(s, *(double *)data);
case AV_APP_TO_DEV_GET_VOLUME:
s->last_volume = PA_VOLUME_INVALID;
pa_threaded_mainloop_lock(s->mainloop);
ret = pulse_update_sink_input_info(h);
pa_threaded_mainloop_unlock(s->mainloop);
return ret;
case AV_APP_TO_DEV_GET_MUTE:
s->mute = -1;
pa_threaded_mainloop_lock(s->mainloop);
ret = pulse_update_sink_input_info(h);
pa_threaded_mainloop_unlock(s->mainloop);
return ret;
default:
break;
}
return AVERROR(ENOSYS);
}
#define OFFSET(a) offsetof(PulseData, a)
#define E AV_OPT_FLAG_ENCODING_PARAM
static const AVOption options[] = {
{ "server", "set PulseAudio server", OFFSET(server), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
{ "name", "set application name", OFFSET(name), AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0, E },
{ "stream_name", "set stream description", OFFSET(stream_name), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
{ "device", "set device name", OFFSET(device), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, E },
{ "buffer_size", "set buffer size in bytes", OFFSET(buffer_size), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
{ "buffer_duration", "set buffer duration in millisecs", OFFSET(buffer_duration), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
{ "prebuf", "set pre-buffering size", OFFSET(prebuf), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
{ "minreq", "set minimum request size", OFFSET(minreq), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, E },
{ NULL }
};
static const AVClass pulse_muxer_class = {
.class_name = "PulseAudio muxer",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};
AVOutputFormat ff_pulse_muxer = {
.name = "pulse",
.long_name = NULL_IF_CONFIG_SMALL("Pulse audio output"),
.priv_data_size = sizeof(PulseData),
.audio_codec = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
.video_codec = AV_CODEC_ID_NONE,
.write_header = pulse_write_header,
.write_packet = pulse_write_packet,
.write_uncoded_frame = pulse_write_frame,
.write_trailer = pulse_write_trailer,
.get_output_timestamp = pulse_get_output_timestamp,
.get_device_list = pulse_get_device_list,
.control_message = pulse_control_message,
.flags = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
.priv_class = &pulse_muxer_class,
};
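/* Sketch (illustrative only) of driving the muxer from an application once
 * the output is open, h being the muxer's AVFormatContext; these message
 * types are exactly the ones pulse_control_message() above handles. */
double vol = 0.5;                      /* 50% of the sink's base volume */
avdevice_app_to_dev_control_message(h, AV_APP_TO_DEV_SET_VOLUME,
                                    &vol, sizeof(vol));
avdevice_app_to_dev_control_message(h, AV_APP_TO_DEV_PAUSE, NULL, 0);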

View File

@@ -0,0 +1,362 @@
/*
* QTKit input device
* Copyright (c) 2013 Vadim Kalinsky <vadim@kalinsky.ru>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* QTKit input device
* @author Vadim Kalinsky <vadim@kalinsky.ru>
*/
#if defined(__clang__)
#pragma clang diagnostic ignored "-Wdeprecated-declarations"
#endif
#import <QTKit/QTKit.h>
#include <pthread.h>
#include "libavutil/pixdesc.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/time.h"
#include "avdevice.h"
#define QTKIT_TIMEBASE 100
static const AVRational kQTKitTimeBase_q = {
.num = 1,
.den = QTKIT_TIMEBASE
};
typedef struct
{
AVClass* class;
float frame_rate;
int frames_captured;
int64_t first_pts;
pthread_mutex_t frame_lock;
pthread_cond_t frame_wait_cond;
id qt_delegate;
int list_devices;
int video_device_index;
QTCaptureSession* capture_session;
QTCaptureDecompressedVideoOutput* video_output;
CVImageBufferRef current_frame;
} CaptureContext;
static void lock_frames(CaptureContext* ctx)
{
pthread_mutex_lock(&ctx->frame_lock);
}
static void unlock_frames(CaptureContext* ctx)
{
pthread_mutex_unlock(&ctx->frame_lock);
}
/** FrameReceiver class - delegate for QTCaptureSession
 */
@interface FFMPEG_FrameReceiver : NSObject
{
CaptureContext* _context;
}
- (id)initWithContext:(CaptureContext*)context;
- (void)captureOutput:(QTCaptureOutput *)captureOutput
didOutputVideoFrame:(CVImageBufferRef)videoFrame
withSampleBuffer:(QTSampleBuffer *)sampleBuffer
fromConnection:(QTCaptureConnection *)connection;
@end
@implementation FFMPEG_FrameReceiver
- (id)initWithContext:(CaptureContext*)context
{
if (self = [super init]) {
_context = context;
}
return self;
}
- (void)captureOutput:(QTCaptureOutput *)captureOutput
didOutputVideoFrame:(CVImageBufferRef)videoFrame
withSampleBuffer:(QTSampleBuffer *)sampleBuffer
fromConnection:(QTCaptureConnection *)connection
{
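    /* keep only the most recent frame: drop any frame the reader has not
     * consumed yet, retain the new one, and wake up the reader */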
lock_frames(_context);
if (_context->current_frame != nil) {
CVBufferRelease(_context->current_frame);
}
    _context->current_frame = CVBufferRetain(videoFrame);
    ++_context->frames_captured;
    pthread_cond_signal(&_context->frame_wait_cond);
    unlock_frames(_context);
}
@end
static void destroy_context(CaptureContext* ctx)
{
[ctx->capture_session stopRunning];
[ctx->capture_session release];
[ctx->video_output release];
[ctx->qt_delegate release];
ctx->capture_session = NULL;
ctx->video_output = NULL;
ctx->qt_delegate = NULL;
pthread_mutex_destroy(&ctx->frame_lock);
pthread_cond_destroy(&ctx->frame_wait_cond);
if (ctx->current_frame)
CVBufferRelease(ctx->current_frame);
}
static int qtkit_read_header(AVFormatContext *s)
{
NSAutoreleasePool* pool = [[NSAutoreleasePool alloc] init];
CaptureContext* ctx = (CaptureContext*)s->priv_data;
ctx->first_pts = av_gettime();
pthread_mutex_init(&ctx->frame_lock, NULL);
pthread_cond_init(&ctx->frame_wait_cond, NULL);
// List devices if requested
if (ctx->list_devices) {
av_log(ctx, AV_LOG_INFO, "QTKit video devices:\n");
NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
for (QTCaptureDevice *device in devices) {
const char *name = [[device localizedDisplayName] UTF8String];
int index = [devices indexOfObject:device];
av_log(ctx, AV_LOG_INFO, "[%d] %s\n", index, name);
}
goto fail;
}
// Find capture device
QTCaptureDevice *video_device = nil;
// check for device index given in filename
if (ctx->video_device_index == -1) {
sscanf(s->filename, "%d", &ctx->video_device_index);
}
if (ctx->video_device_index >= 0) {
NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
if (ctx->video_device_index >= [devices count]) {
av_log(ctx, AV_LOG_ERROR, "Invalid device index\n");
goto fail;
}
video_device = [devices objectAtIndex:ctx->video_device_index];
    } else if (s->filename[0] &&
               strncmp(s->filename, "default", 7)) {
NSArray *devices = [QTCaptureDevice inputDevicesWithMediaType:QTMediaTypeVideo];
for (QTCaptureDevice *device in devices) {
if (!strncmp(s->filename, [[device localizedDisplayName] UTF8String], strlen(s->filename))) {
video_device = device;
break;
}
}
if (!video_device) {
av_log(ctx, AV_LOG_ERROR, "Video device not found\n");
goto fail;
}
} else {
video_device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeMuxed];
}
BOOL success = [video_device open:nil];
    // Could not open the chosen device; fall back to the default QTMediaTypeVideo device
if (!success) {
video_device = [QTCaptureDevice defaultInputDeviceWithMediaType:QTMediaTypeVideo];
success = [video_device open:nil];
if (!success) {
av_log(s, AV_LOG_ERROR, "No QT capture device found\n");
goto fail;
}
}
NSString* dev_display_name = [video_device localizedDisplayName];
av_log (s, AV_LOG_DEBUG, "'%s' opened\n", [dev_display_name UTF8String]);
// Initialize capture session
ctx->capture_session = [[QTCaptureSession alloc] init];
QTCaptureDeviceInput* capture_dev_input = [[[QTCaptureDeviceInput alloc] initWithDevice:video_device] autorelease];
success = [ctx->capture_session addInput:capture_dev_input error:nil];
if (!success) {
av_log (s, AV_LOG_ERROR, "Failed to add QT capture device to session\n");
goto fail;
}
// Attaching output
// FIXME: Allow for a user defined pixel format
ctx->video_output = [[QTCaptureDecompressedVideoOutput alloc] init];
NSDictionary *captureDictionary = [NSDictionary dictionaryWithObject:
[NSNumber numberWithUnsignedInt:kCVPixelFormatType_24RGB]
forKey:(id)kCVPixelBufferPixelFormatTypeKey];
[ctx->video_output setPixelBufferAttributes:captureDictionary];
ctx->qt_delegate = [[FFMPEG_FrameReceiver alloc] initWithContext:ctx];
[ctx->video_output setDelegate:ctx->qt_delegate];
[ctx->video_output setAutomaticallyDropsLateVideoFrames:YES];
[ctx->video_output setMinimumVideoFrameInterval:1.0/ctx->frame_rate];
success = [ctx->capture_session addOutput:ctx->video_output error:nil];
if (!success) {
av_log (s, AV_LOG_ERROR, "can't add video output to capture session\n");
goto fail;
}
[ctx->capture_session startRunning];
// Take stream info from the first frame.
while (ctx->frames_captured < 1) {
CFRunLoopRunInMode(kCFRunLoopDefaultMode, 0.1, YES);
}
lock_frames(ctx);
AVStream* stream = avformat_new_stream(s, NULL);
    if (!stream) {
        unlock_frames(ctx);
        goto fail;
    }
avpriv_set_pts_info(stream, 64, 1, QTKIT_TIMEBASE);
stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
stream->codec->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codec->width = (int)CVPixelBufferGetWidth (ctx->current_frame);
stream->codec->height = (int)CVPixelBufferGetHeight(ctx->current_frame);
stream->codec->pix_fmt = AV_PIX_FMT_RGB24;
CVBufferRelease(ctx->current_frame);
ctx->current_frame = nil;
unlock_frames(ctx);
[pool release];
return 0;
fail:
[pool release];
destroy_context(ctx);
return AVERROR(EIO);
}
static int qtkit_read_packet(AVFormatContext *s, AVPacket *pkt)
{
CaptureContext* ctx = (CaptureContext*)s->priv_data;
do {
lock_frames(ctx);
if (ctx->current_frame != nil) {
            if (av_new_packet(pkt, (int)CVPixelBufferGetDataSize(ctx->current_frame)) < 0) {
                unlock_frames(ctx);
                return AVERROR(EIO);
            }
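            /* pts/dts are wall-clock time since capture start, rescaled to
             * the fixed 1/QTKIT_TIMEBASE (1/100 s) stream timebase */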
pkt->pts = pkt->dts = av_rescale_q(av_gettime() - ctx->first_pts, AV_TIME_BASE_Q, kQTKitTimeBase_q);
pkt->stream_index = 0;
pkt->flags |= AV_PKT_FLAG_KEY;
CVPixelBufferLockBaseAddress(ctx->current_frame, 0);
void* data = CVPixelBufferGetBaseAddress(ctx->current_frame);
memcpy(pkt->data, data, pkt->size);
CVPixelBufferUnlockBaseAddress(ctx->current_frame, 0);
CVBufferRelease(ctx->current_frame);
ctx->current_frame = nil;
} else {
pkt->data = NULL;
pthread_cond_wait(&ctx->frame_wait_cond, &ctx->frame_lock);
}
unlock_frames(ctx);
} while (!pkt->data);
return 0;
}
static int qtkit_close(AVFormatContext *s)
{
CaptureContext* ctx = (CaptureContext*)s->priv_data;
destroy_context(ctx);
return 0;
}
static const AVOption options[] = {
{ "frame_rate", "set frame rate", offsetof(CaptureContext, frame_rate), AV_OPT_TYPE_FLOAT, { .dbl = 30.0 }, 0.1, 30.0, AV_OPT_TYPE_VIDEO_RATE, NULL },
{ "list_devices", "list available devices", offsetof(CaptureContext, list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
{ "true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
{ "false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
{ "video_device_index", "select video device by index for devices with same name (starts at 0)", offsetof(CaptureContext, video_device_index), AV_OPT_TYPE_INT, {.i64 = -1}, -1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass qtkit_class = {
.class_name = "QTKit input device",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_qtkit_demuxer = {
.name = "qtkit",
.long_name = NULL_IF_CONFIG_SMALL("QTKit input device"),
.priv_data_size = sizeof(CaptureContext),
.read_header = qtkit_read_header,
.read_packet = qtkit_read_packet,
.read_close = qtkit_close,
.flags = AVFMT_NOFILE,
.priv_class = &qtkit_class,
};


@@ -0,0 +1,375 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libSDL output device
*/
#include <SDL.h>
#include <SDL_thread.h>
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "avdevice.h"
typedef struct {
AVClass *class;
SDL_Surface *surface;
SDL_Overlay *overlay;
char *window_title;
char *icon_title;
int window_width, window_height; /**< size of the window */
int window_fullscreen;
SDL_Rect overlay_rect;
int overlay_fmt;
int sdl_was_already_inited;
SDL_Thread *event_thread;
SDL_mutex *mutex;
SDL_cond *init_cond;
int init_ret; /* return code used to signal initialization errors */
int inited;
int quit;
} SDLContext;
static const struct sdl_overlay_pix_fmt_entry {
enum AVPixelFormat pix_fmt; int overlay_fmt;
} sdl_overlay_pix_fmt_map[] = {
{ AV_PIX_FMT_YUV420P, SDL_IYUV_OVERLAY },
{ AV_PIX_FMT_YUYV422, SDL_YUY2_OVERLAY },
{ AV_PIX_FMT_UYVY422, SDL_UYVY_OVERLAY },
{ AV_PIX_FMT_NONE, 0 },
};
static int sdl_write_trailer(AVFormatContext *s)
{
SDLContext *sdl = s->priv_data;
sdl->quit = 1;
if (sdl->overlay)
SDL_FreeYUVOverlay(sdl->overlay);
sdl->overlay = NULL;
if (sdl->event_thread)
SDL_WaitThread(sdl->event_thread, NULL);
sdl->event_thread = NULL;
if (sdl->mutex)
SDL_DestroyMutex(sdl->mutex);
sdl->mutex = NULL;
if (sdl->init_cond)
SDL_DestroyCond(sdl->init_cond);
sdl->init_cond = NULL;
if (!sdl->sdl_was_already_inited)
SDL_Quit();
return 0;
}
static void compute_overlay_rect(AVFormatContext *s)
{
AVRational sar, dar; /* sample and display aspect ratios */
SDLContext *sdl = s->priv_data;
AVStream *st = s->streams[0];
AVCodecContext *encctx = st->codec;
SDL_Rect *overlay_rect = &sdl->overlay_rect;
/* compute overlay width and height from the codec context information */
sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height });
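    /* e.g. a 720x576 stream with a 16:15 sample aspect ratio has a display
     * aspect ratio of (16/15) * (720/576) = 4:3 */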
/* we suppose the screen has a 1/1 sample aspect ratio */
if (sdl->window_width && sdl->window_height) {
/* fit in the window */
if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
/* fit in width */
overlay_rect->w = sdl->window_width;
overlay_rect->h = av_rescale(overlay_rect->w, dar.den, dar.num);
} else {
/* fit in height */
overlay_rect->h = sdl->window_height;
overlay_rect->w = av_rescale(overlay_rect->h, dar.num, dar.den);
}
} else {
if (sar.num > sar.den) {
overlay_rect->w = encctx->width;
overlay_rect->h = av_rescale(overlay_rect->w, dar.den, dar.num);
} else {
overlay_rect->h = encctx->height;
overlay_rect->w = av_rescale(overlay_rect->h, dar.num, dar.den);
}
sdl->window_width = overlay_rect->w;
sdl->window_height = overlay_rect->h;
}
overlay_rect->x = (sdl->window_width - overlay_rect->w) / 2;
overlay_rect->y = (sdl->window_height - overlay_rect->h) / 2;
}
#define SDL_BASE_FLAGS (SDL_SWSURFACE|SDL_RESIZABLE)
static int event_thread(void *arg)
{
AVFormatContext *s = arg;
SDLContext *sdl = s->priv_data;
int flags = SDL_BASE_FLAGS | (sdl->window_fullscreen ? SDL_FULLSCREEN : 0);
AVStream *st = s->streams[0];
AVCodecContext *encctx = st->codec;
/* initialization */
if (SDL_Init(SDL_INIT_VIDEO) != 0) {
av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
sdl->init_ret = AVERROR(EINVAL);
goto init_end;
}
SDL_WM_SetCaption(sdl->window_title, sdl->icon_title);
sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height,
24, flags);
if (!sdl->surface) {
av_log(sdl, AV_LOG_ERROR, "Unable to set video mode: %s\n", SDL_GetError());
sdl->init_ret = AVERROR(EINVAL);
goto init_end;
}
sdl->overlay = SDL_CreateYUVOverlay(encctx->width, encctx->height,
sdl->overlay_fmt, sdl->surface);
if (!sdl->overlay || sdl->overlay->pitches[0] < encctx->width) {
av_log(s, AV_LOG_ERROR,
"SDL does not support an overlay with size of %dx%d pixels\n",
encctx->width, encctx->height);
sdl->init_ret = AVERROR(EINVAL);
goto init_end;
}
sdl->init_ret = 0;
av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d\n",
encctx->width, encctx->height, av_get_pix_fmt_name(encctx->pix_fmt),
sdl->overlay_rect.w, sdl->overlay_rect.h);
init_end:
SDL_LockMutex(sdl->mutex);
sdl->inited = 1;
SDL_UnlockMutex(sdl->mutex);
SDL_CondSignal(sdl->init_cond);
if (sdl->init_ret < 0)
return sdl->init_ret;
/* event loop */
while (!sdl->quit) {
int ret;
SDL_Event event;
SDL_PumpEvents();
ret = SDL_PeepEvents(&event, 1, SDL_GETEVENT, SDL_ALLEVENTS);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Error when getting SDL event: %s\n", SDL_GetError());
continue;
}
if (ret == 0) {
SDL_Delay(10);
continue;
}
switch (event.type) {
case SDL_KEYDOWN:
switch (event.key.keysym.sym) {
case SDLK_ESCAPE:
case SDLK_q:
sdl->quit = 1;
break;
}
break;
case SDL_QUIT:
sdl->quit = 1;
break;
case SDL_VIDEORESIZE:
sdl->window_width = event.resize.w;
sdl->window_height = event.resize.h;
SDL_LockMutex(sdl->mutex);
sdl->surface = SDL_SetVideoMode(sdl->window_width, sdl->window_height, 24, SDL_BASE_FLAGS);
if (!sdl->surface) {
av_log(s, AV_LOG_ERROR, "Failed to set SDL video mode: %s\n", SDL_GetError());
sdl->quit = 1;
} else {
compute_overlay_rect(s);
}
SDL_UnlockMutex(sdl->mutex);
break;
default:
break;
}
}
return 0;
}
static int sdl_write_header(AVFormatContext *s)
{
SDLContext *sdl = s->priv_data;
AVStream *st = s->streams[0];
AVCodecContext *encctx = st->codec;
int i, ret;
if (!sdl->window_title)
sdl->window_title = av_strdup(s->filename);
if (!sdl->icon_title)
sdl->icon_title = av_strdup(sdl->window_title);
if (SDL_WasInit(SDL_INIT_VIDEO)) {
av_log(s, AV_LOG_ERROR,
"SDL video subsystem was already inited, aborting\n");
sdl->sdl_was_already_inited = 1;
ret = AVERROR(EINVAL);
goto fail;
}
if ( s->nb_streams > 1
|| encctx->codec_type != AVMEDIA_TYPE_VIDEO
|| encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
ret = AVERROR(EINVAL);
goto fail;
}
for (i = 0; sdl_overlay_pix_fmt_map[i].pix_fmt != AV_PIX_FMT_NONE; i++) {
if (sdl_overlay_pix_fmt_map[i].pix_fmt == encctx->pix_fmt) {
sdl->overlay_fmt = sdl_overlay_pix_fmt_map[i].overlay_fmt;
break;
}
}
if (!sdl->overlay_fmt) {
av_log(s, AV_LOG_ERROR,
"Unsupported pixel format '%s', choose one of yuv420p, yuyv422, or uyvy422\n",
av_get_pix_fmt_name(encctx->pix_fmt));
ret = AVERROR(EINVAL);
goto fail;
}
/* compute overlay width and height from the codec context information */
compute_overlay_rect(s);
sdl->init_cond = SDL_CreateCond();
if (!sdl->init_cond) {
av_log(s, AV_LOG_ERROR, "Could not create SDL condition variable: %s\n", SDL_GetError());
ret = AVERROR_EXTERNAL;
goto fail;
}
sdl->mutex = SDL_CreateMutex();
if (!sdl->mutex) {
av_log(s, AV_LOG_ERROR, "Could not create SDL mutex: %s\n", SDL_GetError());
ret = AVERROR_EXTERNAL;
goto fail;
}
sdl->event_thread = SDL_CreateThread(event_thread, s);
if (!sdl->event_thread) {
av_log(s, AV_LOG_ERROR, "Could not create SDL event thread: %s\n", SDL_GetError());
ret = AVERROR_EXTERNAL;
goto fail;
}
/* wait until the video system has been inited */
SDL_LockMutex(sdl->mutex);
while (!sdl->inited) {
SDL_CondWait(sdl->init_cond, sdl->mutex);
}
SDL_UnlockMutex(sdl->mutex);
if (sdl->init_ret < 0) {
ret = sdl->init_ret;
goto fail;
}
return 0;
fail:
sdl_write_trailer(s);
return ret;
}
static int sdl_write_packet(AVFormatContext *s, AVPacket *pkt)
{
SDLContext *sdl = s->priv_data;
AVCodecContext *encctx = s->streams[0]->codec;
AVPicture pict;
int i;
if (sdl->quit) {
sdl_write_trailer(s);
return AVERROR(EIO);
}
avpicture_fill(&pict, pkt->data, encctx->pix_fmt, encctx->width, encctx->height);
SDL_LockMutex(sdl->mutex);
SDL_FillRect(sdl->surface, &sdl->surface->clip_rect,
SDL_MapRGB(sdl->surface->format, 0, 0, 0));
SDL_LockYUVOverlay(sdl->overlay);
for (i = 0; i < 3; i++) {
sdl->overlay->pixels [i] = pict.data [i];
sdl->overlay->pitches[i] = pict.linesize[i];
}
SDL_DisplayYUVOverlay(sdl->overlay, &sdl->overlay_rect);
SDL_UnlockYUVOverlay(sdl->overlay);
SDL_UpdateRect(sdl->surface,
sdl->overlay_rect.x, sdl->overlay_rect.y,
sdl->overlay_rect.w, sdl->overlay_rect.h);
SDL_UnlockMutex(sdl->mutex);
return 0;
}
#define OFFSET(x) offsetof(SDLContext,x)
static const AVOption options[] = {
{ "window_title", "set SDL window title", OFFSET(window_title), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
{ "icon_title", "set SDL iconified window title", OFFSET(icon_title) , AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_size", "set SDL window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_fullscreen", "set SDL window fullscreen", OFFSET(window_fullscreen), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ NULL },
};
static const AVClass sdl_class = {
.class_name = "sdl outdev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
AVOutputFormat ff_sdl_muxer = {
.name = "sdl",
.long_name = NULL_IF_CONFIG_SMALL("SDL output device"),
.priv_data_size = sizeof(SDLContext),
.audio_codec = AV_CODEC_ID_NONE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = sdl_write_header,
.write_packet = sdl_write_packet,
.write_trailer = sdl_write_trailer,
.flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
.priv_class = &sdl_class,
};


@@ -0,0 +1,120 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
#include "avdevice.h"
#include "libavdevice/sndio.h"
static inline void movecb(void *addr, int delta)
{
SndioData *s = addr;
s->hwpos += delta * s->channels * s->bps;
}
av_cold int ff_sndio_open(AVFormatContext *s1, int is_output,
const char *audio_device)
{
SndioData *s = s1->priv_data;
struct sio_hdl *hdl;
struct sio_par par;
hdl = sio_open(audio_device, is_output ? SIO_PLAY : SIO_REC, 0);
if (!hdl) {
av_log(s1, AV_LOG_ERROR, "Could not open sndio device\n");
return AVERROR(EIO);
}
sio_initpar(&par);
par.bits = 16;
par.sig = 1;
par.le = SIO_LE_NATIVE;
if (is_output)
par.pchan = s->channels;
else
par.rchan = s->channels;
par.rate = s->sample_rate;
if (!sio_setpar(hdl, &par) || !sio_getpar(hdl, &par)) {
av_log(s1, AV_LOG_ERROR, "Impossible to set sndio parameters, "
"channels: %d sample rate: %d\n", s->channels, s->sample_rate);
goto fail;
}
if (par.bits != 16 || par.sig != 1 ||
(is_output && (par.pchan != s->channels)) ||
(!is_output && (par.rchan != s->channels)) ||
(par.rate != s->sample_rate)) {
av_log(s1, AV_LOG_ERROR, "Could not set appropriate sndio parameters, "
"channels: %d sample rate: %d\n", s->channels, s->sample_rate);
goto fail;
}
s->buffer_size = par.round * par.bps *
(is_output ? par.pchan : par.rchan);
if (is_output) {
s->buffer = av_malloc(s->buffer_size);
if (!s->buffer) {
av_log(s1, AV_LOG_ERROR, "Could not allocate buffer\n");
goto fail;
}
}
s->codec_id = par.le ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S16BE;
s->channels = is_output ? par.pchan : par.rchan;
s->sample_rate = par.rate;
s->bps = par.bps;
sio_onmove(hdl, movecb, s);
if (!sio_start(hdl)) {
av_log(s1, AV_LOG_ERROR, "Could not start sndio\n");
goto fail;
}
s->hdl = hdl;
return 0;
fail:
av_freep(&s->buffer);
if (hdl)
sio_close(hdl);
return AVERROR(EIO);
}
int ff_sndio_close(SndioData *s)
{
av_freep(&s->buffer);
if (s->hdl)
sio_close(s->hdl);
return 0;
}


@@ -0,0 +1,48 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_SNDIO_H
#define AVDEVICE_SNDIO_H
#include <stdint.h>
#include <sndio.h>
#include "libavutil/log.h"
#include "avdevice.h"
typedef struct SndioData {
AVClass *class;
struct sio_hdl *hdl;
enum AVCodecID codec_id;
int64_t hwpos;
int64_t softpos;
uint8_t *buffer;
int bps;
int buffer_size;
int buffer_offset;
int channels;
int sample_rate;
} SndioData;
int ff_sndio_open(AVFormatContext *s1, int is_output, const char *audio_device);
int ff_sndio_close(SndioData *s);
#endif /* AVDEVICE_SNDIO_H */


@@ -0,0 +1,121 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavdevice/sndio.h"
static av_cold int audio_read_header(AVFormatContext *s1)
{
SndioData *s = s1->priv_data;
AVStream *st;
int ret;
st = avformat_new_stream(s1, NULL);
if (!st)
return AVERROR(ENOMEM);
ret = ff_sndio_open(s1, 0, s1->filename);
if (ret < 0)
return ret;
/* take real parameters */
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_id = s->codec_id;
st->codec->sample_rate = s->sample_rate;
st->codec->channels = s->channels;
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
return 0;
}
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
SndioData *s = s1->priv_data;
int64_t bdelay, cur_time;
int ret;
if ((ret = av_new_packet(pkt, s->buffer_size)) < 0)
return ret;
ret = sio_read(s->hdl, pkt->data, pkt->size);
if (ret == 0 || sio_eof(s->hdl)) {
av_free_packet(pkt);
return AVERROR_EOF;
}
pkt->size = ret;
s->softpos += ret;
/* compute pts of the start of the packet */
cur_time = av_gettime();
bdelay = ret + s->hwpos - s->softpos;
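    /* bdelay is the number of bytes between the first sample of this packet
     * and the current hardware position (hwpos is advanced by the
     * sio_onmove() callback); backdating "now" by that amount yields the
     * capture time of the packet's first sample */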
/* convert to pts */
pkt->pts = cur_time - ((bdelay * 1000000) /
(s->bps * s->channels * s->sample_rate));
return 0;
}
static av_cold int audio_read_close(AVFormatContext *s1)
{
SndioData *s = s1->priv_data;
ff_sndio_close(s);
return 0;
}
static const AVOption options[] = {
{ "sample_rate", "", offsetof(SndioData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ "channels", "", offsetof(SndioData, channels), AV_OPT_TYPE_INT, {.i64 = 2}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
{ NULL },
};
static const AVClass sndio_demuxer_class = {
.class_name = "sndio indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
AVInputFormat ff_sndio_demuxer = {
.name = "sndio",
.long_name = NULL_IF_CONFIG_SMALL("sndio audio capture"),
.priv_data_size = sizeof(SndioData),
.read_header = audio_read_header,
.read_packet = audio_read_packet,
.read_close = audio_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &sndio_demuxer_class,
};


@@ -0,0 +1,103 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
#include "libavutil/internal.h"
#include "libavdevice/avdevice.h"
#include "libavdevice/sndio.h"
static av_cold int audio_write_header(AVFormatContext *s1)
{
SndioData *s = s1->priv_data;
AVStream *st;
int ret;
st = s1->streams[0];
s->sample_rate = st->codec->sample_rate;
s->channels = st->codec->channels;
ret = ff_sndio_open(s1, 1, s1->filename);
return ret;
}
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
SndioData *s = s1->priv_data;
    uint8_t *buf = pkt->data;
int size = pkt->size;
int len, ret;
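    /* stage the input in s->buffer and hand it to sndio in whole
     * buffer_size chunks; any remainder is kept for the next packet and
     * finally flushed by audio_write_trailer() */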
while (size > 0) {
len = FFMIN(s->buffer_size - s->buffer_offset, size);
memcpy(s->buffer + s->buffer_offset, buf, len);
buf += len;
size -= len;
s->buffer_offset += len;
if (s->buffer_offset >= s->buffer_size) {
ret = sio_write(s->hdl, s->buffer, s->buffer_size);
if (ret == 0 || sio_eof(s->hdl))
return AVERROR(EIO);
s->softpos += ret;
s->buffer_offset = 0;
}
}
return 0;
}
static int audio_write_trailer(AVFormatContext *s1)
{
SndioData *s = s1->priv_data;
sio_write(s->hdl, s->buffer, s->buffer_offset);
ff_sndio_close(s);
return 0;
}
static const AVClass sndio_muxer_class = {
.class_name = "sndio outdev",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};
AVOutputFormat ff_sndio_muxer = {
.name = "sndio",
.long_name = NULL_IF_CONFIG_SMALL("sndio audio playback"),
.priv_data_size = sizeof(SndioData),
/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
other formats */
.audio_codec = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
.video_codec = AV_CODEC_ID_NONE,
.write_header = audio_write_header,
.write_packet = audio_write_packet,
.write_trailer = audio_write_trailer,
.flags = AVFMT_NOFILE,
.priv_class = &sndio_muxer_class,
};


@@ -0,0 +1,168 @@
/*
* Delay Locked Loop based time filter
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "config.h"
#include "timefilter.h"
struct TimeFilter {
// Delay Locked Loop data. These variables refer to mathematical
// concepts described in: http://www.kokkinizita.net/papers/usingdll.pdf
double cycle_time;
double feedback2_factor;
double feedback3_factor;
double clock_period;
int count;
};
/* approximates 1 - exp(-x) as 1 - 1/P(x), where P(x) is the third-order
 * Taylor polynomial of exp(x) */
static double qexpneg(double x)
{
return 1 - 1 / (1 + x * (1 + x / 2 * (1 + x / 3)));
}
TimeFilter *ff_timefilter_new(double time_base,
double period,
double bandwidth)
{
TimeFilter *self = av_mallocz(sizeof(TimeFilter));
double o = 2 * M_PI * bandwidth * period * time_base;
if (!self)
return NULL;
self->clock_period = time_base;
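    /* continuous-time DLL design: for the loop constant
     * o = 2*pi*bandwidth*period*time_base the ideal gains are sqrt(2)*o and
     * o*o; feeding them through qexpneg() (1 - exp(-x)) keeps the effective
     * gains below 1 even when o is large */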
self->feedback2_factor = qexpneg(M_SQRT2 * o);
self->feedback3_factor = qexpneg(o * o) / period;
return self;
}
void ff_timefilter_destroy(TimeFilter *self)
{
av_freep(&self);
}
void ff_timefilter_reset(TimeFilter *self)
{
self->count = 0;
}
double ff_timefilter_update(TimeFilter *self, double system_time, double period)
{
self->count++;
if (self->count == 1) {
self->cycle_time = system_time;
} else {
double loop_error;
self->cycle_time += self->clock_period * period;
loop_error = system_time - self->cycle_time;
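        /* second-order feedback corrects the phase (predicted time),
         * third-order feedback corrects the estimated clock period; the
         * FFMAX() term makes the first updates behave like a plain average
         * while the loop settles */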
self->cycle_time += FFMAX(self->feedback2_factor, 1.0 / self->count) * loop_error;
self->clock_period += self->feedback3_factor * loop_error;
}
return self->cycle_time;
}
double ff_timefilter_eval(TimeFilter *self, double delta)
{
return self->cycle_time + self->clock_period * delta;
}
#ifdef TEST
#include "libavutil/lfg.h"
#define LFG_MAX ((1LL << 32) - 1)
int main(void)
{
AVLFG prng;
double n0, n1;
#define SAMPLES 1000
double ideal[SAMPLES];
double samples[SAMPLES];
double samplet[SAMPLES];
for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
double best_error = 1000000000;
double bestpar0 = n0 ? 1 : 100000;
double bestpar1 = 1;
int better, i;
av_lfg_init(&prng, 123);
for (i = 0; i < SAMPLES; i++) {
samplet[i] = 10 + i + (av_lfg_get(&prng) < LFG_MAX/2 ? 0 : 0.999);
ideal[i] = samplet[i] + n1 * i / (1000);
samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL);
                if (i && samples[i] < samples[i-1])
                    samples[i] = samples[i-1] + 0.001;
}
do {
double par0, par1;
better = 0;
for (par0 = bestpar0 * 0.8; par0 <= bestpar0 * 1.21; par0 += bestpar0 * 0.05) {
for (par1 = bestpar1 * 0.8; par1 <= bestpar1 * 1.21; par1 += bestpar1 * 0.05) {
double error = 0;
TimeFilter *tf = ff_timefilter_new(1, par0, par1);
if (!tf) {
printf("Could not allocate memory for timefilter.\n");
exit(1);
}
for (i = 0; i < SAMPLES; i++) {
double filtered;
filtered = ff_timefilter_update(tf, samples[i], i ? (samplet[i] - samplet[i-1]) : 1);
                        if (filtered < 0 || filtered > 1000000000)
                            printf("filter is unstable\n");
error += (filtered - ideal[i]) * (filtered - ideal[i]);
}
ff_timefilter_destroy(tf);
if (error < best_error) {
best_error = error;
bestpar0 = par0;
bestpar1 = par1;
better = 1;
}
}
}
} while (better);
#if 0
double lastfil = 9;
TimeFilter *tf = ff_timefilter_new(1, bestpar0, bestpar1);
for (i = 0; i < SAMPLES; i++) {
double filtered;
filtered = ff_timefilter_update(tf, samples[i], 1);
printf("%f %f %f %f\n", i - samples[i] + 10, filtered - samples[i],
samples[FFMAX(i, 1)] - samples[FFMAX(i - 1, 0)], filtered - lastfil);
lastfil = filtered;
}
ff_timefilter_destroy(tf);
#else
printf(" [%12f %11f %9f]", bestpar0, bestpar1, best_error);
#endif
}
printf("\n");
}
return 0;
}
#endif


@@ -0,0 +1,110 @@
/*
* Delay Locked Loop based time filter prototypes and declarations
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_TIMEFILTER_H
#define AVDEVICE_TIMEFILTER_H
/**
* Opaque type representing a time filter state
*
* The purpose of this filter is to provide a way to compute accurate time
* stamps that can be compared to wall clock time, especially when dealing
* with two clocks: the system clock and a hardware device clock, such as
* a soundcard.
*/
typedef struct TimeFilter TimeFilter;
/**
* Create a new Delay Locked Loop time filter
*
* feedback2_factor and feedback3_factor are the factors used for the
* multiplications that are respectively performed in the second and third
* feedback paths of the loop.
*
* Unless you know what you are doing, you should set these as follow:
*
* o = 2 * M_PI * bandwidth * period_in_seconds
* feedback2_factor = sqrt(2) * o
* feedback3_factor = o * o
*
* Where bandwidth is up to you to choose. Smaller values will filter out more
* of the jitter, but also take a longer time for the loop to settle. A good
* starting point is something between 0.3 and 3 Hz.
*
* @param time_base period of the hardware clock in seconds
* (for example 1.0/44100)
* @param period expected update interval, in input units
 * @param bandwidth filtering bandwidth, in Hz
*
* @return a pointer to a TimeFilter struct, or NULL on error
*
* For more details about these parameters and background concepts please see:
* http://www.kokkinizita.net/papers/usingdll.pdf
*/
TimeFilter * ff_timefilter_new(double time_base, double period, double bandwidth);
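/* For instance (illustrative values): a 44.1 kHz device delivering
 * 512-frame periods with a 1.5 Hz loop bandwidth would use
 *     tf = ff_timefilter_new(1.0 / 44100, 512, 1.5);
 * giving o ~= 0.109, feedback2_factor ~= 0.143 and
 * feedback3_factor ~= 2.3e-5 under the qexpneg() mapping used by the
 * implementation in timefilter.c */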
/**
* Update the filter
*
* This function must be called in real time, at each process cycle.
*
* @param period the device cycle duration in clock_periods. For example, at
* 44.1kHz and a buffer size of 512 frames, period = 512 when clock_period
* was 1.0/44100, or 512/44100 if clock_period was 1.
*
* system_time, in seconds, should be the value of the system clock time,
* at (or as close as possible to) the moment the device hardware interrupt
* occurred (or any other event the device clock raises at the beginning of a
* cycle).
*
* @return the filtered time, in seconds
*/
double ff_timefilter_update(TimeFilter *self, double system_time, double period);
/**
* Evaluate the filter at a specified time
*
* @param delta difference between the requested time and the current time
* (last call to ff_timefilter_update).
* @return the filtered time
*/
double ff_timefilter_eval(TimeFilter *self, double delta);
/**
* Reset the filter
*
* This function should mainly be called in case of XRUN.
*
* Warning: after calling this, the filter is in an undetermined state until
* the next call to ff_timefilter_update()
*/
void ff_timefilter_reset(TimeFilter *);
/**
* Free all resources associated with the filter
*/
void ff_timefilter_destroy(TimeFilter *);
#endif /* AVDEVICE_TIMEFILTER_H */


@@ -0,0 +1,59 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "internal.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
int ff_alloc_input_device_context(AVFormatContext **avctx, AVInputFormat *iformat, const char *format)
{
AVFormatContext *s;
int ret = 0;
*avctx = NULL;
if (!iformat && !format)
return AVERROR(EINVAL);
if (!(s = avformat_alloc_context()))
return AVERROR(ENOMEM);
if (!iformat)
iformat = av_find_input_format(format);
if (!iformat || !iformat->priv_class || !AV_IS_INPUT_DEVICE(iformat->priv_class->category)) {
ret = AVERROR(EINVAL);
goto error;
}
s->iformat = iformat;
if (s->iformat->priv_data_size > 0) {
s->priv_data = av_mallocz(s->iformat->priv_data_size);
if (!s->priv_data) {
ret = AVERROR(ENOMEM);
goto error;
}
if (s->iformat->priv_class) {
            *(const AVClass **)s->priv_data = s->iformat->priv_class;
av_opt_set_defaults(s->priv_data);
}
} else
s->priv_data = NULL;
*avctx = s;
return 0;
error:
avformat_free_context(s);
return ret;
}
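/* Typical use (illustrative sketch; "v4l2" is just an example format name):
 *
 *     AVFormatContext *ctx = NULL;
 *     if (ff_alloc_input_device_context(&ctx, NULL, "v4l2") >= 0) {
 *         // set device options on ctx->priv_data with av_opt_set() ...
 *         avformat_free_context(ctx);
 *     }
 */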


@@ -0,0 +1,364 @@
/*
* Linux video grab interface
* Copyright (c) 2000,2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "avdevice.h"
#undef __STRICT_ANSI__ //workaround due to broken kernel headers
#include "config.h"
#include "libavutil/rational.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavformat/internal.h"
#include "libavcodec/dsputil.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#define _LINUX_TIME_H 1
#include <linux/videodev.h>
#include <time.h>
typedef struct {
AVClass *class;
int fd;
int frame_format; /* see VIDEO_PALETTE_xxx */
int use_mmap;
AVRational time_base;
int64_t time_frame;
int frame_size;
struct video_capability video_cap;
struct video_audio audio_saved;
struct video_window video_win;
uint8_t *video_buf;
struct video_mbuf gb_buffers;
struct video_mmap gb_buf;
int gb_frame;
int standard;
} VideoData;
static const struct {
int palette;
int depth;
enum AVPixelFormat pix_fmt;
} video_formats [] = {
{.palette = VIDEO_PALETTE_YUV420P, .depth = 12, .pix_fmt = AV_PIX_FMT_YUV420P },
{.palette = VIDEO_PALETTE_YUV422, .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
{.palette = VIDEO_PALETTE_UYVY, .depth = 16, .pix_fmt = AV_PIX_FMT_UYVY422 },
{.palette = VIDEO_PALETTE_YUYV, .depth = 16, .pix_fmt = AV_PIX_FMT_YUYV422 },
/* NOTE: v4l uses BGR24, not RGB24 */
{.palette = VIDEO_PALETTE_RGB24, .depth = 24, .pix_fmt = AV_PIX_FMT_BGR24 },
{.palette = VIDEO_PALETTE_RGB565, .depth = 16, .pix_fmt = AV_PIX_FMT_BGR565 },
{.palette = VIDEO_PALETTE_GREY, .depth = 8, .pix_fmt = AV_PIX_FMT_GRAY8 },
};
static int grab_read_header(AVFormatContext *s1, AVFormatParameters *ap)
{
VideoData *s = s1->priv_data;
AVStream *st;
int video_fd;
int desired_palette, desired_depth;
struct video_tuner tuner;
struct video_audio audio;
struct video_picture pict;
int j;
int vformat_num = FF_ARRAY_ELEMS(video_formats);
av_log(s1, AV_LOG_WARNING, "V4L input device is deprecated and will be removed in the next release.");
if (ap->time_base.den <= 0) {
av_log(s1, AV_LOG_ERROR, "Wrong time base (%d)\n", ap->time_base.den);
return -1;
}
s->time_base = ap->time_base;
s->video_win.width = ap->width;
s->video_win.height = ap->height;
st = avformat_new_stream(s1, NULL);
if (!st)
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
video_fd = open(s1->filename, O_RDWR);
if (video_fd < 0) {
av_log(s1, AV_LOG_ERROR, "%s: %s\n", s1->filename, strerror(errno));
goto fail;
}
if (ioctl(video_fd, VIDIOCGCAP, &s->video_cap) < 0) {
av_log(s1, AV_LOG_ERROR, "VIDIOCGCAP: %s\n", strerror(errno));
goto fail;
}
if (!(s->video_cap.type & VID_TYPE_CAPTURE)) {
av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not handle capture\n");
goto fail;
}
/* no values set, autodetect them */
if (s->video_win.width <= 0 || s->video_win.height <= 0) {
        if (ioctl(video_fd, VIDIOCGWIN, &s->video_win) < 0) {
av_log(s1, AV_LOG_ERROR, "VIDIOCGWIN: %s\n", strerror(errno));
goto fail;
}
}
    if (av_image_check_size(s->video_win.width, s->video_win.height, 0, s1) < 0)
return -1;
desired_palette = -1;
desired_depth = -1;
for (j = 0; j < vformat_num; j++) {
if (ap->pix_fmt == video_formats[j].pix_fmt) {
desired_palette = video_formats[j].palette;
desired_depth = video_formats[j].depth;
break;
}
}
/* set tv standard */
if (!ioctl(video_fd, VIDIOCGTUNER, &tuner)) {
tuner.mode = s->standard;
ioctl(video_fd, VIDIOCSTUNER, &tuner);
}
/* unmute audio */
audio.audio = 0;
ioctl(video_fd, VIDIOCGAUDIO, &audio);
memcpy(&s->audio_saved, &audio, sizeof(audio));
audio.flags &= ~VIDEO_AUDIO_MUTE;
ioctl(video_fd, VIDIOCSAUDIO, &audio);
ioctl(video_fd, VIDIOCGPICT, &pict);
ff_dlog(s1, "v4l: colour=%d hue=%d brightness=%d constrast=%d whiteness=%d\n",
pict.colour, pict.hue, pict.brightness, pict.contrast, pict.whiteness);
/* try to choose a suitable video format */
pict.palette = desired_palette;
    pict.depth = desired_depth;
if (desired_palette == -1 || ioctl(video_fd, VIDIOCSPICT, &pict) < 0) {
for (j = 0; j < vformat_num; j++) {
pict.palette = video_formats[j].palette;
pict.depth = video_formats[j].depth;
if (-1 != ioctl(video_fd, VIDIOCSPICT, &pict))
break;
}
if (j >= vformat_num)
goto fail1;
}
if (ioctl(video_fd, VIDIOCGMBUF, &s->gb_buffers) < 0) {
/* try to use read based access */
int val;
s->video_win.x = 0;
s->video_win.y = 0;
s->video_win.chromakey = -1;
s->video_win.flags = 0;
        if (ioctl(video_fd, VIDIOCSWIN, &s->video_win) < 0) {
av_log(s1, AV_LOG_ERROR, "VIDIOCSWIN: %s\n", strerror(errno));
goto fail;
}
s->frame_format = pict.palette;
val = 1;
if (ioctl(video_fd, VIDIOCCAPTURE, &val) < 0) {
av_log(s1, AV_LOG_ERROR, "VIDIOCCAPTURE: %s\n", strerror(errno));
goto fail;
}
s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
s->use_mmap = 0;
} else {
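        /* prefer a shared mapping of the capture buffers; if the driver
         * rejects MAP_SHARED, fall back to a private mapping */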
s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_SHARED, video_fd, 0);
if ((unsigned char*)-1 == s->video_buf) {
s->video_buf = mmap(0, s->gb_buffers.size, PROT_READ|PROT_WRITE, MAP_PRIVATE, video_fd, 0);
if ((unsigned char*)-1 == s->video_buf) {
av_log(s1, AV_LOG_ERROR, "mmap: %s\n", strerror(errno));
goto fail;
}
}
s->gb_frame = 0;
s->time_frame = av_gettime() * s->time_base.den / s->time_base.num;
/* start to grab the first frame */
s->gb_buf.frame = s->gb_frame % s->gb_buffers.frames;
s->gb_buf.height = s->video_win.height;
s->gb_buf.width = s->video_win.width;
s->gb_buf.format = pict.palette;
if (ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
if (errno != EAGAIN) {
fail1:
av_log(s1, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
} else {
av_log(s1, AV_LOG_ERROR, "Fatal: grab device does not receive any video signal\n");
}
goto fail;
}
for (j = 1; j < s->gb_buffers.frames; j++) {
s->gb_buf.frame = j;
ioctl(video_fd, VIDIOCMCAPTURE, &s->gb_buf);
}
s->frame_format = s->gb_buf.format;
s->use_mmap = 1;
}
for (j = 0; j < vformat_num; j++) {
if (s->frame_format == video_formats[j].palette) {
s->frame_size = s->video_win.width * s->video_win.height * video_formats[j].depth / 8;
st->codec->pix_fmt = video_formats[j].pix_fmt;
break;
}
}
if (j >= vformat_num)
goto fail;
s->fd = video_fd;
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->width = s->video_win.width;
st->codec->height = s->video_win.height;
st->codec->time_base = s->time_base;
    st->codec->bit_rate = s->frame_size * 8 / av_q2d(st->codec->time_base);
return 0;
fail:
if (video_fd >= 0)
close(video_fd);
return AVERROR(EIO);
}
static int v4l_mm_read_picture(VideoData *s, uint8_t *buf)
{
uint8_t *ptr;
while (ioctl(s->fd, VIDIOCSYNC, &s->gb_frame) < 0 &&
(errno == EAGAIN || errno == EINTR));
ptr = s->video_buf + s->gb_buffers.offsets[s->gb_frame];
memcpy(buf, ptr, s->frame_size);
/* Setup to capture the next frame */
s->gb_buf.frame = s->gb_frame;
if (ioctl(s->fd, VIDIOCMCAPTURE, &s->gb_buf) < 0) {
if (errno == EAGAIN)
av_log(NULL, AV_LOG_ERROR, "Cannot Sync\n");
else
av_log(NULL, AV_LOG_ERROR, "VIDIOCMCAPTURE: %s\n", strerror(errno));
return AVERROR(EIO);
}
/* This is now the grabbing frame */
s->gb_frame = (s->gb_frame + 1) % s->gb_buffers.frames;
return s->frame_size;
}
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
VideoData *s = s1->priv_data;
int64_t curtime, delay;
struct timespec ts;
/* Calculate the time of the next frame */
s->time_frame += INT64_C(1000000);
/* wait based on the frame rate */
for(;;) {
curtime = av_gettime();
delay = s->time_frame * s->time_base.num / s->time_base.den - curtime;
if (delay <= 0) {
if (delay < INT64_C(-1000000) * s->time_base.num / s->time_base.den) {
/* printf("grabbing is %d frames late (dropping)\n", (int) -(delay / 16666)); */
s->time_frame += INT64_C(1000000);
}
break;
}
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
nanosleep(&ts, NULL);
}
if (av_new_packet(pkt, s->frame_size) < 0)
return AVERROR(EIO);
pkt->pts = curtime;
/* read one frame */
if (s->use_mmap) {
return v4l_mm_read_picture(s, pkt->data);
} else {
if (read(s->fd, pkt->data, pkt->size) != pkt->size)
return AVERROR(EIO);
return s->frame_size;
}
}
static int grab_read_close(AVFormatContext *s1)
{
VideoData *s = s1->priv_data;
if (s->use_mmap)
munmap(s->video_buf, s->gb_buffers.size);
/* mute audio. we must force it because the BTTV driver does not
return its state correctly */
s->audio_saved.flags |= VIDEO_AUDIO_MUTE;
ioctl(s->fd, VIDIOCSAUDIO, &s->audio_saved);
close(s->fd);
return 0;
}
static const AVOption options[] = {
{ "standard", "", offsetof(VideoData, standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_MODE_NTSC}, VIDEO_MODE_PAL, VIDEO_MODE_NTSC, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "PAL", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_PAL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_SECAM}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ "NTSC", "", 0, AV_OPT_TYPE_CONST, {.i64 = VIDEO_MODE_NTSC}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "standard" },
{ NULL },
};
static const AVClass v4l_class = {
.class_name = "V4L indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
AVInputFormat ff_v4l_demuxer = {
.name = "video4linux,v4l",
.long_name = NULL_IF_CONFIG_SMALL("Video4Linux device grab"),
.priv_data_size = sizeof(VideoData),
.read_header = grab_read_header,
.read_packet = grab_read_packet,
.read_close = grab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &v4l_class,
};


@@ -0,0 +1,105 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "v4l2-common.h"
const struct fmt_map ff_fmt_conversion_table[] = {
//ff_fmt codec_id v4l2_fmt
{ AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420 },
{ AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420 },
{ AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
{ AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV },
{ AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY },
{ AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
{ AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410 },
{ AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410 },
{ AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555 },
{ AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X },
{ AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565 },
{ AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X },
{ AV_PIX_FMT_BGR24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24 },
{ AV_PIX_FMT_RGB24, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24 },
{ AV_PIX_FMT_BGR0, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32 },
{ AV_PIX_FMT_0RGB, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32 },
{ AV_PIX_FMT_GRAY8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY },
#ifdef V4L2_PIX_FMT_Y16
{ AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16 },
#endif
{ AV_PIX_FMT_NV12, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12 },
{ AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_MJPEG },
{ AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG, V4L2_PIX_FMT_JPEG },
#ifdef V4L2_PIX_FMT_H264
{ AV_PIX_FMT_NONE, AV_CODEC_ID_H264, V4L2_PIX_FMT_H264 },
#endif
#ifdef V4L2_PIX_FMT_MPEG4
{ AV_PIX_FMT_NONE, AV_CODEC_ID_MPEG4, V4L2_PIX_FMT_MPEG4 },
#endif
#ifdef V4L2_PIX_FMT_CPIA1
{ AV_PIX_FMT_NONE, AV_CODEC_ID_CPIA, V4L2_PIX_FMT_CPIA1 },
#endif
#ifdef V4L2_PIX_FMT_SRGGB8
{ AV_PIX_FMT_BAYER_BGGR8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SBGGR8 },
{ AV_PIX_FMT_BAYER_GBRG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGBRG8 },
{ AV_PIX_FMT_BAYER_GRBG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGRBG8 },
{ AV_PIX_FMT_BAYER_RGGB8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SRGGB8 },
#endif
{ AV_PIX_FMT_NONE, AV_CODEC_ID_NONE, 0 },
};
uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
{
int i;
for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if ((codec_id == AV_CODEC_ID_NONE ||
ff_fmt_conversion_table[i].codec_id == codec_id) &&
(pix_fmt == AV_PIX_FMT_NONE ||
ff_fmt_conversion_table[i].ff_fmt == pix_fmt)) {
return ff_fmt_conversion_table[i].v4l2_fmt;
}
}
return 0;
}
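/* e.g. ff_fmt_ff2v4l(AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO) returns
 * V4L2_PIX_FMT_YUYV, while ff_fmt_ff2v4l(AV_PIX_FMT_NONE, AV_CODEC_ID_MJPEG)
 * matches on the codec alone and returns V4L2_PIX_FMT_MJPEG */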
enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
{
int i;
for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if (ff_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt &&
ff_fmt_conversion_table[i].codec_id == codec_id) {
return ff_fmt_conversion_table[i].ff_fmt;
}
}
return AV_PIX_FMT_NONE;
}
enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt)
{
int i;
for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if (ff_fmt_conversion_table[i].v4l2_fmt == v4l2_fmt) {
return ff_fmt_conversion_table[i].codec_id;
}
}
return AV_CODEC_ID_NONE;
}


@@ -0,0 +1,62 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_V4L2_COMMON_H
#define AVDEVICE_V4L2_COMMON_H
#undef __STRICT_ANSI__ //workaround due to broken kernel headers
#include "config.h"
#include "libavformat/internal.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#if HAVE_SYS_VIDEOIO_H
#include <sys/videoio.h>
#else
#if HAVE_ASM_TYPES_H
#include <asm/types.h>
#endif
#include <linux/videodev2.h>
#endif
#include "libavutil/atomic.h"
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#include "timefilter.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavutil/avstring.h"
struct fmt_map {
enum AVPixelFormat ff_fmt;
enum AVCodecID codec_id;
uint32_t v4l2_fmt;
};
extern const struct fmt_map ff_fmt_conversion_table[];
uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id);
enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id);
enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt);
#endif /* AVDEVICE_V4L2_COMMON_H */

File diff suppressed because it is too large


@@ -0,0 +1,119 @@
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "v4l2-common.h"
#include "avdevice.h"
typedef struct {
AVClass *class;
int fd;
} V4L2Context;
static av_cold int write_header(AVFormatContext *s1)
{
int res = 0, flags = O_RDWR;
struct v4l2_format fmt = {
.type = V4L2_BUF_TYPE_VIDEO_OUTPUT
};
V4L2Context *s = s1->priv_data;
AVCodecContext *enc_ctx;
uint32_t v4l2_pixfmt;
if (s1->flags & AVFMT_FLAG_NONBLOCK)
flags |= O_NONBLOCK;
s->fd = open(s1->filename, flags);
if (s->fd < 0) {
res = AVERROR(errno);
av_log(s1, AV_LOG_ERROR, "Unable to open V4L2 device '%s'\n", s1->filename);
return res;
}
if (s1->nb_streams != 1 ||
s1->streams[0]->codec->codec_type != AVMEDIA_TYPE_VIDEO ||
s1->streams[0]->codec->codec_id != AV_CODEC_ID_RAWVIDEO) {
av_log(s1, AV_LOG_ERROR,
"V4L2 output device supports only a single raw video stream\n");
return AVERROR(EINVAL);
}
enc_ctx = s1->streams[0]->codec;
v4l2_pixfmt = ff_fmt_ff2v4l(enc_ctx->pix_fmt, AV_CODEC_ID_RAWVIDEO);
if (!v4l2_pixfmt) { // XXX: try to force them one by one?
av_log(s1, AV_LOG_ERROR, "Unknown V4L2 pixel format equivalent for %s\n",
av_get_pix_fmt_name(enc_ctx->pix_fmt));
return AVERROR(EINVAL);
}
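    /* read-modify-write the device's output format: fetch the current
     * format with VIDIOC_G_FMT, patch in our geometry, pixel format and
     * image size, then push it back with VIDIOC_S_FMT */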
if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
res = AVERROR(errno);
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
return res;
}
fmt.fmt.pix.width = enc_ctx->width;
fmt.fmt.pix.height = enc_ctx->height;
fmt.fmt.pix.pixelformat = v4l2_pixfmt;
fmt.fmt.pix.sizeimage = av_image_get_buffer_size(enc_ctx->pix_fmt, enc_ctx->width, enc_ctx->height, 1);
if (ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0) {
res = AVERROR(errno);
av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_FMT): %s\n", av_err2str(res));
return res;
}
return res;
}
static int write_packet(AVFormatContext *s1, AVPacket *pkt)
{
const V4L2Context *s = s1->priv_data;
if (write(s->fd, pkt->data, pkt->size) == -1)
return AVERROR(errno);
return 0;
}
static int write_trailer(AVFormatContext *s1)
{
const V4L2Context *s = s1->priv_data;
close(s->fd);
return 0;
}
static const AVClass v4l2_class = {
.class_name = "V4L2 outdev",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
AVOutputFormat ff_v4l2_muxer = {
.name = "v4l2",
.long_name = NULL_IF_CONFIG_SMALL("Video4Linux2 output device"),
.priv_data_size = sizeof(V4L2Context),
.audio_codec = AV_CODEC_ID_NONE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = write_header,
.write_packet = write_packet,
.write_trailer = write_trailer,
.flags = AVFMT_NOFILE,
.priv_class = &v4l2_class,
};
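
/*
 * Illustrative sketch (not part of the original file): pushing one raw
 * frame to this output device through the public libavformat API. The
 * device path, frame size and pixel format are assumptions, and error
 * handling is trimmed for brevity.
 */
#include "libavformat/avformat.h"
#include "libavdevice/avdevice.h"

static int example_send_frame(const uint8_t *data, int size)
{
    AVFormatContext *oc = NULL;
    AVStream *st;
    AVPacket pkt;

    av_register_all();
    avdevice_register_all();
    /* "v4l2" selects ff_v4l2_muxer above; the device node is the filename */
    if (avformat_alloc_output_context2(&oc, NULL, "v4l2", "/dev/video1") < 0)
        return -1;
    st = avformat_new_stream(oc, NULL);
    if (!st)
        return -1;
    st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codec->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codec->pix_fmt    = AV_PIX_FMT_BGR24; /* must map to a V4L2 format */
    st->codec->width      = 640;
    st->codec->height     = 480;
    if (avformat_write_header(oc, NULL) < 0)  /* runs write_header() above */
        return -1;
    av_init_packet(&pkt);
    pkt.data = (uint8_t *)data; /* one av_image_get_buffer_size()-sized frame */
    pkt.size = size;
    av_write_frame(oc, &pkt);   /* write()s the frame to the device fd */
    av_write_trailer(oc);       /* closes the fd */
    avformat_free_context(oc);
    return 0;
}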


@@ -0,0 +1,50 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_VERSION_H
#define AVDEVICE_VERSION_H
/**
* @file
* @ingroup lavd
* Libavdevice version macros
*/
#include "libavutil/version.h"
#define LIBAVDEVICE_VERSION_MAJOR 56
#define LIBAVDEVICE_VERSION_MINOR 4
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
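/*
 * Worked example (informative): AV_VERSION_INT() packs the three numbers
 * as major << 16 | minor << 8 | micro, so 56.4.100 becomes
 * (56 << 16) | (4 << 8) | 100 = 0x380464, and LIBAVDEVICE_IDENT expands
 * to the string "Lavd56.4.100".
 */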
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#endif /* AVDEVICE_VERSION_H */


@@ -0,0 +1,490 @@
/*
* VFW capture interface
* Copyright (c) 2006-2008 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavformat/internal.h"
// windows.h must not be included before winsock2.h, and libavformat internal
// headers may include winsock2.h
#include <windows.h>
// windows.h needs to be included before vfw.h
#include <vfw.h>
#include "avdevice.h"
/* Some obsolete versions of MinGW32 before 4.0.0 lack this. */
#ifndef HWND_MESSAGE
#define HWND_MESSAGE ((HWND) -3)
#endif
struct vfw_ctx {
const AVClass *class;
HWND hwnd;
HANDLE mutex;
HANDLE event;
AVPacketList *pktl;
unsigned int curbufsize;
unsigned int frame_num;
char *video_size; /**< A string describing video size, set by a private option. */
char *framerate; /**< Set by a private option. */
};
static enum AVPixelFormat vfw_pixfmt(DWORD biCompression, WORD biBitCount)
{
switch(biCompression) {
case MKTAG('U', 'Y', 'V', 'Y'):
return AV_PIX_FMT_UYVY422;
case MKTAG('Y', 'U', 'Y', '2'):
return AV_PIX_FMT_YUYV422;
case MKTAG('I', '4', '2', '0'):
return AV_PIX_FMT_YUV420P;
case BI_RGB:
switch(biBitCount) { /* 1-8 are untested */
case 1:
return AV_PIX_FMT_MONOWHITE;
case 4:
return AV_PIX_FMT_RGB4;
case 8:
return AV_PIX_FMT_RGB8;
case 16:
return AV_PIX_FMT_RGB555;
case 24:
return AV_PIX_FMT_BGR24;
case 32:
return AV_PIX_FMT_RGB32;
}
}
return AV_PIX_FMT_NONE;
}
static enum AVCodecID vfw_codecid(DWORD biCompression)
{
switch(biCompression) {
case MKTAG('d', 'v', 's', 'd'):
return AV_CODEC_ID_DVVIDEO;
case MKTAG('M', 'J', 'P', 'G'):
case MKTAG('m', 'j', 'p', 'g'):
return AV_CODEC_ID_MJPEG;
}
return AV_CODEC_ID_NONE;
}
#define dstruct(pctx, sname, var, type) \
av_log(pctx, AV_LOG_DEBUG, #var":\t%"type"\n", sname->var)
static void dump_captureparms(AVFormatContext *s, CAPTUREPARMS *cparms)
{
av_log(s, AV_LOG_DEBUG, "CAPTUREPARMS\n");
dstruct(s, cparms, dwRequestMicroSecPerFrame, "lu");
dstruct(s, cparms, fMakeUserHitOKToCapture, "d");
dstruct(s, cparms, wPercentDropForError, "u");
dstruct(s, cparms, fYield, "d");
dstruct(s, cparms, dwIndexSize, "lu");
dstruct(s, cparms, wChunkGranularity, "u");
dstruct(s, cparms, fUsingDOSMemory, "d");
dstruct(s, cparms, wNumVideoRequested, "u");
dstruct(s, cparms, fCaptureAudio, "d");
dstruct(s, cparms, wNumAudioRequested, "u");
dstruct(s, cparms, vKeyAbort, "u");
dstruct(s, cparms, fAbortLeftMouse, "d");
dstruct(s, cparms, fAbortRightMouse, "d");
dstruct(s, cparms, fLimitEnabled, "d");
dstruct(s, cparms, wTimeLimit, "u");
dstruct(s, cparms, fMCIControl, "d");
dstruct(s, cparms, fStepMCIDevice, "d");
dstruct(s, cparms, dwMCIStartTime, "lu");
dstruct(s, cparms, dwMCIStopTime, "lu");
dstruct(s, cparms, fStepCaptureAt2x, "d");
dstruct(s, cparms, wStepCaptureAverageFrames, "u");
dstruct(s, cparms, dwAudioBufferSize, "lu");
dstruct(s, cparms, fDisableWriteCache, "d");
dstruct(s, cparms, AVStreamMaster, "u");
}
static void dump_videohdr(AVFormatContext *s, VIDEOHDR *vhdr)
{
#ifdef DEBUG
av_log(s, AV_LOG_DEBUG, "VIDEOHDR\n");
dstruct(s, vhdr, lpData, "p");
dstruct(s, vhdr, dwBufferLength, "lu");
dstruct(s, vhdr, dwBytesUsed, "lu");
dstruct(s, vhdr, dwTimeCaptured, "lu");
dstruct(s, vhdr, dwUser, "lu");
dstruct(s, vhdr, dwFlags, "lu");
dstruct(s, vhdr, dwReserved[0], "lu");
dstruct(s, vhdr, dwReserved[1], "lu");
dstruct(s, vhdr, dwReserved[2], "lu");
dstruct(s, vhdr, dwReserved[3], "lu");
#endif
}
static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
{
av_log(s, AV_LOG_DEBUG, "BITMAPINFOHEADER\n");
dstruct(s, bih, biSize, "lu");
dstruct(s, bih, biWidth, "ld");
dstruct(s, bih, biHeight, "ld");
dstruct(s, bih, biPlanes, "d");
dstruct(s, bih, biBitCount, "d");
dstruct(s, bih, biCompression, "lu");
av_log(s, AV_LOG_DEBUG, " biCompression:\t\"%.4s\"\n",
(char*) &bih->biCompression);
dstruct(s, bih, biSizeImage, "lu");
dstruct(s, bih, biXPelsPerMeter, "lu");
dstruct(s, bih, biYPelsPerMeter, "lu");
dstruct(s, bih, biClrUsed, "lu");
dstruct(s, bih, biClrImportant, "lu");
}
static int shall_we_drop(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
static const uint8_t dropscore[] = {62, 75, 87, 100};
const int ndropscores = FF_ARRAY_ELEMS(dropscore);
unsigned int buffer_fullness = (ctx->curbufsize*100)/s->max_picture_buffer;
if(dropscore[++ctx->frame_num%ndropscores] <= buffer_fullness) {
av_log(s, AV_LOG_ERROR,
"real-time buffer %d%% full! frame dropped!\n", buffer_fullness);
return 1;
}
return 0;
}
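/*
 * Worked example (informative): with the buffer 80% full, the rotating
 * dropscore[] entries 62 and 75 are <= 80 while 87 and 100 are not, so
 * roughly two out of every four callbacks drop their frame; once the
 * buffer is 100% full, every entry matches and all frames are dropped.
 */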
static LRESULT CALLBACK videostream_cb(HWND hwnd, LPVIDEOHDR vdhdr)
{
AVFormatContext *s;
struct vfw_ctx *ctx;
AVPacketList **ppktl, *pktl_next;
s = (AVFormatContext *) GetWindowLongPtr(hwnd, GWLP_USERDATA);
ctx = s->priv_data;
dump_videohdr(s, vdhdr);
if(shall_we_drop(s))
return FALSE;
WaitForSingleObject(ctx->mutex, INFINITE);
pktl_next = av_mallocz(sizeof(AVPacketList));
if(!pktl_next)
goto fail;
if(av_new_packet(&pktl_next->pkt, vdhdr->dwBytesUsed) < 0) {
av_free(pktl_next);
goto fail;
}
pktl_next->pkt.pts = vdhdr->dwTimeCaptured;
memcpy(pktl_next->pkt.data, vdhdr->lpData, vdhdr->dwBytesUsed);
for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
*ppktl = pktl_next;
ctx->curbufsize += vdhdr->dwBytesUsed;
SetEvent(ctx->event);
ReleaseMutex(ctx->mutex);
return TRUE;
fail:
ReleaseMutex(ctx->mutex);
return FALSE;
}
static int vfw_read_close(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
AVPacketList *pktl;
if(ctx->hwnd) {
SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, 0);
SendMessage(ctx->hwnd, WM_CAP_DRIVER_DISCONNECT, 0, 0);
DestroyWindow(ctx->hwnd);
}
if(ctx->mutex)
CloseHandle(ctx->mutex);
if(ctx->event)
CloseHandle(ctx->event);
pktl = ctx->pktl;
while (pktl) {
AVPacketList *next = pktl->next;
av_free_packet(&pktl->pkt);
av_free(pktl);
pktl = next;
}
return 0;
}
static int vfw_read_header(AVFormatContext *s)
{
struct vfw_ctx *ctx = s->priv_data;
AVCodecContext *codec;
AVStream *st;
int devnum;
int bisize;
BITMAPINFO *bi = NULL;
CAPTUREPARMS cparms;
DWORD biCompression;
WORD biBitCount;
int ret;
AVRational framerate_q;
if (!strcmp(s->filename, "list")) {
for (devnum = 0; devnum <= 9; devnum++) {
char driver_name[256];
char driver_ver[256];
ret = capGetDriverDescription(devnum,
driver_name, sizeof(driver_name),
driver_ver, sizeof(driver_ver));
if (ret) {
av_log(s, AV_LOG_INFO, "Driver %d\n", devnum);
av_log(s, AV_LOG_INFO, " %s\n", driver_name);
av_log(s, AV_LOG_INFO, " %s\n", driver_ver);
}
}
return AVERROR(EIO);
}
ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
if(!ctx->hwnd) {
av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
return AVERROR(EIO);
}
/* If atoi fails, devnum==0 and the default device is used */
devnum = atoi(s->filename);
ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
DestroyWindow(ctx->hwnd);
return AVERROR(ENODEV);
}
SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);
ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
(LPARAM) videostream_cb);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
goto fail;
}
SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);
st = avformat_new_stream(s, NULL);
if(!st) {
vfw_read_close(s);
return AVERROR(ENOMEM);
}
/* Set video format */
bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
if(!bisize)
goto fail;
bi = av_malloc(bisize);
if(!bi) {
vfw_read_close(s);
return AVERROR(ENOMEM);
}
ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
if(!ret)
goto fail;
dump_bih(s, &bi->bmiHeader);
ret = av_parse_video_rate(&framerate_q, ctx->framerate);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
goto fail;
}
if (ctx->video_size) {
ret = av_parse_video_size(&bi->bmiHeader.biWidth, &bi->bmiHeader.biHeight, ctx->video_size);
if (ret < 0) {
av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
goto fail;
}
}
if (0) {
/* For testing yet unsupported compressions
* Copy these values from user-supplied verbose information */
bi->bmiHeader.biWidth = 320;
bi->bmiHeader.biHeight = 240;
bi->bmiHeader.biPlanes = 1;
bi->bmiHeader.biBitCount = 12;
bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
bi->bmiHeader.biSizeImage = 115200;
dump_bih(s, &bi->bmiHeader);
}
ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
goto fail;
}
biCompression = bi->bmiHeader.biCompression;
biBitCount = bi->bmiHeader.biBitCount;
/* Set sequence setup */
ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
(LPARAM) &cparms);
if(!ret)
goto fail;
dump_captureparms(s, &cparms);
cparms.fYield = 1; // Spawn a background thread
cparms.dwRequestMicroSecPerFrame =
(framerate_q.den*1000000) / framerate_q.num;
cparms.fAbortLeftMouse = 0;
cparms.fAbortRightMouse = 0;
cparms.fCaptureAudio = 0;
cparms.vKeyAbort = 0;
ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
(LPARAM) &cparms);
if(!ret)
goto fail;
codec = st->codec;
codec->time_base = av_inv_q(framerate_q);
codec->codec_type = AVMEDIA_TYPE_VIDEO;
codec->width = bi->bmiHeader.biWidth;
codec->height = bi->bmiHeader.biHeight;
codec->pix_fmt = vfw_pixfmt(biCompression, biBitCount);
if(codec->pix_fmt == AV_PIX_FMT_NONE) {
codec->codec_id = vfw_codecid(biCompression);
if(codec->codec_id == AV_CODEC_ID_NONE) {
av_log(s, AV_LOG_ERROR, "Unknown compression type. "
"Please report verbose (-v 9) debug information.\n");
vfw_read_close(s);
return AVERROR_PATCHWELCOME;
}
codec->bits_per_coded_sample = biBitCount;
} else {
codec->codec_id = AV_CODEC_ID_RAWVIDEO;
if(biCompression == BI_RGB) {
codec->bits_per_coded_sample = biBitCount;
codec->extradata = av_malloc(9 + AV_INPUT_BUFFER_PADDING_SIZE);
if (codec->extradata) {
codec->extradata_size = 9;
memcpy(codec->extradata, "BottomUp", 9);
}
}
}
av_freep(&bi);
avpriv_set_pts_info(st, 32, 1, 1000);
ctx->mutex = CreateMutex(NULL, 0, NULL);
if(!ctx->mutex) {
av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
goto fail;
}
ctx->event = CreateEvent(NULL, 1, 0, NULL);
if(!ctx->event) {
av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
goto fail;
}
ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
if(!ret) {
av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
goto fail;
}
return 0;
fail:
av_freep(&bi);
vfw_read_close(s);
return AVERROR(EIO);
}
static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
struct vfw_ctx *ctx = s->priv_data;
AVPacketList *pktl = NULL;
while(!pktl) {
WaitForSingleObject(ctx->mutex, INFINITE);
pktl = ctx->pktl;
if(ctx->pktl) {
*pkt = ctx->pktl->pkt;
ctx->pktl = ctx->pktl->next;
av_free(pktl);
}
ResetEvent(ctx->event);
ReleaseMutex(ctx->mutex);
if(!pktl) {
if(s->flags & AVFMT_FLAG_NONBLOCK) {
return AVERROR(EAGAIN);
} else {
WaitForSingleObject(ctx->event, INFINITE);
}
}
}
ctx->curbufsize -= pkt->size;
return pkt->size;
}
#define OFFSET(x) offsetof(struct vfw_ctx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
{ NULL },
};
static const AVClass vfw_class = {
.class_name = "VFW indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
};
AVInputFormat ff_vfwcap_demuxer = {
.name = "vfwcap",
.long_name = NULL_IF_CONFIG_SMALL("VfW video capture"),
.priv_data_size = sizeof(struct vfw_ctx),
.read_header = vfw_read_header,
.read_packet = vfw_read_packet,
.read_close = vfw_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &vfw_class,
};
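
/*
 * Illustrative usage (not part of the original file): with this demuxer
 * registered, the magic device name "list" prints the installed VfW
 * drivers and a single digit selects one, e.g.:
 *
 *   ffmpeg -f vfwcap -i list
 *   ffmpeg -f vfwcap -framerate 25 -video_size 640x480 -i 0 out.avi
 */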


@@ -0,0 +1,695 @@
/*
* X11 video grab interface
*
* This file is part of FFmpeg.
*
* FFmpeg integration:
* Copyright (C) 2006 Clemens Fruhwirth <clemens@endorphin.org>
* Edouard Gomez <ed.gomez@free.fr>
*
* This file contains code from grab.c:
* Copyright (c) 2000-2001 Fabrice Bellard
*
* This file contains code from the xvidcap project:
* Copyright (C) 1997-1998 Rasca, Berlin
* 2003-2004 Karl H. Beckers, Frankfurt
*
* FFmpeg is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* X11 frame device demuxer
* @author Clemens Fruhwirth <clemens@endorphin.org>
* @author Edouard Gomez <ed.gomez@free.fr>
*/
#include "config.h"
#include <time.h>
#include <sys/shm.h>
#include <X11/cursorfont.h>
#include <X11/X.h>
#include <X11/Xlib.h>
#include <X11/Xlibint.h>
#include <X11/Xproto.h>
#include <X11/Xutil.h>
#include <X11/extensions/shape.h>
#include <X11/extensions/Xfixes.h>
#include <X11/extensions/XShm.h>
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
/** X11 device demuxer context */
typedef struct X11GrabContext {
const AVClass *class; /**< Class for private options. */
int frame_size; /**< Size in bytes of a grabbed frame */
AVRational time_base; /**< Time base */
int64_t time_frame; /**< Current time */
int width; /**< Width of the grab frame */
int height; /**< Height of the grab frame */
int x_off; /**< Horizontal top-left corner coordinate */
int y_off; /**< Vertical top-left corner coordinate */
Display *dpy; /**< X11 display from which x11grab grabs frames */
XImage *image; /**< X11 image holding the grab */
int use_shm; /**< !0 when using XShm extension */
XShmSegmentInfo shminfo; /**< When using XShm, keeps track of XShm infos */
int draw_mouse; /**< Set by a private option. */
int follow_mouse; /**< Set by a private option. */
int show_region; /**< Set by a private option. */
AVRational framerate; /**< Set by a private option. */
int palette_changed;
uint32_t palette[256];
Cursor c;
Window region_win; /**< This is used by show_region option. */
} X11GrabContext;
#define REGION_WIN_BORDER 3
/**
* Draw grabbing region window
*
* @param s x11grab context
*/
static void x11grab_draw_region_win(X11GrabContext *s)
{
Display *dpy = s->dpy;
Window win = s->region_win;
int screen = DefaultScreen(dpy);
GC gc = XCreateGC(dpy, win, 0, 0);
XSetForeground(dpy, gc, WhitePixel(dpy, screen));
XSetBackground(dpy, gc, BlackPixel(dpy, screen));
XSetLineAttributes(dpy, gc, REGION_WIN_BORDER, LineDoubleDash, 0, 0);
XDrawRectangle(dpy, win, gc, 1, 1,
(s->width + REGION_WIN_BORDER * 2) - 1 * 2 - 1,
(s->height + REGION_WIN_BORDER * 2) - 1 * 2 - 1);
XFreeGC(dpy, gc);
}
/**
* Initialize grabbing region window
*
* @param s x11grab context
*/
static void x11grab_region_win_init(X11GrabContext *s)
{
Display *dpy = s->dpy;
XRectangle rect;
XSetWindowAttributes attribs = { .override_redirect = True };
int screen = DefaultScreen(dpy);
s->region_win = XCreateWindow(dpy, RootWindow(dpy, screen),
s->x_off - REGION_WIN_BORDER,
s->y_off - REGION_WIN_BORDER,
s->width + REGION_WIN_BORDER * 2,
s->height + REGION_WIN_BORDER * 2,
0, CopyFromParent,
InputOutput, CopyFromParent,
CWOverrideRedirect, &attribs);
rect.x = 0;
rect.y = 0;
rect.width = s->width;
rect.height = s->height;
XShapeCombineRectangles(dpy, s->region_win,
ShapeBounding, REGION_WIN_BORDER, REGION_WIN_BORDER,
&rect, 1, ShapeSubtract, 0);
XMapWindow(dpy, s->region_win);
XSelectInput(dpy, s->region_win, ExposureMask | StructureNotifyMask);
x11grab_draw_region_win(s);
}
static int setup_shm(AVFormatContext *s, Display *dpy, XImage **image)
{
X11GrabContext *g = s->priv_data;
int scr = XDefaultScreen(dpy);
XImage *img = XShmCreateImage(dpy, DefaultVisual(dpy, scr),
DefaultDepth(dpy, scr), ZPixmap, NULL,
&g->shminfo, g->width, g->height);
g->shminfo.shmid = shmget(IPC_PRIVATE, img->bytes_per_line * img->height,
IPC_CREAT | 0777);
if (g->shminfo.shmid == -1) {
av_log(s, AV_LOG_ERROR, "Cannot get shared memory!\n");
return AVERROR(ENOMEM);
}
g->shminfo.shmaddr = img->data = shmat(g->shminfo.shmid, 0, 0);
g->shminfo.readOnly = False;
if (!XShmAttach(dpy, &g->shminfo)) {
av_log(s, AV_LOG_ERROR, "Failed to attach shared memory!\n");
/* needs some better error subroutine :) */
return AVERROR(EIO);
}
*image = img;
return 0;
}
static int setup_mouse(Display *dpy, int screen)
{
int ev_ret, ev_err;
if (XFixesQueryExtension(dpy, &ev_ret, &ev_err)) {
Window root = RootWindow(dpy, screen);
XFixesSelectCursorInput(dpy, root, XFixesDisplayCursorNotifyMask);
return 0;
}
return AVERROR(ENOSYS);
}
static int pixfmt_from_image(AVFormatContext *s, XImage *image, int *pix_fmt)
{
av_log(s, AV_LOG_DEBUG,
"Image r 0x%.6lx g 0x%.6lx b 0x%.6lx and depth %i\n",
image->red_mask,
image->green_mask,
image->blue_mask,
image->bits_per_pixel);
*pix_fmt = AV_PIX_FMT_NONE;
switch (image->bits_per_pixel) {
case 8:
*pix_fmt = AV_PIX_FMT_PAL8;
break;
case 16:
if (image->red_mask == 0xf800 &&
image->green_mask == 0x07e0 &&
image->blue_mask == 0x001f) {
*pix_fmt = AV_PIX_FMT_RGB565;
} else if (image->red_mask == 0x7c00 &&
image->green_mask == 0x03e0 &&
image->blue_mask == 0x001f) {
*pix_fmt = AV_PIX_FMT_RGB555;
}
break;
case 24:
if (image->red_mask == 0xff0000 &&
image->green_mask == 0x00ff00 &&
image->blue_mask == 0x0000ff) {
*pix_fmt = AV_PIX_FMT_BGR24;
} else if (image->red_mask == 0x0000ff &&
image->green_mask == 0x00ff00 &&
image->blue_mask == 0xff0000) {
*pix_fmt = AV_PIX_FMT_RGB24;
}
break;
case 32:
if (image->red_mask == 0xff0000 &&
image->green_mask == 0x00ff00 &&
image->blue_mask == 0x0000ff ) {
*pix_fmt = AV_PIX_FMT_0RGB32;
}
break;
}
if (*pix_fmt == AV_PIX_FMT_NONE) {
av_log(s, AV_LOG_ERROR,
"XImages with RGB mask 0x%.6lx 0x%.6lx 0x%.6lx and depth %i "
"are currently not supported.\n",
image->red_mask,
image->green_mask,
image->blue_mask,
image->bits_per_pixel);
return AVERROR_PATCHWELCOME;
}
return 0;
}
/**
* Initialize the x11 grab device demuxer (public device demuxer API).
*
* @param s1 Context from avformat core
* @return <ul>
* <li>AVERROR(ENOMEM) no memory left</li>
* <li>AVERROR(EIO) other failure case</li>
* <li>0 success</li>
* </ul>
*/
static int x11grab_read_header(AVFormatContext *s1)
{
X11GrabContext *x11grab = s1->priv_data;
Display *dpy;
AVStream *st = NULL;
XImage *image;
int x_off = 0, y_off = 0, ret = 0, screen, use_shm = 0;
char *dpyname, *offset;
Colormap color_map;
XColor color[256];
int i;
dpyname = av_strdup(s1->filename);
if (!dpyname) {
    ret = AVERROR(ENOMEM);
    goto out;
}
offset = strchr(dpyname, '+');
if (offset) {
sscanf(offset, "%d,%d", &x_off, &y_off);
if (strstr(offset, "nomouse")) {
av_log(s1, AV_LOG_WARNING,
"'nomouse' specification in argument is deprecated: "
"use 'draw_mouse' option with value 0 instead\n");
x11grab->draw_mouse = 0;
}
*offset = 0;
}
av_log(s1, AV_LOG_INFO,
"device: %s -> display: %s x: %d y: %d width: %d height: %d\n",
s1->filename, dpyname, x_off, y_off, x11grab->width, x11grab->height);
dpy = XOpenDisplay(dpyname);
av_freep(&dpyname);
if (!dpy) {
av_log(s1, AV_LOG_ERROR, "Could not open X display.\n");
ret = AVERROR(EIO);
goto out;
}
st = avformat_new_stream(s1, NULL);
if (!st) {
ret = AVERROR(ENOMEM);
goto out;
}
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
screen = DefaultScreen(dpy);
if (x11grab->follow_mouse) {
int screen_w, screen_h;
Window w;
screen_w = DisplayWidth(dpy, screen);
screen_h = DisplayHeight(dpy, screen);
XQueryPointer(dpy, RootWindow(dpy, screen), &w, &w, &x_off, &y_off,
&ret, &ret, &ret);
x_off -= x11grab->width / 2;
y_off -= x11grab->height / 2;
x_off = av_clip(x_off, 0, screen_w - x11grab->width);
y_off = av_clip(y_off, 0, screen_h - x11grab->height);
av_log(s1, AV_LOG_INFO,
"followmouse is enabled, resetting grabbing region to x: %d y: %d\n",
x_off, y_off);
}
if (x11grab->use_shm) {
use_shm = XShmQueryExtension(dpy);
av_log(s1, AV_LOG_INFO,
"shared memory extension %sfound\n", use_shm ? "" : "not ");
}
if (use_shm && setup_shm(s1, dpy, &image) < 0) {
av_log(s1, AV_LOG_WARNING, "Falling back to XGetImage\n");
use_shm = 0;
}
if (!use_shm) {
image = XGetImage(dpy, RootWindow(dpy, screen),
x_off, y_off,
x11grab->width, x11grab->height,
AllPlanes, ZPixmap);
}
if (x11grab->draw_mouse && setup_mouse(dpy, screen) < 0) {
av_log(s1, AV_LOG_WARNING,
"XFixes not available, cannot draw the mouse cursor\n");
x11grab->draw_mouse = 0;
}
x11grab->frame_size = x11grab->width * x11grab->height * image->bits_per_pixel / 8;
x11grab->dpy = dpy;
x11grab->time_base = av_inv_q(x11grab->framerate);
x11grab->time_frame = av_gettime() / av_q2d(x11grab->time_base);
x11grab->x_off = x_off;
x11grab->y_off = y_off;
x11grab->image = image;
x11grab->use_shm = use_shm;
ret = pixfmt_from_image(s1, image, &st->codec->pix_fmt);
if (ret < 0)
goto out;
if (st->codec->pix_fmt == AV_PIX_FMT_PAL8) {
color_map = DefaultColormap(dpy, screen);
for (i = 0; i < 256; ++i)
color[i].pixel = i;
XQueryColors(dpy, color_map, color, 256);
for (i = 0; i < 256; ++i)
x11grab->palette[i] = (color[i].red & 0xFF00) << 8 |
(color[i].green & 0xFF00) |
(color[i].blue & 0xFF00) >> 8;
x11grab->palette_changed = 1;
}
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->width = x11grab->width;
st->codec->height = x11grab->height;
st->codec->time_base = x11grab->time_base;
st->codec->bit_rate = x11grab->frame_size * 1 / av_q2d(x11grab->time_base) * 8;
out:
av_free(dpyname);
return ret;
}
/**
* Paint a mouse pointer in an X11 image.
*
* @param image image to paint the mouse pointer to
* @param s1 context used to retrieve original grabbing rectangle
* coordinates
*/
static void paint_mouse_pointer(XImage *image, AVFormatContext *s1)
{
X11GrabContext *s = s1->priv_data;
int x_off = s->x_off;
int y_off = s->y_off;
int width = s->width;
int height = s->height;
Display *dpy = s->dpy;
XFixesCursorImage *xcim;
int x, y;
int line, column;
int to_line, to_column;
int pixstride = image->bits_per_pixel >> 3;
/* Warning: in its insanity, xlib provides unsigned image data through a
* char* pointer, so we have to make it uint8_t to make things not break.
* Anyone who performs further investigation of the xlib API likely risks
* permanent brain damage. */
uint8_t *pix = image->data;
Window root;
XSetWindowAttributes attr;
/* Code doesn't currently support 16-bit or PAL8 */
if (image->bits_per_pixel != 24 && image->bits_per_pixel != 32)
return;
if (!s->c)
s->c = XCreateFontCursor(dpy, XC_left_ptr);
root = DefaultRootWindow(dpy);
attr.cursor = s->c;
XChangeWindowAttributes(dpy, root, CWCursor, &attr);
xcim = XFixesGetCursorImage(dpy);
if (!xcim) {
av_log(s1, AV_LOG_WARNING,
"XFixesGetCursorImage failed\n");
return;
}
x = xcim->x - xcim->xhot;
y = xcim->y - xcim->yhot;
to_line = FFMIN((y + xcim->height), (height + y_off));
to_column = FFMIN((x + xcim->width), (width + x_off));
for (line = FFMAX(y, y_off); line < to_line; line++) {
for (column = FFMAX(x, x_off); column < to_column; column++) {
int xcim_addr = (line - y) * xcim->width + column - x;
int image_addr = ((line - y_off) * width + column - x_off) * pixstride;
int r = (uint8_t)(xcim->pixels[xcim_addr] >> 0);
int g = (uint8_t)(xcim->pixels[xcim_addr] >> 8);
int b = (uint8_t)(xcim->pixels[xcim_addr] >> 16);
int a = (uint8_t)(xcim->pixels[xcim_addr] >> 24);
if (a == 255) {
pix[image_addr + 0] = r;
pix[image_addr + 1] = g;
pix[image_addr + 2] = b;
} else if (a) {
/* pixel values from XFixesGetCursorImage come premultiplied by alpha */
pix[image_addr + 0] = r + (pix[image_addr + 0] * (255 - a) + 255 / 2) / 255;
pix[image_addr + 1] = g + (pix[image_addr + 1] * (255 - a) + 255 / 2) / 255;
pix[image_addr + 2] = b + (pix[image_addr + 2] * (255 - a) + 255 / 2) / 255;
}
}
}
XFree(xcim);
xcim = NULL;
}
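/*
 * Worked example (informative): XFixes returns premultiplied pixels, so
 * with a = 128 a cursor red value of r = 100 already includes the 128/255
 * factor. Over a background value of 200 the blend above computes
 * 100 + (200 * (255 - 128) + 127) / 255 = 100 + 100 = 200: the
 * premultiplied source is added to the attenuated destination.
 */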
/**
* Read new data in the image structure.
*
* @param dpy X11 display to grab from
* @param d Drawable (typically the root window) to read the pixels from
* @param image Image where the grab will be put
* @param x Top-Left grabbing rectangle horizontal coordinate
* @param y Top-Left grabbing rectangle vertical coordinate
* @return 0 if error, !0 if successful
*/
static int xget_zpixmap(Display *dpy, Drawable d, XImage *image, int x, int y)
{
xGetImageReply rep;
xGetImageReq *req;
long nbytes;
if (!image)
return 0;
LockDisplay(dpy);
GetReq(GetImage, req);
/* First set up the standard stuff in the request */
req->drawable = d;
req->x = x;
req->y = y;
req->width = image->width;
req->height = image->height;
req->planeMask = (unsigned int)AllPlanes;
req->format = ZPixmap;
if (!_XReply(dpy, (xReply *)&rep, 0, xFalse) || !rep.length) {
UnlockDisplay(dpy);
SyncHandle();
return 0;
}
nbytes = (long)rep.length << 2;
_XReadPad(dpy, image->data, nbytes);
UnlockDisplay(dpy);
SyncHandle();
return 1;
}
/**
* Grab a frame from x11 (public device demuxer API).
*
* @param s1 Context from avformat core
* @param pkt Packet holding the grabbed frame
* @return frame size in bytes
*/
static int x11grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
X11GrabContext *s = s1->priv_data;
Display *dpy = s->dpy;
XImage *image = s->image;
int x_off = s->x_off;
int y_off = s->y_off;
int follow_mouse = s->follow_mouse;
int screen, pointer_x, pointer_y, _, same_screen = 1;
Window w, root;
int64_t curtime, delay;
struct timespec ts;
/* Calculate the time of the next frame */
s->time_frame += INT64_C(1000000);
/* wait based on the frame rate */
for (;;) {
curtime = av_gettime();
delay = s->time_frame * av_q2d(s->time_base) - curtime;
if (delay <= 0) {
if (delay < INT64_C(-1000000) * av_q2d(s->time_base))
s->time_frame += INT64_C(1000000);
break;
}
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
nanosleep(&ts, NULL);
}
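    /*
     * Informative: time_frame counts frames scaled by 1e6, so with the
     * default ntsc rate (time_base = 1001/30000) one increment of 1000000
     * corresponds to 1000000 * 1001 / 30000 ~= 33367 us of wall-clock
     * time; the loop above sleeps until av_gettime() catches up and skips
     * one frame's worth if it has fallen more than a frame interval behind.
     */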
av_init_packet(pkt);
pkt->data = image->data;
pkt->size = s->frame_size;
pkt->pts = curtime;
if (s->palette_changed) {
uint8_t *pal = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
AVPALETTE_SIZE);
if (!pal) {
av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
} else {
memcpy(pal, s->palette, AVPALETTE_SIZE);
s->palette_changed = 0;
}
}
screen = DefaultScreen(dpy);
root = RootWindow(dpy, screen);
if (follow_mouse || s->draw_mouse)
same_screen = XQueryPointer(dpy, root, &w, &w,
&pointer_x, &pointer_y, &_, &_, &_);
if (follow_mouse && same_screen) {
int screen_w, screen_h;
screen_w = DisplayWidth(dpy, screen);
screen_h = DisplayHeight(dpy, screen);
if (follow_mouse == -1) {
// follow the mouse, put it at center of grabbing region
x_off += pointer_x - s->width / 2 - x_off;
y_off += pointer_y - s->height / 2 - y_off;
} else {
// follow the mouse, but only move the grabbing region when mouse
// reaches within certain pixels to the edge.
if (pointer_x > x_off + s->width - follow_mouse)
x_off += pointer_x - (x_off + s->width - follow_mouse);
else if (pointer_x < x_off + follow_mouse)
x_off -= (x_off + follow_mouse) - pointer_x;
if (pointer_y > y_off + s->height - follow_mouse)
y_off += pointer_y - (y_off + s->height - follow_mouse);
else if (pointer_y < y_off + follow_mouse)
y_off -= (y_off + follow_mouse) - pointer_y;
}
// adjust grabbing region position if it goes out of screen.
s->x_off = x_off = av_clip(x_off, 0, screen_w - s->width);
s->y_off = y_off = av_clip(y_off, 0, screen_h - s->height);
if (s->show_region && s->region_win)
XMoveWindow(dpy, s->region_win,
s->x_off - REGION_WIN_BORDER,
s->y_off - REGION_WIN_BORDER);
}
if (s->show_region && same_screen) {
if (s->region_win) {
XEvent evt = { .type = NoEventMask };
// Clean up the events, and do the initial draw or redraw.
while (XCheckMaskEvent(dpy, ExposureMask | StructureNotifyMask,
&evt))
;
if (evt.type)
x11grab_draw_region_win(s);
} else {
x11grab_region_win_init(s);
}
}
if (s->use_shm) {
if (!XShmGetImage(dpy, root, image, x_off, y_off, AllPlanes))
av_log(s1, AV_LOG_INFO, "XShmGetImage() failed\n");
} else {
if (!xget_zpixmap(dpy, root, image, x_off, y_off))
av_log(s1, AV_LOG_INFO, "XGetZPixmap() failed\n");
}
if (s->draw_mouse && same_screen)
paint_mouse_pointer(image, s1);
return s->frame_size;
}
/**
* Close x11 frame grabber (public device demuxer API).
*
* @param s1 Context from avformat core
* @return 0 success, !0 failure
*/
static int x11grab_read_close(AVFormatContext *s1)
{
X11GrabContext *x11grab = s1->priv_data;
/* Detach cleanly from shared mem */
if (x11grab->use_shm) {
XShmDetach(x11grab->dpy, &x11grab->shminfo);
shmdt(x11grab->shminfo.shmaddr);
shmctl(x11grab->shminfo.shmid, IPC_RMID, NULL);
}
/* Destroy X11 image */
if (x11grab->image) {
XDestroyImage(x11grab->image);
x11grab->image = NULL;
}
if (x11grab->region_win)
XDestroyWindow(x11grab->dpy, x11grab->region_win);
/* Free X11 display */
XCloseDisplay(x11grab->dpy);
return 0;
}
#define OFFSET(x) offsetof(X11GrabContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "grab_x", "Initial x coordinate.", OFFSET(x_off), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
{ "grab_y", "Initial y coordinate.", OFFSET(y_off), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
{ "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
{ "follow_mouse", "move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region",
OFFSET(follow_mouse), AV_OPT_TYPE_INT, {.i64 = 0}, -1, INT_MAX, DEC, "follow_mouse" },
{ "centered", "keep the mouse pointer at the center of grabbing region when following",
0, AV_OPT_TYPE_CONST, {.i64 = -1}, INT_MIN, INT_MAX, DEC, "follow_mouse" },
{ "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, 0, DEC },
{ "show_region", "show the grabbing region", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
{ "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
{ "use_shm", "use MIT-SHM extension", OFFSET(use_shm), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
{ NULL },
};
static const AVClass x11_class = {
.class_name = "X11grab indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/** x11 grabber device demuxer declaration */
AVInputFormat ff_x11grab_demuxer = {
.name = "x11grab",
.long_name = NULL_IF_CONFIG_SMALL("X11grab"),
.priv_data_size = sizeof(X11GrabContext),
.read_header = x11grab_read_header,
.read_packet = x11grab_read_packet,
.read_close = x11grab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &x11_class,
};
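
/*
 * Illustrative usage (not part of the original file): the input name is
 * parsed as display[+x,y], so grabbing a vga-sized region at offset
 * (10,20) of display :0.0 looks like:
 *
 *   ffmpeg -f x11grab -framerate 25 -video_size vga -i :0.0+10,20 out.mpg
 */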


@@ -0,0 +1,698 @@
/*
* XCB input grabber
* Copyright (C) 2014 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdlib.h>
#include <xcb/xcb.h>
#if CONFIG_LIBXCB_XFIXES
#include <xcb/xfixes.h>
#endif
#if CONFIG_LIBXCB_SHM
#include <sys/shm.h>
#include <xcb/shm.h>
#endif
#if CONFIG_LIBXCB_SHAPE
#include <xcb/shape.h>
#endif
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
typedef struct XCBGrabContext {
const AVClass *class;
xcb_connection_t *conn;
xcb_screen_t *screen;
xcb_window_t window;
#if CONFIG_LIBXCB_SHM
xcb_shm_seg_t segment;
#endif
int64_t time_frame;
AVRational time_base;
int x, y;
int width, height;
int frame_size;
int bpp;
int draw_mouse;
int follow_mouse;
int show_region;
int region_border;
int centered;
const char *video_size;
const char *framerate;
int has_shm;
} XCBGrabContext;
#define FOLLOW_CENTER -1
#define OFFSET(x) offsetof(XCBGrabContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
static const AVOption options[] = {
{ "x", "Initial x coordinate.", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
{ "y", "Initial y coordinate.", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
{ "grab_x", "Initial x coordinate.", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
{ "grab_y", "Initial y coordinate.", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
{ "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "vga" }, 0, 0, D },
{ "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc" }, 0, 0, D },
{ "draw_mouse", "Draw the mouse pointer.", OFFSET(draw_mouse), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, D },
{ "follow_mouse", "Move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region.",
OFFSET(follow_mouse), AV_OPT_TYPE_INT, { .i64 = 0 }, FOLLOW_CENTER, INT_MAX, D, "follow_mouse" },
{ "centered", "Keep the mouse pointer at the center of grabbing region when following.", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, D, "follow_mouse" },
{ "show_region", "Show the grabbing region.", OFFSET(show_region), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, D },
{ "region_border", "Set the region border thickness.", OFFSET(region_border), AV_OPT_TYPE_INT, { .i64 = 3 }, 1, 128, D },
{ NULL },
};
static const AVClass xcbgrab_class = {
.class_name = "xcbgrab indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
static int xcbgrab_reposition(AVFormatContext *s,
xcb_query_pointer_reply_t *p,
xcb_get_geometry_reply_t *geo)
{
XCBGrabContext *c = s->priv_data;
int x = c->x, y = c->y;
int w = c->width, h = c->height, f = c->follow_mouse;
int p_x, p_y;
if (!p || !geo)
return AVERROR(EIO);
p_x = p->win_x;
p_y = p->win_y;
if (f == FOLLOW_CENTER) {
x = p_x - w / 2;
y = p_y - h / 2;
} else {
int left = x + f;
int right = x + w - f;
int top = y + f;
int bottom = y + h + f;
if (p_x > right) {
x += p_x - right;
} else if (p_x < left) {
x -= left - p_x;
}
if (p_y > bottom) {
y += p_y - bottom;
} else if (p_y < top) {
y -= top - p_y;
}
}
c->x = FFMIN(FFMAX(0, x), geo->width - w);
c->y = FFMIN(FFMAX(0, y), geo->height - h);
return 0;
}
static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
{
XCBGrabContext *c = s->priv_data;
xcb_get_image_cookie_t iq;
xcb_get_image_reply_t *img;
xcb_drawable_t drawable = c->screen->root;
xcb_generic_error_t *e = NULL;
uint8_t *data;
int length, ret;
iq = xcb_get_image(c->conn, XCB_IMAGE_FORMAT_Z_PIXMAP, drawable,
c->x, c->y, c->width, c->height, ~0);
img = xcb_get_image_reply(c->conn, iq, &e);
if (e) {
av_log(s, AV_LOG_ERROR,
"Cannot get the image data "
"event_error: response_type:%u error_code:%u "
"sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
e->response_type, e->error_code,
e->sequence, e->resource_id, e->minor_code, e->major_code);
return AVERROR(EACCES);
}
if (!img)
return AVERROR(EAGAIN);
data = xcb_get_image_data(img);
length = xcb_get_image_data_length(img);
ret = av_new_packet(pkt, length);
if (!ret)
memcpy(pkt->data, data, length);
free(img);
return ret;
}
static void wait_frame(AVFormatContext *s, AVPacket *pkt)
{
XCBGrabContext *c = s->priv_data;
int64_t curtime, delay;
int64_t frame_time = av_rescale_q(1, c->time_base, AV_TIME_BASE_Q);
c->time_frame += frame_time;
for (;;) {
curtime = av_gettime();
delay = c->time_frame - curtime;
if (delay <= 0)
break;
av_usleep(delay);
}
pkt->pts = curtime;
}
#if CONFIG_LIBXCB_SHM
static int check_shm(xcb_connection_t *conn)
{
xcb_shm_query_version_cookie_t cookie = xcb_shm_query_version(conn);
xcb_shm_query_version_reply_t *reply;
reply = xcb_shm_query_version_reply(conn, cookie, NULL);
if (reply) {
free(reply);
return 1;
}
return 0;
}
static void dealloc_shm(void *unused, uint8_t *data)
{
shmdt(data);
}
static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt)
{
XCBGrabContext *c = s->priv_data;
xcb_shm_get_image_cookie_t iq;
xcb_shm_get_image_reply_t *img;
xcb_drawable_t drawable = c->screen->root;
uint8_t *data;
int size = c->frame_size + AV_INPUT_BUFFER_PADDING_SIZE;
int id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0777);
xcb_generic_error_t *e = NULL;
if (id == -1) {
char errbuf[1024];
int err = AVERROR(errno);
av_strerror(err, errbuf, sizeof(errbuf));
av_log(s, AV_LOG_ERROR, "Cannot get %d bytes of shared memory: %s.\n",
size, errbuf);
return err;
}
xcb_shm_attach(c->conn, c->segment, id, 0);
iq = xcb_shm_get_image(c->conn, drawable,
c->x, c->y, c->width, c->height, ~0,
XCB_IMAGE_FORMAT_Z_PIXMAP, c->segment, 0);
xcb_shm_detach(c->conn, c->segment);
img = xcb_shm_get_image_reply(c->conn, iq, &e);
xcb_flush(c->conn);
if (e) {
av_log(s, AV_LOG_ERROR,
"Cannot get the image data "
"event_error: response_type:%u error_code:%u "
"sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
e->response_type, e->error_code,
e->sequence, e->resource_id, e->minor_code, e->major_code);
shmctl(id, IPC_RMID, 0);
return AVERROR(EACCES);
}
free(img);
data = shmat(id, NULL, 0);
shmctl(id, IPC_RMID, 0);
if ((intptr_t)data == -1)
return AVERROR(errno);
pkt->buf = av_buffer_create(data, size, dealloc_shm, NULL, 0);
if (!pkt->buf) {
shmdt(data);
return AVERROR(ENOMEM);
}
pkt->data = pkt->buf->data;
pkt->size = c->frame_size;
return 0;
}
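/*
 * Informative: wrapping the SHM mapping with av_buffer_create() makes the
 * packet zero-copy; dealloc_shm() runs shmdt() only when the last
 * reference to pkt->buf is released, and the IPC_RMID above lets the
 * kernel reclaim the segment once it is fully detached.
 */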
#endif /* CONFIG_LIBXCB_SHM */
#if CONFIG_LIBXCB_XFIXES
static int check_xfixes(xcb_connection_t *conn)
{
xcb_xfixes_query_version_cookie_t cookie;
xcb_xfixes_query_version_reply_t *reply;
cookie = xcb_xfixes_query_version(conn, XCB_XFIXES_MAJOR_VERSION,
XCB_XFIXES_MINOR_VERSION);
reply = xcb_xfixes_query_version_reply(conn, cookie, NULL);
if (reply) {
free(reply);
return 1;
}
return 0;
}
#define BLEND(target, source, alpha) \
(target) + ((source) * (255 - (alpha)) + 255 / 2) / 255
static void xcbgrab_draw_mouse(AVFormatContext *s, AVPacket *pkt,
xcb_query_pointer_reply_t *p,
xcb_get_geometry_reply_t *geo)
{
XCBGrabContext *gr = s->priv_data;
uint32_t *cursor;
uint8_t *image = pkt->data;
int stride = gr->bpp / 8;
xcb_xfixes_get_cursor_image_cookie_t cc;
xcb_xfixes_get_cursor_image_reply_t *ci;
int cx, cy, x, y, w, h, c_off, i_off;
cc = xcb_xfixes_get_cursor_image(gr->conn);
ci = xcb_xfixes_get_cursor_image_reply(gr->conn, cc, NULL);
if (!ci)
return;
cursor = xcb_xfixes_get_cursor_image_cursor_image(ci);
if (!cursor) {
    free(ci);
    return;
}
cx = ci->x - ci->xhot;
cy = ci->y - ci->yhot;
x = FFMAX(cx, gr->x);
y = FFMAX(cy, gr->y);
w = FFMIN(cx + ci->width, gr->x + gr->width) - x;
h = FFMIN(cy + ci->height, gr->y + gr->height) - y;
c_off = x - cx;
i_off = x - gr->x;
cursor += (y - cy) * ci->width;
image += (y - gr->y) * gr->width * stride;
for (y = 0; y < h; y++) {
cursor += c_off;
image += i_off * stride;
for (x = 0; x < w; x++, cursor++, image += stride) {
int r, g, b, a;
r = *cursor & 0xff;
g = (*cursor >> 8) & 0xff;
b = (*cursor >> 16) & 0xff;
a = (*cursor >> 24) & 0xff;
if (!a)
continue;
if (a == 255) {
image[0] = r;
image[1] = g;
image[2] = b;
} else {
image[0] = BLEND(r, image[0], a);
image[1] = BLEND(g, image[1], a);
image[2] = BLEND(b, image[2], a);
}
}
cursor += ci->width - w - c_off;
image += (gr->width - w - i_off) * stride;
}
free(ci);
}
#endif /* CONFIG_LIBXCB_XFIXES */
static void xcbgrab_update_region(AVFormatContext *s)
{
XCBGrabContext *c = s->priv_data;
const uint32_t args[] = { c->x - c->region_border,
c->y - c->region_border };
xcb_configure_window(c->conn,
c->window,
XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y,
args);
}
static int xcbgrab_read_packet(AVFormatContext *s, AVPacket *pkt)
{
XCBGrabContext *c = s->priv_data;
xcb_query_pointer_cookie_t pc;
xcb_get_geometry_cookie_t gc;
xcb_query_pointer_reply_t *p = NULL;
xcb_get_geometry_reply_t *geo = NULL;
int ret = 0;
wait_frame(s, pkt);
if (c->follow_mouse || c->draw_mouse) {
pc = xcb_query_pointer(c->conn, c->screen->root);
gc = xcb_get_geometry(c->conn, c->screen->root);
p = xcb_query_pointer_reply(c->conn, pc, NULL);
geo = xcb_get_geometry_reply(c->conn, gc, NULL);
}
if (c->follow_mouse && p && p->same_screen)
xcbgrab_reposition(s, p, geo);
if (c->show_region)
xcbgrab_update_region(s);
#if CONFIG_LIBXCB_SHM
if (c->has_shm && xcbgrab_frame_shm(s, pkt) < 0)
c->has_shm = 0;
#endif
if (!c->has_shm)
ret = xcbgrab_frame(s, pkt);
#if CONFIG_LIBXCB_XFIXES
if (ret >= 0 && c->draw_mouse && p && p->same_screen)
xcbgrab_draw_mouse(s, pkt, p, geo);
#endif
free(p);
free(geo);
return ret;
}
static av_cold int xcbgrab_read_close(AVFormatContext *s)
{
XCBGrabContext *ctx = s->priv_data;
xcb_disconnect(ctx->conn);
return 0;
}
static xcb_screen_t *get_screen(const xcb_setup_t *setup, int screen_num)
{
xcb_screen_iterator_t it = xcb_setup_roots_iterator(setup);
xcb_screen_t *screen = NULL;
for (; it.rem > 0; xcb_screen_next (&it)) {
if (!screen_num) {
screen = it.data;
break;
}
screen_num--;
}
return screen;
}
static int pixfmt_from_pixmap_format(AVFormatContext *s, int depth,
int *pix_fmt)
{
XCBGrabContext *c = s->priv_data;
const xcb_setup_t *setup = xcb_get_setup(c->conn);
const xcb_format_t *fmt = xcb_setup_pixmap_formats(setup);
int length = xcb_setup_pixmap_formats_length(setup);
*pix_fmt = 0;
while (length--) {
if (fmt->depth == depth) {
switch (depth) {
case 32:
if (fmt->bits_per_pixel == 32)
*pix_fmt = AV_PIX_FMT_0RGB;
break;
case 24:
if (fmt->bits_per_pixel == 32)
*pix_fmt = AV_PIX_FMT_0RGB32;
else if (fmt->bits_per_pixel == 24)
*pix_fmt = AV_PIX_FMT_RGB24;
break;
case 16:
if (fmt->bits_per_pixel == 16)
*pix_fmt = AV_PIX_FMT_RGB565;
break;
case 15:
if (fmt->bits_per_pixel == 16)
*pix_fmt = AV_PIX_FMT_RGB555;
break;
case 8:
if (fmt->bits_per_pixel == 8)
*pix_fmt = AV_PIX_FMT_RGB8;
break;
}
}
if (*pix_fmt) {
c->bpp = fmt->bits_per_pixel;
c->frame_size = c->width * c->height * fmt->bits_per_pixel / 8;
return 0;
}
fmt++;
}
av_log(s, AV_LOG_ERROR, "Pixmap format not mappable.\n");
return AVERROR_PATCHWELCOME;
}
static int create_stream(AVFormatContext *s)
{
XCBGrabContext *c = s->priv_data;
AVStream *st = avformat_new_stream(s, NULL);
xcb_get_geometry_cookie_t gc;
xcb_get_geometry_reply_t *geo;
int ret;
if (!st)
return AVERROR(ENOMEM);
ret = av_parse_video_size(&c->width, &c->height, c->video_size);
if (ret < 0)
return ret;
ret = av_parse_video_rate(&st->avg_frame_rate, c->framerate);
if (ret < 0)
return ret;
avpriv_set_pts_info(st, 64, 1, 1000000);
gc = xcb_get_geometry(c->conn, c->screen->root);
geo = xcb_get_geometry_reply(c->conn, gc, NULL);
if (c->x + c->width > geo->width ||
c->y + c->height > geo->height) {
av_log(s, AV_LOG_ERROR,
"Capture area %dx%d at position %d.%d "
"outside the screen size %dx%d\n",
c->width, c->height,
c->x, c->y,
geo->width, geo->height);
return AVERROR(EINVAL);
}
c->time_base = (AVRational){ st->avg_frame_rate.den,
st->avg_frame_rate.num };
c->time_frame = av_gettime();
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->width = c->width;
st->codec->height = c->height;
st->codec->time_base = c->time_base;
ret = pixfmt_from_pixmap_format(s, geo->depth, &st->codec->pix_fmt);
free(geo);
return ret;
}
static void draw_rectangle(AVFormatContext *s)
{
XCBGrabContext *c = s->priv_data;
xcb_gcontext_t gc = xcb_generate_id(c->conn);
uint32_t mask = XCB_GC_FOREGROUND |
XCB_GC_BACKGROUND |
XCB_GC_LINE_WIDTH |
XCB_GC_LINE_STYLE |
XCB_GC_FILL_STYLE;
uint32_t values[] = { c->screen->black_pixel,
c->screen->white_pixel,
c->region_border,
XCB_LINE_STYLE_DOUBLE_DASH,
XCB_FILL_STYLE_SOLID };
xcb_rectangle_t r = { 1, 1,
c->width + c->region_border * 2 - 3,
c->height + c->region_border * 2 - 3 };
xcb_create_gc(c->conn, gc, c->window, mask, values);
xcb_poly_rectangle(c->conn, c->window, gc, 1, &r);
}
static void setup_window(AVFormatContext *s)
{
XCBGrabContext *c = s->priv_data;
uint32_t mask = XCB_CW_OVERRIDE_REDIRECT | XCB_CW_EVENT_MASK;
uint32_t values[] = { 1,
XCB_EVENT_MASK_EXPOSURE |
XCB_EVENT_MASK_STRUCTURE_NOTIFY };
xcb_rectangle_t rect = { 0, 0, c->width, c->height };
c->window = xcb_generate_id(c->conn);
xcb_create_window(c->conn, XCB_COPY_FROM_PARENT,
c->window,
c->screen->root,
c->x - c->region_border,
c->y - c->region_border,
c->width + c->region_border * 2,
c->height + c->region_border * 2,
0,
XCB_WINDOW_CLASS_INPUT_OUTPUT,
XCB_COPY_FROM_PARENT,
mask, values);
#if CONFIG_LIBXCB_SHAPE
xcb_shape_rectangles(c->conn, XCB_SHAPE_SO_SUBTRACT,
XCB_SHAPE_SK_BOUNDING, XCB_CLIP_ORDERING_UNSORTED,
c->window,
c->region_border, c->region_border,
1, &rect);
#endif
xcb_map_window(c->conn, c->window);
draw_rectangle(s);
}
static av_cold int xcbgrab_read_header(AVFormatContext *s)
{
XCBGrabContext *c = s->priv_data;
int screen_num, ret;
const xcb_setup_t *setup;
char *display_name = av_strdup(s->filename);
if (!display_name)
return AVERROR(ENOMEM);
if (!sscanf(s->filename, "%[^+]+%d,%d", display_name, &c->x, &c->y)) {
*display_name = 0;
sscanf(s->filename, "+%d,%d", &c->x, &c->y);
}
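    /*
     * Informative: for a filename such as ":0.0+10,20" the first sscanf
     * leaves display_name = ":0.0" and sets x = 10, y = 20; a bare
     * "+10,20" makes it return 0, and the second sscanf picks up the
     * offsets on the default display.
     */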
c->conn = xcb_connect(display_name[0] ? display_name : NULL, &screen_num);
av_freep(&display_name);
if ((ret = xcb_connection_has_error(c->conn))) {
av_log(s, AV_LOG_ERROR, "Cannot open display %s, error %d.\n",
s->filename[0] ? s->filename : "default", ret);
return AVERROR(EIO);
}
setup = xcb_get_setup(c->conn);
c->screen = get_screen(setup, screen_num);
if (!c->screen) {
av_log(s, AV_LOG_ERROR, "The screen %d does not exist.\n",
screen_num);
xcbgrab_read_close(s);
return AVERROR(EIO);
}
ret = create_stream(s);
if (ret < 0) {
xcbgrab_read_close(s);
return ret;
}
#if CONFIG_LIBXCB_SHM
if ((c->has_shm = check_shm(c->conn)))
c->segment = xcb_generate_id(c->conn);
#endif
#if CONFIG_LIBXCB_XFIXES
if (c->draw_mouse) {
if (!(c->draw_mouse = check_xfixes(c->conn))) {
av_log(s, AV_LOG_WARNING,
"XFixes not available, cannot draw the mouse.\n");
}
if (c->bpp < 24) {
avpriv_report_missing_feature(s, "%d bits per pixel screen",
c->bpp);
c->draw_mouse = 0;
}
}
#endif
if (c->show_region)
setup_window(s);
return 0;
}
AVInputFormat ff_x11grab_xcb_demuxer = {
.name = "x11grab",
.long_name = NULL_IF_CONFIG_SMALL("X11 screen capture, using XCB"),
.priv_data_size = sizeof(XCBGrabContext),
.read_header = xcbgrab_read_header,
.read_packet = xcbgrab_read_packet,
.read_close = xcbgrab_read_close,
.flags = AVFMT_NOFILE,
.priv_class = &xcbgrab_class,
};


@@ -0,0 +1,383 @@
/*
* Copyright (c) 2013 Jeff Moguillansky
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* XVideo output device
*
* TODO:
* - add support to more formats
*/
#include <X11/Xlib.h>
#include <X11/extensions/Xv.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xvlib.h>
#include <sys/shm.h>
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavformat/internal.h"
#include "avdevice.h"
typedef struct {
AVClass *class;
GC gc;
Window window;
int64_t window_id;
char *window_title;
int window_width, window_height;
int window_x, window_y;
int dest_x, dest_y; /**< display area position */
unsigned int dest_w, dest_h; /**< display area dimensions */
Display* display;
char *display_name;
XvImage* yuv_image;
enum AVPixelFormat image_format;
int image_width, image_height;
XShmSegmentInfo yuv_shminfo;
int xv_port;
Atom wm_delete_message;
} XVContext;
typedef struct XVTagFormatMap
{
int tag;
enum AVPixelFormat format;
} XVTagFormatMap;
static const XVTagFormatMap tag_codec_map[] = {
{ MKTAG('I','4','2','0'), AV_PIX_FMT_YUV420P },
{ MKTAG('U','Y','V','Y'), AV_PIX_FMT_UYVY422 },
{ MKTAG('Y','U','Y','2'), AV_PIX_FMT_YUYV422 },
{ 0, AV_PIX_FMT_NONE }
};
static int xv_get_tag_from_format(enum AVPixelFormat format)
{
const XVTagFormatMap *m = tag_codec_map;
int i;
for (i = 0; m->tag; m = &tag_codec_map[++i]) {
if (m->format == format)
return m->tag;
}
return 0;
}
static int xv_write_trailer(AVFormatContext *s)
{
XVContext *xv = s->priv_data;
if (xv->display) {
XShmDetach(xv->display, &xv->yuv_shminfo);
if (xv->yuv_image)
shmdt(xv->yuv_image->data);
XFree(xv->yuv_image);
if (xv->gc)
XFreeGC(xv->display, xv->gc);
XCloseDisplay(xv->display);
}
return 0;
}
static int xv_write_header(AVFormatContext *s)
{
XVContext *xv = s->priv_data;
unsigned int num_adaptors;
XvAdaptorInfo *ai;
XvImageFormatValues *fv;
XColor fgcolor;
XWindowAttributes window_attrs;
int num_formats = 0, j, tag, ret;
AVCodecContext *encctx = s->streams[0]->codec;
if ( s->nb_streams > 1
|| encctx->codec_type != AVMEDIA_TYPE_VIDEO
|| encctx->codec_id != AV_CODEC_ID_RAWVIDEO) {
av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
return AVERROR(EINVAL);
}
if (!(tag = xv_get_tag_from_format(encctx->pix_fmt))) {
av_log(s, AV_LOG_ERROR,
"Unsupported pixel format '%s', only yuv420p, uyvy422, yuyv422 are currently supported\n",
av_get_pix_fmt_name(encctx->pix_fmt));
return AVERROR_PATCHWELCOME;
}
xv->image_format = encctx->pix_fmt;
xv->display = XOpenDisplay(xv->display_name);
if (!xv->display) {
av_log(s, AV_LOG_ERROR, "Could not open the X11 display '%s'\n", xv->display_name);
return AVERROR(EINVAL);
}
xv->image_width = encctx->width;
xv->image_height = encctx->height;
if (!xv->window_width && !xv->window_height) {
AVRational sar = encctx->sample_aspect_ratio;
xv->window_width = encctx->width;
xv->window_height = encctx->height;
if (sar.num) {
if (sar.num > sar.den)
xv->window_width = av_rescale(xv->window_width, sar.num, sar.den);
if (sar.num < sar.den)
xv->window_height = av_rescale(xv->window_height, sar.den, sar.num);
}
}
if (!xv->window_id) {
xv->window = XCreateSimpleWindow(xv->display, DefaultRootWindow(xv->display),
xv->window_x, xv->window_y,
xv->window_width, xv->window_height,
0, 0, 0);
if (!xv->window_title) {
if (!(xv->window_title = av_strdup(s->filename))) {
ret = AVERROR(ENOMEM);
goto fail;
}
}
XStoreName(xv->display, xv->window, xv->window_title);
xv->wm_delete_message = XInternAtom(xv->display, "WM_DELETE_WINDOW", False);
XSetWMProtocols(xv->display, xv->window, &xv->wm_delete_message, 1);
XMapWindow(xv->display, xv->window);
} else
xv->window = xv->window_id;
if (XvQueryAdaptors(xv->display, DefaultRootWindow(xv->display), &num_adaptors, &ai) != Success) {
ret = AVERROR_EXTERNAL;
goto fail;
}
if (!num_adaptors) {
av_log(s, AV_LOG_ERROR, "No X-Video adaptors present\n");
return AVERROR(ENODEV);
}
xv->xv_port = ai[0].base_id;
XvFreeAdaptorInfo(ai);
fv = XvListImageFormats(xv->display, xv->xv_port, &num_formats);
if (!fv) {
ret = AVERROR_EXTERNAL;
goto fail;
}
for (j = 0; j < num_formats; j++) {
if (fv[j].id == tag) {
break;
}
}
XFree(fv);
if (j >= num_formats) {
av_log(s, AV_LOG_ERROR,
"Device does not support pixel format %s, aborting\n",
av_get_pix_fmt_name(encctx->pix_fmt));
ret = AVERROR(EINVAL);
goto fail;
}
xv->gc = XCreateGC(xv->display, xv->window, 0, 0);
xv->image_width = encctx->width;
xv->image_height = encctx->height;
xv->yuv_image = XvShmCreateImage(xv->display, xv->xv_port, tag, 0,
xv->image_width, xv->image_height, &xv->yuv_shminfo);
xv->yuv_shminfo.shmid = shmget(IPC_PRIVATE, xv->yuv_image->data_size,
IPC_CREAT | 0777);
xv->yuv_shminfo.shmaddr = (char *)shmat(xv->yuv_shminfo.shmid, 0, 0);
xv->yuv_image->data = xv->yuv_shminfo.shmaddr;
xv->yuv_shminfo.readOnly = False;
XShmAttach(xv->display, &xv->yuv_shminfo);
XSync(xv->display, False);
shmctl(xv->yuv_shminfo.shmid, IPC_RMID, 0);
XGetWindowAttributes(xv->display, xv->window, &window_attrs);
fgcolor.red = fgcolor.green = fgcolor.blue = 0;
fgcolor.flags = DoRed | DoGreen | DoBlue;
XAllocColor(xv->display, window_attrs.colormap, &fgcolor);
XSetForeground(xv->display, xv->gc, fgcolor.pixel);
// force display area recalculation at the first frame
xv->window_width = xv->window_height = 0;
return 0;
fail:
xv_write_trailer(s);
return ret;
}
static void compute_display_area(AVFormatContext *s)
{
XVContext *xv = s->priv_data;
AVRational sar, dar; /* sample and display aspect ratios */
AVStream *st = s->streams[0];
AVCodecContext *encctx = st->codec;
/* compute overlay width and height from the codec context information */
sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
dar = av_mul_q(sar, (AVRational){ encctx->width, encctx->height });
/* we suppose the screen has a 1/1 sample aspect ratio */
/* fit in the window */
if (av_cmp_q(dar, (AVRational){ xv->dest_w, xv->dest_h }) > 0) {
/* fit in width */
xv->dest_y = xv->dest_h;
xv->dest_x = 0;
xv->dest_h = av_rescale(xv->dest_w, dar.den, dar.num);
xv->dest_y -= xv->dest_h;
xv->dest_y /= 2;
} else {
/* fit in height */
xv->dest_x = xv->dest_w;
xv->dest_y = 0;
xv->dest_w = av_rescale(xv->dest_h, dar.num, dar.den);
xv->dest_x -= xv->dest_w;
xv->dest_x /= 2;
}
}

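/*
 * Worked example (illustrative, not from the original source): a 1440x1080
 * stream with a 4:3 sample aspect ratio has dar = 4/3 * 1440/1080 = 16/9,
 * so in a 1280x720 (16:9) window it fits exactly: dest_x = dest_y = 0 and
 * dest_w x dest_h = 1280x720, leaving no borders to clear.
 */
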
static int xv_repaint(AVFormatContext *s)
{
XVContext *xv = s->priv_data;
XWindowAttributes window_attrs;
XGetWindowAttributes(xv->display, xv->window, &window_attrs);
if (window_attrs.width != xv->window_width || window_attrs.height != xv->window_height) {
XRectangle rect[2];
xv->dest_w = window_attrs.width;
xv->dest_h = window_attrs.height;
compute_display_area(s);
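        /* clear the left/right bars around a pillarboxed image */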
if (xv->dest_x) {
rect[0].width = rect[1].width = xv->dest_x;
rect[0].height = rect[1].height = window_attrs.height;
rect[0].y = rect[1].y = 0;
rect[0].x = 0;
rect[1].x = xv->dest_w + xv->dest_x;
XFillRectangles(xv->display, xv->window, xv->gc, rect, 2);
}
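        /* clear the top/bottom bars around a letterboxed image */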
if (xv->dest_y) {
rect[0].width = rect[1].width = window_attrs.width;
rect[0].height = rect[1].height = xv->dest_y;
rect[0].x = rect[1].x = 0;
rect[0].y = 0;
rect[1].y = xv->dest_h + xv->dest_y;
XFillRectangles(xv->display, xv->window, xv->gc, rect, 2);
}
}
if (XvShmPutImage(xv->display, xv->xv_port, xv->window, xv->gc,
xv->yuv_image, 0, 0, xv->image_width, xv->image_height,
xv->dest_x, xv->dest_y, xv->dest_w, xv->dest_h, True) != Success) {
av_log(s, AV_LOG_ERROR, "Could not copy image to XV shared memory buffer\n");
return AVERROR_EXTERNAL;
}
return 0;
}

static int write_picture(AVFormatContext *s, AVPicture *pict)
{
XVContext *xv = s->priv_data;
XvImage *img = xv->yuv_image;
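    /* plane pointers into the shared-memory XvImage */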
uint8_t *data[3] = {
img->data + img->offsets[0],
img->data + img->offsets[1],
img->data + img->offsets[2]
};
/* Check messages. Window might get closed. */
if (!xv->window_id) {
XEvent event;
while (XPending(xv->display)) {
XNextEvent(xv->display, &event);
if (event.type == ClientMessage && event.xclient.data.l[0] == xv->wm_delete_message) {
av_log(xv, AV_LOG_DEBUG, "Window close event.\n");
return AVERROR(EPIPE);
}
}
}
av_image_copy(data, img->pitches, (const uint8_t **)pict->data, pict->linesize,
xv->image_format, img->width, img->height);
return xv_repaint(s);
}

static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
AVPicture pict;
AVCodecContext *ctx = s->streams[0]->codec;
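    /* wrap the raw packet data as picture planes; no pixel data is copied here */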
avpicture_fill(&pict, pkt->data, ctx->pix_fmt, ctx->width, ctx->height);
return write_picture(s, &pict);
}

static int xv_write_frame(AVFormatContext *s, int stream_index, AVFrame **frame,
unsigned flags)
{
/* xv_write_header() should have accepted only supported formats */
if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
return 0;
return write_picture(s, (AVPicture *)*frame);
}

static int xv_control_message(AVFormatContext *s, int type, void *data, size_t data_size)
{
    switch (type) {
case AV_APP_TO_DEV_WINDOW_REPAINT:
return xv_repaint(s);
default:
break;
}
return AVERROR(ENOSYS);
}

#define OFFSET(x) offsetof(XVContext, x)
static const AVOption options[] = {
{ "display_name", "set display name", OFFSET(display_name), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_id", "set existing window id", OFFSET(window_id), AV_OPT_TYPE_INT64, {.i64 = 0 }, 0, INT64_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_size", "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_title", "set window title", OFFSET(window_title), AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_x", "set window x offset", OFFSET(window_x), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ "window_y", "set window y offset", OFFSET(window_y), AV_OPT_TYPE_INT, {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
{ NULL }
};

static const AVClass xv_class = {
.class_name = "xvideo outdev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};

AVOutputFormat ff_xv_muxer = {
.name = "xv",
.long_name = NULL_IF_CONFIG_SMALL("XV (XVideo) output device"),
.priv_data_size = sizeof(XVContext),
.audio_codec = AV_CODEC_ID_NONE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = xv_write_header,
.write_packet = xv_write_packet,
.write_uncoded_frame = xv_write_frame,
.write_trailer = xv_write_trailer,
.control_message = xv_control_message,
.flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
.priv_class = &xv_class,
};
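
/*
 * Example invocation (a sketch, not part of the original file): assuming an
 * ffmpeg build with this device enabled and an X server reachable through
 * $DISPLAY, something like
 *
 *     ffmpeg -i INPUT -pix_fmt yuv420p -f xv "my window"
 *
 * should play INPUT in an XVideo window. The output "filename" is only used
 * as the default window title (see xv_write_header() above); the options in
 * the table above (window_size, window_x, window_y, ...) position and size
 * the window.
 */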