Fplay: ffmpeg-2.5 compatible edition

git-svn-id: svn://kolibrios.org@6117 a494cfbc-eb01-0410-851d-a64ba20cac60
This commit is contained in:
Sergey Semyonov (Serge) 2016-02-01 15:10:02 +00:00
parent eaf749c10f
commit 7218b51863
6 changed files with 759 additions and 139 deletions

View File

@ -19,7 +19,7 @@ INCLUDES+=-I$(SDK_DIR)/sources/freetype/include
#-I$(SDK_DIR)/sources/vaapi/libva-1.4.1
#DEFINES= -DDEBUG=1 -DPACKAGE_NAME=\"Fplay-vaapi\"
LIBS:= -lsync -lavdevice.dll -lavformat.dll -lavcodec.dll -lavutil.dll -lswscale.dll
LIBS:= -lavdevice.dll -lavformat.dll -lavcodec.dll -lavutil.dll -lswscale.dll
LIBS+= -lswresample.dll -lsound -lpixlib3 -lfreetype.dll -lva.dll -lgcc -lc.dll -lapp
LIBPATH:= -L$(LIB_DIR) -L/home/autobuild/tools/win32/mingw32/lib

View File

@ -23,8 +23,6 @@ volatile enum player_state player_state;
volatile enum player_state decoder_state;
volatile enum player_state sound_state;
extern mutex_t driver_lock;
static SNDBUF hBuff;
static int snd_format;
@ -108,7 +106,7 @@ int decode_audio(AVCodecContext *ctx, queue_t *qa)
if (!aFrame)
{
if (!(aFrame = avcodec_alloc_frame()))
if (!(aFrame = av_frame_alloc()))
return -1;
} else
avcodec_get_frame_defaults(aFrame);

View File

@ -25,14 +25,7 @@ uint32_t win_width, win_height;
void decoder();
int fplay_init_context(AVCodecContext *avctx);
AVFormatContext *pFormatCtx;
AVCodecContext *pCodecCtx;
AVCodecContext *aCodecCtx;
AVCodec *pCodec;
AVCodec *aCodec;
AVFrame *pFrame;
int videoStream;
int audioStream;
int have_sound = 0;
@ -44,8 +37,7 @@ char *movie_file;
void flush_video();
queue_t q_video;
queue_t q_audio;
int64_t rewind_pos;
int64_t stream_duration;
@ -54,15 +46,17 @@ int threads_running = DECODER_THREAD;
extern double audio_base;
double get_audio_base()
double get_audio_base(vst_t* vst)
{
return (double)av_q2d(pFormatCtx->streams[audioStream]->time_base)*1000;
return (double)av_q2d(vst->fCtx->streams[vst->aStream]->time_base)*1000;
};
int main( int argc, char *argv[])
{
int i;
static vst_t vst;
int i, ret;
char *file_name, *dot;
if(argc < 2)
@ -84,16 +78,16 @@ int main( int argc, char *argv[])
avdevice_register_all();
av_register_all();
if( avformat_open_input(&pFormatCtx, movie_file, NULL, NULL) < 0)
if( avformat_open_input(&vst.fCtx, movie_file, NULL, NULL) < 0)
{
printf("Cannot open file %s\n\r", movie_file);
return -1; // Couldn't open file
};
pFormatCtx->flags |= AVFMT_FLAG_GENPTS;
vst.fCtx->flags |= AVFMT_FLAG_GENPTS;
// Retrieve stream information
if(avformat_find_stream_info(pFormatCtx, NULL)<0)
if(avformat_find_stream_info(vst.fCtx, NULL) < 0)
{
printf("Cannot find streams\n\r");
return -1;
@ -109,32 +103,33 @@ int main( int argc, char *argv[])
}
else movie_file = file_name;
stream_duration = pFormatCtx->duration;
stream_duration = vst.fCtx->duration;
// Find the first video stream
videoStream=-1;
audioStream=-1;
for(i=0; i < pFormatCtx->nb_streams; i++)
{
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO
&& videoStream < 0)
{
videoStream=i;
video_time_base = pFormatCtx->streams[i]->time_base;
if(stream_duration == 0)
stream_duration = pFormatCtx->streams[i]->duration;
vst.vStream = -1;
vst.aStream = -1;
}
if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
audioStream < 0)
for(i=0; i < vst.fCtx->nb_streams; i++)
{
audioStream=i;
if(vst.fCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO
&& vst.vStream < 0)
{
vst.vStream = i;
video_time_base = vst.fCtx->streams[i]->time_base;
if(stream_duration == 0)
stream_duration = pFormatCtx->streams[i]->duration;
stream_duration = vst.fCtx->streams[i]->duration;
}
if(vst.fCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
vst.aStream < 0)
{
vst.aStream = i;
if(stream_duration == 0)
stream_duration = vst.fCtx->streams[i]->duration;
}
}
if(videoStream==-1)
if(vst.vStream==-1)
{
printf("Video stream not detected\n\r");
return -1; // Didn't find a video stream
@ -143,59 +138,60 @@ int main( int argc, char *argv[])
// __asm__ __volatile__("int3");
// Get a pointer to the codec context for the video stream
pCodecCtx = pFormatCtx->streams[videoStream]->codec;
aCodecCtx = pFormatCtx->streams[audioStream]->codec;
vst.vCtx = vst.fCtx->streams[vst.vStream]->codec;
vst.aCtx = vst.fCtx->streams[vst.aStream]->codec;
// Find the decoder for the video stream
vst.vCodec = avcodec_find_decoder(vst.vCtx->codec_id);
printf("codec id %x name %s\n",vst.vCtx->codec_id, vst.vCodec->name);
printf("ctx->pix_fmt %d\n", vst.vCtx->pix_fmt);
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
// printf("ctx->pix_fmt %d\n", pCodecCtx->pix_fmt);
if(pCodec==NULL) {
if(vst.vCodec == NULL)
{
printf("Unsupported codec with id %d for input stream %d\n",
pCodecCtx->codec_id, videoStream);
vst.vCtx->codec_id, vst.vStream);
return -1; // Codec not found
}
if(avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
if(avcodec_open2(vst.vCtx, vst.vCodec, NULL) < 0)
{
printf("Error while opening codec for input stream %d\n",
videoStream);
vst.vStream);
return -1; // Could not open codec
};
// printf("ctx->pix_fmt %d\n", pCodecCtx->pix_fmt);
mutex_init(&q_video.lock);
mutex_init(&q_audio.lock);
mutex_init(&vst.q_video.lock);
mutex_init(&vst.q_audio.lock);
mutex_init(&vst.gpu_lock);
if (aCodecCtx->channels > 0)
aCodecCtx->request_channels = FFMIN(2, aCodecCtx->channels);
if (vst.aCtx->channels > 0)
vst.aCtx->request_channels = FFMIN(2, vst.aCtx->channels);
else
aCodecCtx->request_channels = 2;
vst.aCtx->request_channels = 2;
aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
vst.aCodec = avcodec_find_decoder(vst.aCtx->codec_id);
if(aCodec)
if(vst.aCodec)
{
if(avcodec_open2(aCodecCtx, aCodec, NULL) >= 0 )
if(avcodec_open2(vst.aCtx, vst.aCodec, NULL) >= 0 )
{
WAVEHEADER whdr;
int fmt;
int channels;
printf("audio stream rate %d channels %d format %d\n",
aCodecCtx->sample_rate, aCodecCtx->channels, aCodecCtx->sample_fmt );
vst.aCtx->sample_rate, vst.aCtx->channels, vst.aCtx->sample_fmt );
whdr.riff_id = 0x46464952;
whdr.riff_format = 0x45564157;
whdr.wFormatTag = 0x01;
whdr.nSamplesPerSec = aCodecCtx->sample_rate;
whdr.nSamplesPerSec = vst.aCtx->sample_rate;
whdr.nChannels = 2;
whdr.wBitsPerSample = 16;
sample_rate = aCodecCtx->sample_rate;
sample_rate = vst.aCtx->sample_rate;
fmt = test_wav(&whdr);
@ -222,10 +218,10 @@ int main( int argc, char *argv[])
}
else printf("Unsupported audio codec!\n");
if( !init_video(pCodecCtx))
if(!init_video(&vst))
return 0;
decoder();
decoder(&vst);
// Free the YUV frame
av_free(pFrame);
@ -240,31 +236,31 @@ int main( int argc, char *argv[])
if(astream.lock.handle)
mutex_destroy(&astream.lock);
mutex_destroy(&q_video.lock);
mutex_destroy(&q_audio.lock);
mutex_destroy(&vst.q_video.lock);
mutex_destroy(&vst.q_audio.lock);
return 0;
}
static int load_frame()
static int load_frame(vst_t *vst)
{
AVPacket packet;
int err;
err = av_read_frame(pFormatCtx, &packet);
err = av_read_frame(vst->fCtx, &packet);
if( err == 0)
{
if(packet.stream_index==videoStream)
put_packet(&q_video, &packet);
else if( (packet.stream_index == audioStream) &&
if(packet.stream_index == vst->vStream)
put_packet(&vst->q_video, &packet);
else if( (packet.stream_index == vst->aStream) &&
(have_sound != 0) )
{
put_packet(&q_audio, &packet);
put_packet(&vst->q_audio, &packet);
if(audio_base == -1.0)
{
if (packet.pts != AV_NOPTS_VALUE)
audio_base = get_audio_base() * packet.pts;
audio_base = get_audio_base(vst) * packet.pts;
// printf("audio base %f\n", audio_base);
};
}
@ -278,30 +274,28 @@ static int load_frame()
static int fill_queue()
static int fill_queue(vst_t* vst)
{
int err = 0;
AVPacket packet;
while( (q_video.size < 4*1024*1024) &&
!err )
err = load_frame();
while( (vst->q_video.size < 4*1024*1024) && !err )
err = load_frame(vst);
return err;
};
static void flush_all()
static void flush_all(vst_t* vst)
{
AVPacket packet;
avcodec_flush_buffers(pCodecCtx);
avcodec_flush_buffers(aCodecCtx);
while( get_packet(&q_video, &packet) != 0)
avcodec_flush_buffers(vst->vCtx);
avcodec_flush_buffers(vst->aCtx);
while( get_packet(&vst->q_video, &packet) != 0)
av_free_packet(&packet);
while( get_packet(&q_audio, &packet)!= 0)
while( get_packet(&vst->q_audio, &packet)!= 0)
av_free_packet(&packet);
flush_video();
@ -309,7 +303,7 @@ static void flush_all()
astream.count = 0;
};
void decoder()
void decoder(vst_t* vst)
{
int eof;
AVPacket packet;
@ -326,17 +320,17 @@ void decoder()
switch(decoder_state)
{
case PREPARE:
eof = fill_queue();
eof = fill_queue(vst);
do
{
if( (q_video.size < 4*1024*1024) &&
if( (vst->q_video.size < 4*1024*1024) &&
(eof == 0) )
{
eof = load_frame();
eof = load_frame(vst);
}
decode_video(pCodecCtx, &q_video);
ret = decode_audio(aCodecCtx, &q_audio);
decode_video(vst);
ret = decode_audio(vst->aCtx, &vst->q_audio);
}while(astream.count < resampler_size*2 &&
ret == 1);
@ -345,13 +339,13 @@ void decoder()
player_state = PLAY;
case PLAY:
if( (q_video.size < 4*1024*1024) &&
if( (vst->q_video.size < 4*1024*1024) &&
(eof == 0) )
{
eof = load_frame();
eof = load_frame(vst);
}
vret = decode_video(pCodecCtx, &q_video);
aret = decode_audio(aCodecCtx, &q_audio);
vret = decode_video(vst);
aret = decode_audio(vst->aCtx, &vst->q_audio);
ret = vret | aret;
if( eof && !ret)
@ -362,10 +356,10 @@ void decoder()
if( (vret & aret) == -1)
{
if( (q_video.size < 4*1024*1024) &&
if( (vst->q_video.size < 4*1024*1024) &&
(eof == 0) )
{
eof = load_frame();
eof = load_frame(vst);
yield();
continue;
};
@ -385,14 +379,14 @@ void decoder()
while(sound_state != STOP)
delay(1);
flush_all();
flush_all(vst);
if (pFormatCtx->start_time != AV_NOPTS_VALUE)
rewind_pos = pFormatCtx->start_time;
if (vst->fCtx->start_time != AV_NOPTS_VALUE)
rewind_pos = vst->fCtx->start_time;
else
rewind_pos = 0;
ret = avformat_seek_file(pFormatCtx, -1, INT64_MIN,
ret = avformat_seek_file(vst->fCtx, -1, INT64_MIN,
rewind_pos, INT64_MAX, 0);
decoder_state = STOP;
@ -402,7 +396,7 @@ void decoder()
while(sound_state != STOP)
yield();
flush_all();
flush_all(vst);
int opts = 0;
if(rewind_pos < 0)
{
@ -410,27 +404,23 @@ void decoder()
opts = AVSEEK_FLAG_BACKWARD;
};
if (pFormatCtx->start_time != AV_NOPTS_VALUE)
rewind_pos += pFormatCtx->start_time;
if (vst->fCtx->start_time != AV_NOPTS_VALUE)
rewind_pos += vst->fCtx->start_time;
// printf("rewind %8"PRId64"\n", rewind_pos);
min_pos = rewind_pos - 1000000;
max_pos = rewind_pos + 1000000;
ret = avformat_seek_file(pFormatCtx, -1, INT64_MIN,
ret = avformat_seek_file(vst->fCtx, -1, INT64_MIN,
rewind_pos, INT64_MAX, 0);
if (ret < 0)
{
printf("could not seek to position %f\n",
(double)rewind_pos / AV_TIME_BASE);
}
// printf("restart\n");
decoder_state = PREPARE;
break;
}
};
};

View File

@ -1,6 +1,6 @@
#include "pixlib3.h"
#include <libsync.h>
#include "pixlib3.h"
#define BLACK_MAGIC_SOUND
#define BLACK_MAGIC_VIDEO
@ -9,6 +9,7 @@ typedef unsigned int color_t;
typedef unsigned int count_t;
typedef struct render render_t;
typedef struct vstate vst_t;
#define HAS_LEFT (1<<0)
#define HAS_TOP (1<<1)
@ -17,6 +18,7 @@ typedef struct render render_t;
struct render
{
vst_t *vst;
uint32_t caps;
uint32_t ctx_width;
uint32_t ctx_height;
@ -65,7 +67,6 @@ enum player_state
#define ID_VOL_LEVEL 103
#define ID_VOL_CTRL 104
typedef struct
{
mutex_t lock;
@ -95,6 +96,24 @@ typedef struct {
int put_packet(queue_t *q, AVPacket *pkt);
int get_packet(queue_t *q, AVPacket *pkt);
struct vstate
{
AVFormatContext *fCtx; /* format context */
AVCodecContext *vCtx; /* video decoder context */
AVCodecContext *aCtx; /* audio decoder context */
AVCodec *vCodec; /* video codec */
AVCodec *aCodec; /* audio codec */
int vStream; /* video stream index */
int aStream; /* audio stream index */
queue_t q_video; /* video packets queue */
queue_t q_audio; /* audio packets queue */
mutex_t gpu_lock; /* gpu access lock. libdrm not yet thread safe :( */
};
#define DECODER_THREAD 1
#define AUDIO_THREAD 2
@ -104,7 +123,7 @@ extern int threads_running;
extern astream_t astream;
extern AVRational video_time_base;
render_t *create_render(window_t *win, AVCodecContext *ctx, uint32_t flags);
render_t *create_render(vst_t *vst, window_t *win, uint32_t flags);
void destroy_render(render_t *render);
int init_render(render_t *render, int width, int height);
void render_adjust_size(render_t *render, window_t *win);
@ -116,10 +135,11 @@ int init_audio(int format);
int audio_thread(void *param);
void set_audio_volume(int left, int right);
int init_video(AVCodecContext *ctx);
int init_video(vst_t* vst);
int video_thread(void *param);
int decode_video(AVCodecContext *ctx, queue_t *qv);
void decoder(vst_t *vst);
int decode_video(vst_t* vst);
int decode_audio(AVCodecContext *ctx, queue_t *qa);
double get_master_clock(void);

612
contrib/media/fplay/vaapi.c Normal file
View File

@ -0,0 +1,612 @@
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <fcntl.h>
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>
#include <libavcodec/vaapi.h>
#include <va/va.h>
#include <va/va_drmcommon.h>
#include <va/drm/va_drm.h>
#include <kos32sys.h>
#include "winlib/winlib.h"
#include "fplay.h"
extern int dfx;
struct hw_profile
{
enum AVCodecID av_codec;
int ff_profile;
uint64_t va_profile;
};
#define ENTER() printf("enter %s\n",__FUNCTION__)
#define LEAVE() printf("leave %s\n",__FUNCTION__)
#define FAIL() printf("fail %s\n",__FUNCTION__)
#if DEBUG
# define D(x) x
# define bug printf
#else
# define D(x)
#endif
#undef ARRAY_ELEMS
#define ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0]))
static int drm_fd = 0;
static struct vaapi_context *v_context;
static VASurfaceID v_surface_id[4];
/* HEVC / VP9 table entries need sufficiently new libva (and FFmpeg) headers. */
#define HAS_HEVC VA_CHECK_VERSION(0, 38, 0)
#define HAS_VP9 (VA_CHECK_VERSION(0, 38, 1) && defined(FF_PROFILE_VP9_0))

/* PE(codec, ff_profile, va_profile): one table row mapping an FFmpeg codec id
 * plus FF_PROFILE_* constant to the corresponding VAProfile enumerator. */
#define PE(av_codec_id, ff_profile, vdp_profile) \
    {AV_CODEC_ID_ ## av_codec_id, FF_PROFILE_ ## ff_profile, \
     VAProfile ## vdp_profile}

/* Codec/profile pairs this player can hand to the VA-API hardware decoder.
 * The table is terminated by a zero entry (av_codec == 0) — iteration code
 * relies on that sentinel. */
static const struct hw_profile profiles[] = {
    PE(MPEG2VIDEO, MPEG2_MAIN, MPEG2Main),
    PE(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2Simple),
    PE(MPEG4, MPEG4_ADVANCED_SIMPLE, MPEG4AdvancedSimple),
    PE(MPEG4, MPEG4_MAIN, MPEG4Main),
    PE(MPEG4, MPEG4_SIMPLE, MPEG4Simple),
    PE(H264, H264_HIGH, H264High),
    PE(H264, H264_MAIN, H264Main),
    PE(H264, H264_BASELINE, H264Baseline),
    PE(VC1, VC1_ADVANCED, VC1Advanced),
    PE(VC1, VC1_MAIN, VC1Main),
    PE(VC1, VC1_SIMPLE, VC1Simple),
    PE(WMV3, VC1_ADVANCED, VC1Advanced),
    PE(WMV3, VC1_MAIN, VC1Main),
    PE(WMV3, VC1_SIMPLE, VC1Simple),
#if HAS_HEVC
    PE(HEVC, HEVC_MAIN, HEVCMain),
    PE(HEVC, HEVC_MAIN_10, HEVCMain10),
#endif
#if HAS_VP9
    PE(VP9, VP9_0, VP9Profile0),
#endif
    {0}   /* sentinel */
};
int va_check_codec_support(enum AVCodecID id)
{
for (int n = 0; profiles[n].av_codec; n++) {
if (profiles[n].av_codec == id)
return 1;
}
return 0;
}
/* Check a VAStatus result: returns 1 on success; on failure logs the libva
 * error string for `msg` to stderr and returns 0. */
static int vaapi_check_status(VAStatus status, const char *msg)
{
    if (status == VA_STATUS_SUCCESS)
        return 1;

    fprintf(stderr, "[%s] %s: %s\n", PACKAGE_NAME, msg, vaErrorStr(status));
    return 0;
};
/* Debug helper: printable name of a VADisplayAttribType value
 * (e.g. "VADisplayAttribHue"). Returns "<unknown>" for values not in
 * the table. */
static const char *string_of_VADisplayAttribType(VADisplayAttribType type)
{
    switch (type) {
#define TYPE(type) \
    case VADisplayAttrib##type: return "VADisplayAttrib" #type
    TYPE(Brightness);
    TYPE(Contrast);
    TYPE(Hue);
    TYPE(Saturation);
    TYPE(BackgroundColor);
#if !VA_CHECK_VERSION(0,34,0)
    TYPE(DirectSurface);   /* attribute removed in libva >= 0.34 */
#endif
#if VA_CHECK_VERSION(0,32,0)
    TYPE(Rotation);        /* attribute added in libva 0.32 */
#endif
#undef TYPE
    default: break;
    }
    return "<unknown>";
}
/* Debug helper: printable name of a VAProfile value
 * (e.g. "VAProfileH264High"). Returns "<unknown>" for values not in
 * the table. */
static const char *string_of_VAProfile(VAProfile profile)
{
    switch (profile) {
#define PROFILE(profile) \
    case VAProfile##profile: return "VAProfile" #profile
    PROFILE(MPEG2Simple);
    PROFILE(MPEG2Main);
    PROFILE(MPEG4Simple);
    PROFILE(MPEG4AdvancedSimple);
    PROFILE(MPEG4Main);
#if VA_CHECK_VERSION(0,32,0)
    PROFILE(JPEGBaseline);             /* profiles added in libva 0.32 */
    PROFILE(H263Baseline);
    PROFILE(H264ConstrainedBaseline);
#endif
    PROFILE(H264Baseline);
    PROFILE(H264Main);
    PROFILE(H264High);
    PROFILE(VC1Simple);
    PROFILE(VC1Main);
    PROFILE(VC1Advanced);
#undef PROFILE
    default: break;
    }
    return "<unknown>";
}
/* Debug helper: printable name of a VAEntrypoint value
 * (e.g. "VAEntrypointVLD"). Returns "<unknown>" for values not in
 * the table. */
static const char *string_of_VAEntrypoint(VAEntrypoint entrypoint)
{
    switch (entrypoint) {
#define ENTRYPOINT(entrypoint) \
    case VAEntrypoint##entrypoint: return "VAEntrypoint" #entrypoint
    ENTRYPOINT(VLD);
    ENTRYPOINT(IZZ);
    ENTRYPOINT(IDCT);
    ENTRYPOINT(MoComp);
    ENTRYPOINT(Deblocking);
#if VA_CHECK_VERSION(0,32,0)
    ENTRYPOINT(EncSlice);      /* encode entrypoints added in libva 0.32 */
    ENTRYPOINT(EncPicture);
#endif
#undef ENTRYPOINT
    default: break;
    }
    return "<unknown>";
}
/* Open a DRM-backed VA display through the KolibriOS "DISPLAY" service.
 * On success the service handle is kept in the file-scope drm_fd and the
 * display is returned; on any failure drm_fd is reset to 0 and NULL is
 * returned. */
VADisplay va_open_display(void)
{
    VADisplay dpy = NULL;

    drm_fd = get_service("DISPLAY");
    if (drm_fd != 0)
    {
        dpy = vaGetDisplayDRM(drm_fd);
        if (dpy == NULL)
            drm_fd = 0;     /* display unavailable: drop the handle */
    }
    return dpy;
};
/* One-time initialization of the global VA-API context (v_context) for
 * `display`: initializes libva, dumps the display attributes in debug
 * builds, and allocates the vaapi_context with invalid config/context ids.
 *
 * Returns 0 on success (or if already initialized), -1 on failure.
 *
 * Fix: the temporary display_attrs array was only freed on the error path
 * and leaked on every successful call. */
int vaapi_init(VADisplay display)
{
    struct vaapi_context *vaapi;
    int major_version, minor_version;
    int i, num_display_attrs, max_display_attrs;
    VADisplayAttribute *display_attrs = NULL;
    VAStatus status;

    if (v_context)
        return 0;               /* already initialized */

    if (!display)
        goto error;
    D(bug("VA display %p\n", display));

    status = vaInitialize(display, &major_version, &minor_version);
    if (!vaapi_check_status(status, "vaInitialize()"))
        goto error;
    D(bug("VA API version %d.%d\n", major_version, minor_version));

    max_display_attrs = vaMaxNumDisplayAttributes(display);
    display_attrs = malloc(max_display_attrs * sizeof(display_attrs[0]));
    if (!display_attrs)
        goto error;

    num_display_attrs = 0; /* XXX: workaround old GMA500 bug */
    status = vaQueryDisplayAttributes(display, display_attrs, &num_display_attrs);
    if (!vaapi_check_status(status, "vaQueryDisplayAttributes()"))
        goto error;
    D(bug("%d display attributes available\n", num_display_attrs));

    for (i = 0; i < num_display_attrs; i++) {
        VADisplayAttribute * const display_attr = &display_attrs[i];
        D(bug("  %-32s (%s/%s) min %d max %d value 0x%x\n",
              string_of_VADisplayAttribType(display_attr->type),
              (display_attr->flags & VA_DISPLAY_ATTRIB_GETTABLE) ? "get" : "---",
              (display_attr->flags & VA_DISPLAY_ATTRIB_SETTABLE) ? "set" : "---",
              display_attr->min_value,
              display_attr->max_value,
              display_attr->value));
    }

    if ((vaapi = calloc(1, sizeof(*vaapi))) == NULL)
        goto error;

    vaapi->display    = display;
    vaapi->config_id  = VA_INVALID_ID;   /* created later by vaapi_init_decoder */
    vaapi->context_id = VA_INVALID_ID;

    v_context = vaapi;

    free(display_attrs);    /* previously leaked on the success path */
    return 0;

error:
    free(display_attrs);
    return -1;
}
/* Return 1 if the VA display supports `profile`, 0 otherwise.
 *
 * Fixes: the calloc'd profile array was leaked on every path (early return
 * and normal exit), and the calloc result was used unchecked. */
static int has_profile(struct vaapi_context *vaapi, VAProfile profile)
{
    VAProfile *profile_list;
    int n_profiles;
    VAStatus status;
    int i;
    int found = 0;

    profile_list = calloc(vaMaxNumProfiles(vaapi->display),
                          sizeof(profile_list[0]));
    if (!profile_list)
        return 0;

    status = vaQueryConfigProfiles(vaapi->display, profile_list, &n_profiles);
    if (!vaapi_check_status(status, "vaQueryConfigProfiles()"))
    {
        free(profile_list);
        return 0;
    }

    D(bug("%d profiles available\n", n_profiles));

    for (i = 0; i < n_profiles; i++)
    {
        if (profile_list[i] == profile)
        {
            found = 1;
            break;
        }
    }

    free(profile_list);     /* previously leaked */
    return found;
}
/* Return 1 if `profile` on this VA display supports `entrypoint`
 * (e.g. VAEntrypointVLD), 0 otherwise.
 *
 * Fixes: the calloc'd entrypoint array was leaked on every path, and the
 * calloc result was used unchecked. */
static int has_entrypoint(struct vaapi_context *vaapi, VAProfile profile,
                          VAEntrypoint entrypoint)
{
    VAEntrypoint *entrypoint_list;
    int n_entrypoints;
    VAStatus status;
    int i;
    int found = 0;

    entrypoint_list = calloc(vaMaxNumEntrypoints(vaapi->display),
                             sizeof(entrypoint_list[0]));
    if (!entrypoint_list)
        return 0;

    status = vaQueryConfigEntrypoints(vaapi->display, profile,
                                      entrypoint_list, &n_entrypoints);
    if (!vaapi_check_status(status, "vaQueryConfigEntrypoints()"))
    {
        free(entrypoint_list);
        return 0;
    }

    D(bug("%d entrypoints available for %s\n", n_entrypoints,
          string_of_VAProfile(profile)));

    for (i = 0; i < n_entrypoints; i++)
    {
        if (entrypoint_list[i] == entrypoint)
        {
            found = 1;
            break;
        }
    }

    free(entrypoint_list);  /* previously leaked */
    return found;
}
/* Create (or re-create) the VA decoder config and context for the given
 * profile/entrypoint at the given coded picture size, and allocate the four
 * global decode surfaces (v_surface_id).
 *
 * Returns 0 on success, -1 on failure.
 *
 * Fixes: a successfully created config_id was leaked when vaCreateSurfaces
 * or vaCreateContext failed afterwards, and vaapi->config_id was left
 * pointing at a destroyed config during re-initialization.
 * NOTE(review): a previous context_id and previously created surfaces are
 * still not destroyed on re-init — confirm whether the reinit path matters. */
static int vaapi_init_decoder(VAProfile profile,
                              VAEntrypoint entrypoint,
                              unsigned int picture_width,
                              unsigned int picture_height)
{
    struct vaapi_context* const vaapi = v_context;
    VAConfigAttrib attrib;
    VAConfigID config_id = VA_INVALID_ID;
    VAContextID context_id = VA_INVALID_ID;
    VAStatus status;

    ENTER();

    if (!vaapi)
    {
        FAIL();
        return -1;
    };

    if (!has_profile(vaapi, profile))
    {
        FAIL();
        return -1;
    };

    if (!has_entrypoint(vaapi, profile, entrypoint))
    {
        FAIL();
        return -1;
    };

    /* Drop any configuration left from a previous initialization and clear
     * the stored id so a failure below cannot leave a dangling handle. */
    if (vaapi->config_id != VA_INVALID_ID)
    {
        vaDestroyConfig(vaapi->display, vaapi->config_id);
        vaapi->config_id = VA_INVALID_ID;
    }

    attrib.type = VAConfigAttribRTFormat;

    printf("vaGetConfigAttributes\n");
    status = vaGetConfigAttributes(vaapi->display, profile, entrypoint,
                                   &attrib, 1);
    if (!vaapi_check_status(status, "vaGetConfigAttributes()"))
    {
        FAIL();
        return -1;
    }

    if ((attrib.value & VA_RT_FORMAT_YUV420) == 0)
    {
        printf("Chroma format not supported.\n");
        FAIL();
        return -1;
    };

    printf("vaCreateConfig\n");
    status = vaCreateConfig(vaapi->display, profile, entrypoint,
                            &attrib, 1, &config_id);
    if (!vaapi_check_status(status, "vaCreateConfig()"))
    {
        FAIL();
        return -1;
    }

    printf("vaCreateSurfaces %dx%d\n",picture_width,picture_height);
    status = vaCreateSurfaces(vaapi->display, VA_RT_FORMAT_YUV420,
                              picture_width, picture_height,
                              v_surface_id, 4, NULL, 0);
    printf("v_surface_id_3 %x\n", v_surface_id[3]);
    if (!vaapi_check_status(status, "vaCreateSurfaces()"))
    {
        vaDestroyConfig(vaapi->display, config_id);   /* was leaked here */
        FAIL();
        return -1;
    };

    /* Debug dump: layout and DRM buffer handle of the first surface. */
    {
        VAImage vaimage;
        VABufferInfo info = {0};

        vaDeriveImage(vaapi->display, v_surface_id[0], &vaimage);
        printf("vaDeriveImage: %x fourcc: %x\n"
               "offset0: %d pitch0: %d\n"
               "offset1: %d pitch1: %d\n"
               "offset2: %d pitch2: %d\n",
               vaimage.buf, vaimage.format.fourcc,
               vaimage.offsets[0],vaimage.pitches[0],
               vaimage.offsets[1],vaimage.pitches[1],
               vaimage.offsets[2],vaimage.pitches[2]);
        info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM;
        vaAcquireBufferHandle(vaapi->display, vaimage.buf, &info);
        printf("vaAcquireBufferHandle: %x type: %x\n"
               "mem type: %x mem size: %x\n",
               info.handle, info.type, info.mem_type, info.mem_size);
        vaReleaseBufferHandle(vaapi->display, vaimage.buf);
        vaDestroyImage(vaapi->display, vaimage.image_id);
    };

    printf("vaCreateContext %dx%d\n",picture_width,picture_height);
    status = vaCreateContext(vaapi->display, config_id,
                             picture_width, picture_height,
                             VA_PROGRESSIVE,
                             v_surface_id, 4,
                             &context_id);
    if (!vaapi_check_status(status, "vaCreateContext()"))
    {
        vaDestroyConfig(vaapi->display, config_id);   /* was leaked here */
        FAIL();
        return -1;
    };

    vaapi->config_id  = config_id;
    vaapi->context_id = context_id;

    LEAVE();
    return 0;
}
/* FFmpeg get_format callback: walk the decoder's candidate pixel formats,
 * and when the VA-API format is offered, map the codec id to a VAProfile
 * and try to bring up the hardware decoder. On success the codec context
 * is wired to the global vaapi_context and the VA-API format is returned;
 * otherwise PIX_FMT_NONE. */
static enum PixelFormat get_format(struct AVCodecContext *avctx,
                                   const enum AVPixelFormat *fmt)
{
    int i;

    ENTER();

    for (i = 0; fmt[i] != PIX_FMT_NONE; i++)
    {
        int va_profile = -1;

        printf("pixformat %x\n", fmt[i]);

        if (fmt[i] != AV_PIX_FMT_VAAPI_VLD)
            continue;

        switch (avctx->codec_id)
        {
        case CODEC_ID_MPEG2VIDEO:
            va_profile = VAProfileMPEG2Main;
            break;
        case CODEC_ID_MPEG4:
        case CODEC_ID_H263:
            va_profile = VAProfileMPEG4AdvancedSimple;
            break;
        case CODEC_ID_H264:
            va_profile = VAProfileH264High;
            break;
        case CODEC_ID_WMV3:
            va_profile = VAProfileVC1Main;
            break;
        case CODEC_ID_VC1:
            va_profile = VAProfileVC1Advanced;
            break;
        default:
            break;      /* codec has no VA-API mapping */
        }

        if (va_profile < 0)
            continue;

        if (vaapi_init_decoder(va_profile, VAEntrypointVLD,
                               avctx->width, avctx->height) == 0)
        {
            avctx->hwaccel_context = v_context;
            LEAVE();
            return fmt[i];
        }
    }

    FAIL();
    return PIX_FMT_NONE;
}
/* Small descriptor attached to an AVFrame buffer reference so a decoded
 * frame can carry the VA surface it lives in. */
struct av_surface
{
    int         w;   /* surface width  */
    int         h;   /* surface height */
    VASurfaceID id;  /* VA-API surface handle */
};

/* AVBuffer free callback for surface-backed frames: releases only the small
 * av_surface descriptor. The surface itself is one of the global
 * v_surface_id[] entries and is NOT destroyed here. */
static void av_release_buffer(void *opaque, uint8_t *data)
{
    struct av_surface surface = *(struct av_surface*)data;
//    VDPAUContext *ctx = opaque;
//    ctx->video_surface_destroy(surface);
    av_freep(&data);
}
/* FFmpeg get_buffer2 callback: hand the decoder the pre-allocated VA surface
 * for the current frame slot (dfx) via pic->data[3], and attach a small
 * av_surface descriptor as the frame's buffer reference.
 *
 * Fix: the original allocated into the unrelated `surface` variable but
 * passed the UNINITIALIZED `avsurface` pointer to av_buffer_create(), so a
 * garbage pointer was later handed to av_release_buffer()/av_freep()
 * (undefined behavior). Also checks the av_buffer_create() result. */
static int get_buffer2(AVCodecContext *avctx, AVFrame *pic, int flags)
{
    struct av_surface *avsurface;

    /* VA-API convention: data[3] carries the surface id for hwaccel. */
    pic->data[3] = (void *)(uintptr_t)v_surface_id[dfx];

    avsurface = av_malloc(sizeof(*avsurface));
    if (!avsurface)
        return AVERROR(ENOMEM);

    pic->buf[0] = av_buffer_create((uint8_t*)avsurface, sizeof(*avsurface),
                                   av_release_buffer, avctx,
                                   AV_BUFFER_FLAG_READONLY);
    if (!pic->buf[0])
    {
        av_freep(&avsurface);
        return AVERROR(ENOMEM);
    }

    return 0;
}
/* Static storage for a vaapi_context.
 * NOTE(review): appears unused — vaapi_init() heap-allocates v_context
 * instead; confirm whether this can be removed. */
struct vaapi_context va_context_storage;

/* Wire an AVCodecContext to the VA-API hardware decode path:
 * single-threaded decoding plus our format negotiation (get_format) and
 * surface-backed frame buffers (get_buffer2). Always returns 0. */
int fplay_init_context(AVCodecContext *avctx)
{
    ENTER();

    avctx->thread_count = 1;   /* hw decode path is driven from one thread */
    avctx->get_format = get_format;
    avctx->get_buffer2 = get_buffer2;

    LEAVE();
    return 0;
}
/* Top-level VA-API bring-up: open the DRM display and initialize the global
 * VA context. Returns 0 on success, -1 on failure. */
int fplay_vaapi_init(void)
{
    VADisplay display = va_open_display();

    if (vaapi_init(display) < 0)
        return -1;

    return 0;
}
/* Cached swscale context used by va_convert_picture(); created lazily via
 * sws_getCachedContext(). */
struct SwsContext *vacvt_ctx;

/* Wait for the GPU to finish decoding into the surface of the current frame
 * slot (dfx).
 * NOTE(review): dereferences v_context without a NULL check — assumes it is
 * only called after a successful vaapi_init(); confirm against callers. */
void va_sync()
{
    struct vaapi_context* const vaapi = v_context;

    vaSyncSurface(vaapi->display, v_surface_id[dfx]);
};
/* Convert the decoded VA surface of the current frame slot (dfx) into `pic`
 * (BGRA) using swscale, mapping the surface as an NV12 VAImage.
 *
 * width/height: picture dimensions; pic: destination AVPicture.
 *
 * Fixes: the derived VAImage was leaked when vaMapBuffer() failed, and both
 * the mapped buffer and the image were leaked when sws_getCachedContext()
 * returned NULL. */
void va_convert_picture(int width, int height, AVPicture *pic)
{
    uint8_t *src_data[4];
    int src_linesize[4];
    VAImage vaimage;
    VAStatus status;
    uint8_t *vdata;
    struct vaapi_context* const vaapi = v_context;

    va_sync();

    status = vaDeriveImage(vaapi->display, v_surface_id[dfx], &vaimage);
    if (!vaapi_check_status(status, "vaDeriveImage()"))
    {
        FAIL();
        return;
    };

    /* One-shot debug dump of the surface layout and DRM handle. */
    static int once = 2;
    if (once && dfx == 0)
    {
        VABufferInfo info = {0};

        printf("vaDeriveImage: %x fourcc: %x\n"
               "offset0: %d pitch0: %d\n"
               "offset1: %d pitch1: %d\n"
               "offset2: %d pitch2: %d\n",
               vaimage.buf, vaimage.format.fourcc,
               vaimage.offsets[0],vaimage.pitches[0],
               vaimage.offsets[1],vaimage.pitches[1],
               vaimage.offsets[2],vaimage.pitches[2]);
        info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_KERNEL_DRM;
        status = vaAcquireBufferHandle(vaapi->display, vaimage.buf, &info);
        if (vaapi_check_status(status, "vaAcquireBufferHandle()"))
        {
            printf("vaAcquireBufferHandle: %x type: %x\n"
                   "mem type: %x mem size: %d\n",
                   info.handle, info.type, info.mem_type, info.mem_size);
            vaReleaseBufferHandle(vaapi->display, vaimage.buf);
        }
        once--;
    };

    src_linesize[0] = vaimage.pitches[0];
    src_linesize[1] = vaimage.pitches[1];
    src_linesize[2] = vaimage.pitches[2];
    src_linesize[3] = 0;

    status = vaMapBuffer(vaapi->display, vaimage.buf, (void **)&vdata);
    if (!vaapi_check_status(status, "vaMapBuffer()"))
    {
        vaDestroyImage(vaapi->display, vaimage.image_id);  /* was leaked here */
        FAIL();
        return;
    };

    src_data[0] = vdata + vaimage.offsets[0];
    src_data[1] = vdata + vaimage.offsets[1];
    src_data[2] = vdata + vaimage.offsets[2];
    src_data[3] = 0;

    vacvt_ctx = sws_getCachedContext(vacvt_ctx, width, height, AV_PIX_FMT_NV12,
                                     width, height, AV_PIX_FMT_BGRA,
                                     SWS_FAST_BILINEAR, NULL, NULL, NULL);
    if (vacvt_ctx == NULL)
    {
        printf("Cannot initialize the conversion context!\n");
        vaUnmapBuffer(vaapi->display, vaimage.buf);        /* was leaked here */
        vaDestroyImage(vaapi->display, vaimage.image_id);
        return;
    };

    sws_scale(vacvt_ctx, (const uint8_t* const *)src_data, src_linesize,
              0, height, pic->data, pic->linesize);

    vaUnmapBuffer(vaapi->display, vaimage.buf);
    vaDestroyImage(vaapi->display, vaimage.image_id);
}

View File

@ -47,8 +47,6 @@ int height;
AVRational video_time_base;
AVFrame *Frame;
extern mutex_t driver_lock;
void get_client_rect(rect_t *rc);
void flush_video()
@ -65,14 +63,14 @@ void flush_video()
dfx = 0;
};
int init_video(AVCodecContext *ctx)
int init_video(vst_t *vst)
{
int i;
width = ctx->width;
height = ctx->height;
width = vst->vCtx->width;
height = vst->vCtx->height;
Frame = avcodec_alloc_frame();
Frame = av_frame_alloc();
if ( Frame == NULL )
{
printf("Cannot alloc video frame\n\r");
@ -83,8 +81,8 @@ int init_video(AVCodecContext *ctx)
{
int ret;
ret = avpicture_alloc(&frames[i].picture, ctx->pix_fmt,
ctx->width, ctx->height);
ret = avpicture_alloc(&frames[i].picture, vst->vCtx->pix_fmt,
vst->vCtx->width, vst->vCtx->height);
if ( ret != 0 )
{
printf("Cannot alloc video buffer\n\r");
@ -95,13 +93,13 @@ int init_video(AVCodecContext *ctx)
frames[i].ready = 0;
};
create_thread(video_thread, ctx, 1024*1024);
create_thread(video_thread, vst, 1024*1024);
delay(50);
return 1;
};
int decode_video(AVCodecContext *ctx, queue_t *qv)
int decode_video(vst_t* vst)
{
AVPacket pkt;
double pts;
@ -111,7 +109,7 @@ int decode_video(AVCodecContext *ctx, queue_t *qv)
if(frames[dfx].ready != 0 )
return -1;
if( get_packet(qv, &pkt) == 0 )
if( get_packet(&vst->q_video, &pkt) == 0 )
return 0;
/*
@ -132,9 +130,11 @@ int decode_video(AVCodecContext *ctx, queue_t *qv)
{
frameFinished = 0;
ctx->reordered_opaque = pkt.pts;
vst->vCtx->reordered_opaque = pkt.pts;
if(avcodec_decode_video2(ctx, Frame, &frameFinished, &pkt) <= 0)
mutex_lock(&vst->gpu_lock);
if(avcodec_decode_video2(vst->vCtx, Frame, &frameFinished, &pkt) <= 0)
printf("video decoder error\n");
if(frameFinished)
@ -155,7 +155,7 @@ int decode_video(AVCodecContext *ctx, queue_t *qv)
av_image_copy(dst_pic->data, dst_pic->linesize,
(const uint8_t**)Frame->data,
Frame->linesize, ctx->pix_fmt, ctx->width, ctx->height);
Frame->linesize, vst->vCtx->pix_fmt, vst->vCtx->width, vst->vCtx->height);
frames[dfx].pts = pts*1000.0;
@ -165,6 +165,8 @@ int decode_video(AVCodecContext *ctx, queue_t *qv)
dfx&= 3;
frames_count++;
};
mutex_unlock(&vst->gpu_lock);
};
av_free_packet(&pkt);
@ -386,9 +388,6 @@ int MainWindowProc(ctrl_t *ctrl, uint32_t msg, uint32_t arg1, uint32_t arg2)
#define VERSION_A 1
extern queue_t q_video;
extern queue_t q_audio;
void render_time(render_t *render)
{
progress_t *prg = main_render->win->panel.prg;
@ -547,7 +546,7 @@ extern char *movie_file;
int video_thread(void *param)
{
AVCodecContext *ctx = param;
vst_t *vst = param;
window_t *MainWindow;
init_winlib();
@ -559,9 +558,7 @@ int video_thread(void *param)
show_window(MainWindow, NORMAL);
// __asm__ __volatile__("int3");
main_render = create_render(MainWindow, ctx, HW_TEX_BLIT|HW_BIT_BLIT);
main_render = create_render(vst, MainWindow, HW_TEX_BLIT|HW_BIT_BLIT);
if( main_render == NULL)
{
printf("Cannot create render\n\r");
@ -587,7 +584,7 @@ int video_thread(void *param)
void draw_hw_picture(render_t *render, AVPicture *picture);
void draw_sw_picture(render_t *render, AVPicture *picture);
render_t *create_render(window_t *win, AVCodecContext *ctx, uint32_t flags)
render_t *create_render(vst_t *vst, window_t *win, uint32_t flags)
{
render_t *render;
@ -600,11 +597,12 @@ render_t *create_render(window_t *win, AVCodecContext *ctx, uint32_t flags)
render = (render_t*)malloc(sizeof(render_t));
memset(render, 0, sizeof(render_t));
render->vst = vst;
render->win = win;
render->ctx_width = ctx->width;
render->ctx_height = ctx->height;
render->ctx_format = ctx->pix_fmt;
render->ctx_width = vst->vCtx->width;
render->ctx_height = vst->vCtx->height;
render->ctx_format = vst->vCtx->pix_fmt;
render->caps = pxInit(1);
@ -845,7 +843,7 @@ void draw_hw_picture(render_t *render, AVPicture *picture)
cvt_ctx = sws_getCachedContext(cvt_ctx,
render->ctx_width, render->ctx_height, render->ctx_format,
dst_width, dst_height, PIX_FMT_BGRA,
dst_width, dst_height, AV_PIX_FMT_BGRA,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
if(cvt_ctx == NULL)
{
@ -877,6 +875,7 @@ void draw_hw_picture(render_t *render, AVPicture *picture)
picture->linesize, 0, render->ctx_height, data, linesize);
// printf("sws_scale\n");
mutex_lock(&render->vst->gpu_lock);
if(render->caps & HW_TEX_BLIT)
{
@ -899,6 +898,7 @@ void draw_hw_picture(render_t *render, AVPicture *picture)
CAPTION_HEIGHT+render->rcvideo.t,
render->rcvideo.r, render->rcvideo.b, 0, 0);
};
mutex_unlock(&render->vst->gpu_lock);
render->last_bitmap = bitmap;
render->target++;
@ -920,7 +920,7 @@ void draw_sw_picture(render_t *render, AVPicture *picture)
render->ctx_width, render->ctx_height,
render->ctx_format,
render->rcvideo.r, render->rcvideo.b,
PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
AV_PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL);
if(cvt_ctx == NULL)
{
printf("Cannot initialize the conversion context!\n");